path: root/include/linux/rcutree.h
author     Thomas Gleixner <tglx@linutronix.de>        2011-10-05 11:59:38 -0700
committer  Clark Williams <williams@redhat.com>        2012-04-11 11:07:46 -0500
commit     5f9d7f17c8bd2efb468bd096d935e94bfaec35ba (patch)
tree       2b10ecff3e743499a7bce54e554fa266a9a7daa3 /include/linux/rcutree.h
parent     feac2435f73071097dc733d24c640447c810955b (diff)
rcu: Merge RCU-bh into RCU-preempt
The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, which include RCU-bh read-side critical sections being non-preemptible. This patch therefore arranges for RCU-bh to be implemented in terms of RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.

This has the downside of defeating the purpose of RCU-bh, namely, handling the case where the system is subjected to a network-based denial-of-service attack that keeps at least one CPU doing full-time softirq processing. This issue will be fixed by a later commit.

The current commit will need some work to make it appropriate for mainline use, for example, it needs to be extended to cover Tiny RCU.

[ paulmck: Added a useful changelog ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
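For illustration only (this sketch is not part of the patch; struct foo, foo_gp, foo_lock and foo_update() are made-up names), an updater written against the RCU-bh API builds unchanged; with CONFIG_PREEMPT_RT_FULL=y the synchronize_rcu_bh() call resolves to synchronize_rcu() via the alias added in the diff below:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct foo {
	int data;
};

static struct foo __rcu *foo_gp;
static DEFINE_SPINLOCK(foo_lock);

/*
 * Hypothetical updater: publish a new version of the data and free the
 * old one once no RCU-bh reader can still hold a reference to it.  With
 * CONFIG_PREEMPT_RT_FULL=y the synchronize_rcu_bh() below is the
 * synchronize_rcu() alias added by this patch, so it waits for an
 * RCU-preempt grace period instead of a dedicated RCU-bh one.
 */
static void foo_update(struct foo *newp)
{
	struct foo *old;

	spin_lock(&foo_lock);
	old = rcu_dereference_protected(foo_gp, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_gp, newp);
	spin_unlock(&foo_lock);

	synchronize_rcu_bh();	/* == synchronize_rcu() on PREEMPT_RT_FULL */
	kfree(old);
}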
Diffstat (limited to 'include/linux/rcutree.h')
-rw-r--r--  include/linux/rcutree.h | 18
1 files changed, 16 insertions, 2 deletions
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e8ee5dd0854c..5b2d03e18c3e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern void synchronize_rcu_bh(void);
+#else
+# define synchronize_rcu_bh() synchronize_rcu()
+#endif
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
@@ -85,19 +89,29 @@ static inline void synchronize_rcu_bh_expedited(void)
 }
 
 extern void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh rcu_barrier
+#else
 extern void rcu_barrier_bh(void);
+#endif
 extern void rcu_barrier_sched(void);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
 extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
 
 extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern void rcu_bh_force_quiescent_state(void);
+extern long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
+# define rcu_batches_completed_bh rcu_batches_completed
+#endif
+
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
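A further illustrative sketch (again not from the patch; rcu_bh_debug_report() is a made-up helper): callers of the bh-flavoured counters and quiescent-state hook also build unchanged, and on CONFIG_PREEMPT_RT_FULL=y the _bh names are the aliases defined above, so they report and act on RCU-preempt state:

#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical debug helper: with CONFIG_PREEMPT_RT_FULL=y,
 * rcu_batches_completed_bh() is rcu_batches_completed() and
 * rcu_bh_force_quiescent_state() is rcu_force_quiescent_state(),
 * so both printouts track the same grace-period counter.
 */
static void rcu_bh_debug_report(void)
{
	pr_info("rcu_bh batches completed: %ld\n", rcu_batches_completed_bh());
	pr_info("rcu    batches completed: %ld\n", rcu_batches_completed());

	rcu_bh_force_quiescent_state();	/* rcu_force_quiescent_state() on RT */
}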