 include/linux/rcupdate.h | 25
 include/linux/rcutree.h  | 18
 kernel/rcupdate.c        |  2
 kernel/rcutree.c         | 10
 4 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 23a500f70fcf..888d24ac66ba 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,6 +101,9 @@ extern void call_rcu(struct rcu_head *head,
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh	call_rcu
+#else
 /**
  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -121,6 +124,7 @@
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
+#endif
 
 /**
  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -190,7 +194,13 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern void rcu_bh_qs(int cpu);
+#else
+static inline void rcu_bh_qs(int cpu) { }
+#endif
+
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
@@ -331,7 +341,14 @@ static inline int rcu_read_lock_held(void)
  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
  * hell.
  */
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+	return rcu_read_lock_held();
+}
+#else
 extern int rcu_read_lock_bh_held(void);
+#endif
 
 /**
  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -780,10 +797,14 @@ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_lock();
+#else
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
 			   "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
@@ -793,10 +814,14 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_unlock();
+#else
 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
+#endif
 	local_bh_enable();
 }
 
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e8ee5dd0854c..5b2d03e18c3e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
 }
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern void synchronize_rcu_bh(void);
+#else
+# define synchronize_rcu_bh()	synchronize_rcu()
+#endif
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
@@ -85,19 +89,29 @@ static inline void synchronize_rcu_bh_expedited(void)
 }
 
 extern void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh	rcu_barrier
+#else
 extern void rcu_barrier_bh(void);
+#endif
 extern void rcu_barrier_sched(void);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
 extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
 
 extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern void rcu_bh_force_quiescent_state(void);
+extern long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
+# define rcu_batches_completed_bh	rcu_batches_completed
+#endif
+
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a86f1741cc27..63fc4b3e7af3 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void)
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
  *
@@ -103,6 +104,7 @@ int rcu_read_lock_bh_held(void)
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..3c1ae8c584ce 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -172,6 +172,7 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -182,6 +183,7 @@ void rcu_bh_qs(int cpu)
 	trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesce = 1;
 }
+#endif
 
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
@@ -228,6 +230,7 @@ long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
  */
@@ -245,6 +248,7 @@ void rcu_bh_force_quiescent_state(void)
 	force_quiescent_state(&rcu_bh_state, 0);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#endif
 
 /*
  * Record the number of times rcutorture tests have been initiated and
@@ -1885,6 +1889,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
@@ -1893,6 +1898,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /**
  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
@@ -1929,6 +1935,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ -1949,6 +1956,7 @@ void synchronize_rcu_bh(void)
 	wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
 static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
@@ -2224,6 +2232,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
 	mutex_unlock(&rcu_barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
@@ -2232,6 +2241,7 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
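
For context, a minimal sketch of what a caller of the RCU-bh API sees once
the aliases above take effect. This is illustrative only and not part of the
patch: struct demo_entry, demo_list, and the demo_* functions are hypothetical
names; only the rcu_*()/call_rcu_bh() calls are the real API. On
!PREEMPT_RT_FULL the reader runs with softirqs disabled in an RCU-bh read-side
critical section; on PREEMPT_RT_FULL the same code becomes local_bh_disable()
plus an ordinary preemptible RCU read-side critical section, and
call_rcu_bh()/synchronize_rcu_bh()/rcu_barrier_bh() fall through to their
plain-RCU counterparts.

/* Illustrative sketch, not part of the patch. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {			/* hypothetical example structure */
	int val;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_list);

static void demo_reader(void)
{
	struct demo_entry *e;

	/*
	 * !PREEMPT_RT_FULL: softirqs disabled, RCU-bh read-side section.
	 * PREEMPT_RT_FULL:  per the #ifdefs above, this expands to
	 * local_bh_disable() + rcu_read_lock(), i.e. a preemptible
	 * RCU read-side critical section.
	 */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, &demo_list, node)
		pr_info("demo: val=%d\n", e->val);
	rcu_read_unlock_bh();
}

static void demo_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

/* Caller is assumed to hold whatever lock serializes updaters. */
static void demo_remove(struct demo_entry *e)
{
	list_del_rcu(&e->node);
	/* On RT this is literally call_rcu(): both flavors share one GP. */
	call_rcu_bh(&e->rcu, demo_free_cb);
}

Because both flavors map onto the same implementation under PREEMPT_RT_FULL,
any grace period that satisfies call_rcu() also satisfies call_rcu_bh(); the
quicker bh-only grace periods are given up, which is consistent with RT
running softirqs in preemptible thread context.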