author    Konstantin Khlebnikov <khlebnikov@yandex-team.ru>	2015-07-15 12:52:04 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-10-27 09:51:58 +0900
commit    98197d3de58a62785be3e421864d6145955f197d (patch)
tree      7c32b124ba6ddd924eccf5601d4aa9e4c23202a7
parent    3905f7abd0410acdd497661e84c7f6ef672c1e04 (diff)
sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
commit fe32d3cd5e8eb0f82e459763374aa80797023403 upstream.

These functions check should_resched() before unlocking spinlock/bh-enable:
preempt_count is always non-zero there => should_resched() always returns
false. cond_resched_lock() worked only when spin_needbreak was set.

This patch adds an argument "preempt_offset" to should_resched(), with
preempt_count offset constants for each calling context:

PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
PREEMPT_LOCK_OFFSET     - offset after spin_lock()
SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
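To make the failure mode concrete, here is a minimal userspace sketch. It is illustrative only, not kernel code: it models preempt_count as a plain integer, elides the tif_need_resched() test, and assumes CONFIG_PREEMPT_COUNT semantics, where spin_lock() raises the count by PREEMPT_LOCK_OFFSET (== 1):

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_LOCK_OFFSET 1

/* Modeled preempt_count while a spinlock is held. */
static int preempt_count = PREEMPT_LOCK_OFFSET;

/* Old check: true only at count 0, which never holds under a lock. */
static bool old_should_resched(void)
{
	return preempt_count == 0;
}

/* New check: the caller states which offset its context legitimately holds. */
static bool new_should_resched(int preempt_offset)
{
	return preempt_count == preempt_offset;
}

int main(void)
{
	printf("old: %d\n", old_should_resched());                    /* always 0 */
	printf("new: %d\n", new_should_resched(PREEMPT_LOCK_OFFSET)); /* 1 */
	return 0;
}

With the old check, cond_resched_lock() could only ever reschedule via the separate spin_needbreak() test; with the offset argument, the count is compared against exactly what the held lock itself contributes.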
-rw-r--r--  arch/x86/include/asm/preempt.h |  4 ++--
-rw-r--r--  include/asm-generic/preempt.h  |  5 +++--
-rw-r--r--  include/linux/preempt.h        |  5 +++--
-rw-r--r--  include/linux/preempt_mask.h   | 14 +++++++++++---
-rw-r--r--  include/linux/sched.h          |  6 ------
-rw-r--r--  kernel/sched/core.c            |  6 +++---
6 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8f3271842533..67b6cd00a44f 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!raw_cpu_read_4(__preempt_count));
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
#ifdef CONFIG_PREEMPT
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index eb6f9e6c3075..b6a53e8e526a 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!preempt_count() && tif_need_resched());
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
}
#ifdef CONFIG_PREEMPT
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index de83b4eb1642..8cd6725c5758 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -20,7 +20,8 @@
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+ ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -59,7 +60,7 @@ do { \
#define preempt_check_resched() \
do { \
- if (should_resched()) \
+ if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 1f654ee836b7..5cb25f17331a 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -71,13 +71,21 @@
*/
#define in_nmi() (preempt_count() & NMI_MASK)
+/*
+ * The preempt_count offset after preempt_disable();
+ */
#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET 0
#endif
/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+
+/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
@@ -90,7 +98,7 @@
*
* Work as expected.
*/
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
/*
* Are we running in atomic context? WARNING: this macro cannot
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a2e6122734..61f4f2d5c882 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2834,12 +2834,6 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
-
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8476206a1e19..4d870eb6086b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4232,7 +4232,7 @@ SYSCALL_DEFINE0(sched_yield)
int __sched _cond_resched(void)
{
- if (should_resched()) {
+ if (should_resched(0)) {
preempt_schedule_common();
return 1;
}
@@ -4250,7 +4250,7 @@ EXPORT_SYMBOL(_cond_resched);
*/
int __cond_resched_lock(spinlock_t *lock)
{
- int resched = should_resched();
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held(lock);
@@ -4272,7 +4272,7 @@ int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
- if (should_resched()) {
+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
local_bh_enable();
preempt_schedule_common();
local_bh_disable();
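For context, a hypothetical caller sketch of the pattern __cond_resched_lock() serves, and which this fix makes work again. The structure, field names, and function below are invented for illustration and are not part of this patch:

#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical example structure -- not from this patch. */
struct my_table {
	spinlock_t lock;
	int nr_slots;
};

static void my_clear_table(struct my_table *tbl)
{
	int i;

	spin_lock(&tbl->lock);
	for (i = 0; i < tbl->nr_slots; i++) {
		/* ... per-slot work under tbl->lock ... */

		/*
		 * May drop and retake tbl->lock to reschedule. Before
		 * this fix, the should_resched() branch inside never
		 * fired, because preempt_count was non-zero while the
		 * lock was held.
		 */
		cond_resched_lock(&tbl->lock);
	}
	spin_unlock(&tbl->lock);
}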