sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
author     Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
           Wed, 15 Jul 2015 09:52:04 +0000 (12:52 +0300)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 3 Aug 2015 10:21:24 +0000 (12:21 +0200)
These functions check should_resched() before unlocking their spinlock or
re-enabling bottom halves, but at that point preempt_count is still
non-zero, so should_resched() always returns false. As a result,
cond_resched_lock() dropped the lock only when spin_needbreak was set, and
cond_resched_softirq() never rescheduled at all.

This patch adds a "preempt_offset" argument to should_resched().

It also adds the preempt_count offset constants the callers need:

  PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
  PREEMPT_LOCK_OFFSET     - offset after spin_lock()
  SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
  SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/preempt.h
include/asm-generic/preempt.h
include/linux/preempt.h
include/linux/sched.h
kernel/sched/core.c

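To make the failure mode concrete, here is a minimal caller sketch (the lock, table, and function names are hypothetical, not from the patch): a long scan under a spinlock that periodically offers to reschedule. Before this fix the cond_resched_lock() below dropped the lock only when spin_needbreak() fired; with the new PREEMPT_LOCK_OFFSET test it also yields whenever a reschedule is pending.

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

/* Walk a fixed-size table under example_lock; the table itself is
 * assumed to stay valid even if the lock is briefly dropped. */
static void example_scan(unsigned long *table, int nents)
{
	int i;

	spin_lock(&example_lock);	/* preempt_count is now PREEMPT_LOCK_OFFSET */
	for (i = 0; i < nents; i++) {
		/* ... process table[i] under the lock ... */

		/*
		 * Old code compared preempt_count with 0 here and never
		 * matched; new code compares with PREEMPT_LOCK_OFFSET,
		 * so a pending reschedule drops and re-takes the lock.
		 */
		cond_resched_lock(&example_lock);
	}
	spin_unlock(&example_lock);
}
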
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca71714f86076f5fae6508fe6bbcb2c573723ae..b12f81022a6b2c574f9c037e616f90373502c24e 100644
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-       return unlikely(!raw_cpu_read_4(__preempt_count));
+       return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
 #ifdef CONFIG_PREEMPT
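
Why a single equality test suffices here: x86 folds the need-resched flag, inverted, into the top bit of the per-cpu count (PREEMPT_NEED_RESCHED, 0x80000000), so the raw value equals the expected offset exactly when the disable depth matches and a reschedule is pending. A user-space model (an illustration, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* set while NO resched is pending */

static bool x86_should_resched(uint32_t raw_preempt_count, int preempt_offset)
{
	return raw_preempt_count == (uint32_t)preempt_offset;
}

int main(void)
{
	/* One spinlock held (offset 1), resched pending: inverted bit clear. */
	assert(x86_should_resched(0x00000001u, 1));
	/* Same depth, nothing pending: inverted bit set, compare fails. */
	assert(!x86_should_resched(0x00000001u | PREEMPT_NEED_RESCHED, 1));
	return 0;
}
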
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a4753db2b3cb516ceed3c331ad330b92e877..0bec580a48854f0a49b012b203663ebeddf98257 100644
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-       return unlikely(!preempt_count() && tif_need_resched());
+       return unlikely(preempt_count() == preempt_offset &&
+                       tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
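
The generic variant cannot fold the flag into the count, so it tests the thread-info flag separately. Modelled the same way:

#include <assert.h>
#include <stdbool.h>

static bool generic_should_resched(unsigned int preempt_count, int preempt_offset,
				   bool tif_need_resched)
{
	return preempt_count == (unsigned int)preempt_offset && tif_need_resched;
}

int main(void)
{
	assert(generic_should_resched(1, 1, true));	/* e.g. under spin_lock() */
	assert(!generic_should_resched(1, 1, false));	/* no resched pending */
	assert(!generic_should_resched(2, 1, true));	/* nested lock: stay put */
	return 0;
}
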
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f1851739490774436726e49b5c43f16db75..bea8dd8ff5e026f8fc3e3bc446af7aea7ec891e2 100644
  */
 #define in_nmi()       (preempt_count() & NMI_MASK)
 
+/*
+ * The preempt_count offset after preempt_disable();
+ */
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET        PREEMPT_OFFSET
 #else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET        0
 #endif
 
+/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET    PREEMPT_DISABLE_OFFSET
+
 /*
  * The preempt_count offset needed for things like:
  *
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
 /*
  * Are we running in atomic context?  WARNING: this macro cannot
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+       ({ preempt_count_sub(1); should_resched(0); })
 #else
 #define preempt_count_add(val) __preempt_count_add(val)
 #define preempt_count_sub(val) __preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
 
 #define preempt_check_resched() \
 do { \
-       if (should_resched()) \
+       if (should_resched(0)) \
                __preempt_schedule(); \
 } while (0)
 
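As a cross-check of the arithmetic behind these constants, a user-space sketch using the values the kernel picks with CONFIG_PREEMPT_COUNT=y (the real definitions live in include/linux/preempt.h; the numeric values below are restated here as assumptions of the sketch):

#include <assert.h>

#define PREEMPT_OFFSET		(1UL << 0)	/* one preempt_disable() level */
#define SOFTIRQ_OFFSET		(1UL << 8)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)	/* local_bh_disable() */

#define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

int main(void)
{
	assert(PREEMPT_LOCK_OFFSET == 0x001);		/* after spin_lock() */
	assert(SOFTIRQ_DISABLE_OFFSET == 0x200);	/* after local_bh_disable() */
	assert(SOFTIRQ_LOCK_OFFSET == 0x201);		/* after spin_lock_bh() */
	return 0;
}
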
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 65a8a8651596f600024307c38825e7977d6fa73a..9c144657aace23bdbb16e0ff132a6922c3699e1b 100644
@@ -2891,12 +2891,6 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET    PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET    0
-#endif
-
 #define cond_resched_lock(lock) ({                             \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
        __cond_resched_lock(lock);                              \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fa5826cc612f4336a8b942d45fd7f379759a128e..f5fad2b12bafcd8459b33c8522db2d803b4ea277 100644
@@ -4496,7 +4496,7 @@ SYSCALL_DEFINE0(sched_yield)
 
 int __sched _cond_resched(void)
 {
-       if (should_resched()) {
+       if (should_resched(0)) {
                preempt_schedule_common();
                return 1;
        }
@@ -4514,7 +4514,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       int resched = should_resched();
+       int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;
 
        lockdep_assert_held(lock);
@@ -4536,7 +4536,7 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (should_resched()) {
+       if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
                local_bh_enable();
                preempt_schedule_common();
                local_bh_disable();
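
For completeness, the pattern __cond_resched_softirq() serves, again with hypothetical helper names: a loop that runs with bottom halves disabled. Previously should_resched() saw preempt_count == SOFTIRQ_DISABLE_OFFSET, which is non-zero, and never fired; with the explicit offset the enable/schedule/disable sequence above can finally run.

#include <linux/bottom_half.h>
#include <linux/sched.h>
#include <linux/types.h>

static bool have_work(void);		/* hypothetical */
static void process_one_unit(void);	/* hypothetical */

static void example_bh_loop(void)
{
	local_bh_disable();	/* preempt_count += SOFTIRQ_DISABLE_OFFSET */
	while (have_work()) {
		process_one_unit();
		/* May re-enable BHs, reschedule, then disable BHs again. */
		cond_resched_softirq();
	}
	local_bh_enable();
}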