sched/headers: Move the PREEMPT_COUNT defines from <linux/sched.h> to <linux/preempt.h>
author	Ingo Molnar <mingo@kernel.org>
	Fri, 3 Feb 2017 21:07:57 +0000 (22:07 +0100)
committer	Ingo Molnar <mingo@kernel.org>
	Fri, 3 Mar 2017 00:43:47 +0000 (01:43 +0100)
These defines are not really part of the scheduler's driver API, but are
related to the preempt count - so move them to <linux/preempt.h>.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/preempt.h
include/linux/sched.h

index 7eeceac52dea2509ea28344d1181ccae023aead9..cae461224948a715c029467d573f18716873f54f 100644 (file)
 /* We use the MSB mostly because it's available */
 #define PREEMPT_NEED_RESCHED   0x80000000
 
+#define PREEMPT_DISABLED       (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
+/*
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
+ *
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
+ */
+#define INIT_PREEMPT_COUNT     PREEMPT_OFFSET
+
+/*
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
+ */
+#define FORK_PREEMPT_COUNT     (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
 /* preempt_count() and related functions, depend on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
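For reference, the arithmetic these macros encode is small enough to check by hand. Below is a standalone userspace sketch (not kernel code) that assumes PREEMPT_OFFSET == 1 and PREEMPT_ENABLED == 0, as in asm-generic/preempt.h, and prints the resulting values with and without CONFIG_PREEMPT_COUNT:

#include <stdio.h>

/*
 * Standalone sketch of the macro arithmetic above -- not kernel code.
 * Assumes PREEMPT_OFFSET == 1 and PREEMPT_ENABLED == 0, as in
 * asm-generic/preempt.h (x86 additionally folds PREEMPT_NEED_RESCHED
 * into PREEMPT_ENABLED).
 */
#define PREEMPT_OFFSET	1	/* one preempt_disable() level */
#define PREEMPT_ENABLED	0

static void show(const char *config, int preempt_disable_offset)
{
	printf("%s:\n", config);
	printf("  PREEMPT_DISABLED   = %d\n",
	       preempt_disable_offset + PREEMPT_ENABLED);
	printf("  INIT_PREEMPT_COUNT = %d (unconditional)\n", PREEMPT_OFFSET);
	printf("  FORK_PREEMPT_COUNT = %d\n",
	       2*preempt_disable_offset + PREEMPT_ENABLED);
}

int main(void)
{
	/* CONFIG_PREEMPT_COUNT=y: PREEMPT_DISABLE_OFFSET == PREEMPT_OFFSET */
	show("PREEMPT_COUNT", PREEMPT_OFFSET);
	/* !CONFIG_PREEMPT_COUNT: PREEMPT_DISABLE_OFFSET == 0 */
	show("!PREEMPT_COUNT", 0);
	return 0;
}

On !PREEMPT_COUNT kernels PREEMPT_DISABLE_OFFSET is 0, so PREEMPT_DISABLED and FORK_PREEMPT_COUNT collapse to PREEMPT_ENABLED, while INIT_PREEMPT_COUNT stays at PREEMPT_OFFSET -- which is what the "unconditional value" in the comment refers to.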
index 5398356e33c7b84566610bbe16a31821aba0d9d6..3b3e31da416eec95bab9d8ae713d08a6141bf45a 100644 (file)
@@ -265,27 +265,6 @@ struct task_cputime_atomic {
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }
 
-#define PREEMPT_DISABLED       (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
-/*
- * Disable preemption until the scheduler is running -- use an unconditional
- * value so that it also works on !PREEMPT_COUNT kernels.
- *
- * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
- */
-#define INIT_PREEMPT_COUNT     PREEMPT_OFFSET
-
-/*
- * Initial preempt_count value; reflects the preempt_count schedule invariant
- * which states that during context switches:
- *
- *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
- *
- * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
- * Note: See finish_task_switch().
- */
-#define FORK_PREEMPT_COUNT     (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:    atomic thread group interval timers.
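The "schedule invariant" in the moved comment is worth unpacking: across a context switch two preempt-disable levels are held -- schedule()'s own preempt_disable() plus the one implied by holding the runqueue lock -- and a freshly forked task starts execution inside finish_task_switch(), so its count must begin at exactly 2*PREEMPT_DISABLE_OFFSET. A minimal userspace mock of that invariant (the names mirror the kernel macros, but this is not kernel code; it assumes CONFIG_PREEMPT_COUNT=y):

#include <assert.h>
#include <stdio.h>

/*
 * Userspace mock of the context-switch invariant described above.
 * The names mirror the kernel macros, but none of this is kernel
 * code; it assumes CONFIG_PREEMPT_COUNT=y, i.e.
 * PREEMPT_DISABLE_OFFSET == 1 and PREEMPT_ENABLED == 0.
 */
#define PREEMPT_ENABLED		0
#define PREEMPT_DISABLE_OFFSET	1
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

int main(void)
{
	/* A forked task's count, as set by init_task_preempt_count(). */
	int preempt_count = FORK_PREEMPT_COUNT;

	/* The invariant: two disable levels are held across a switch. */
	assert(preempt_count == 2*PREEMPT_DISABLE_OFFSET);

	/* finish_task_switch() releases the runqueue lock... */
	preempt_count -= PREEMPT_DISABLE_OFFSET;
	/* ...and the schedule()-level preempt_enable() drops the rest. */
	preempt_count -= PREEMPT_DISABLE_OFFSET;

	assert(preempt_count == PREEMPT_ENABLED);
	printf("FORK_PREEMPT_COUNT = %d, final count = %d\n",
	       FORK_PREEMPT_COUNT, preempt_count);
	return 0;
}

In the kernel itself, finish_task_switch() WARNs about a corrupted preempt_count and resets it to FORK_PREEMPT_COUNT when this invariant does not hold.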