about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2017-02-03 16:07:57 -0500
committerIngo Molnar <mingo@kernel.org>2017-03-02 19:43:47 -0500
commitd04b0ad37e4b6ac39a56c823ae76ab37cd044dc7 (patch)
treedbc70de4f29558e567a2efe1affa2c131cf1fb32
parentc7af7877eeacfeaaf6a1b6f54c481292ef116837 (diff)
sched/headers: Move the PREEMPT_COUNT defines from <linux/sched.h> to <linux/preempt.h>
These defines are not really part of the scheduler's driver API, but are related to the preempt count - so move them to <linux/preempt.h>.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--include/linux/preempt.h21
-rw-r--r--include/linux/sched.h21
2 files changed, 21 insertions, 21 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7eeceac52dea..cae461224948 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -55,6 +55,27 @@
 55  55 /* We use the MSB mostly because its available */
 56  56 #define PREEMPT_NEED_RESCHED 0x80000000
 57  57
58#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
59
60/*
61 * Disable preemption until the scheduler is running -- use an unconditional
62 * value so that it also works on !PREEMPT_COUNT kernels.
63 *
64 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
65 */
66#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
67
68/*
69 * Initial preempt_count value; reflects the preempt_count schedule invariant
70 * which states that during context switches:
71 *
72 * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
73 *
74 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
75 * Note: See finish_task_switch().
76 */
77#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
78
 58  79 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 59  80 #include <asm/preempt.h>
 60  81
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5398356e33c7..3b3e31da416e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -265,27 +265,6 @@ struct task_cputime_atomic {
265 265 	.sum_exec_runtime = ATOMIC64_INIT(0), \
266 266 }
267 267
268#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
269
270/*
271 * Disable preemption until the scheduler is running -- use an unconditional
272 * value so that it also works on !PREEMPT_COUNT kernels.
273 *
274 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
275 */
276#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
277
278/*
279 * Initial preempt_count value; reflects the preempt_count schedule invariant
280 * which states that during context switches:
281 *
282 * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
283 *
284 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
285 * Note: See finish_task_switch().
286 */
287#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
288
289 268 /**
290 269  * struct thread_group_cputimer - thread group interval timer counts
291 270  * @cputime_atomic: atomic thread group interval timers.