author | Peter Zijlstra <peterz@infradead.org> | 2015-09-28 11:45:40 -0400
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2015-10-06 11:08:12 -0400
commit | 87dcbc0610cb580c8eaf289f52aca3620af825f0 (patch) |
tree | 8fe4c05507c8c9ff4e2369cc16f86cad2a82dd14 /include/linux/sched.h |
parent | fe19159225d8516f3f57a5fe8f735c01684f0ddd (diff) |
sched/core: Simplify INIT_PREEMPT_COUNT
As per the following commit:
d86ee4809d03 ("sched: optimize cond_resched()")
we need PREEMPT_ACTIVE to keep cond_resched() from working before
the scheduler is set up.
However, keeping preemption disabled should do the same thing
already, making the PREEMPT_ACTIVE part entirely redundant.
The only complication is !PREEMPT_COUNT kernels, where
PREEMPT_DISABLED ends up being 0. Instead we use an unconditional
PREEMPT_OFFSET to set preempt_count() even on !PREEMPT_COUNT
kernels.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 11
1 file changed, 5 insertions, 6 deletions
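To make the reasoning above concrete, here is a hedged sketch of the constants involved. The names mirror the kernel's, but the values and the exact CONFIG_PREEMPT_COUNT plumbing are illustrative, not the verbatim headers of that era:

```c
/*
 * Illustrative sketch only -- the real definitions live in
 * <linux/preempt.h>/<asm/preempt.h> and differ in detail per arch.
 */
#define PREEMPT_OFFSET	1		/* one unit of preempt_count() */
#define PREEMPT_ACTIVE	(1 << 16)	/* bit position illustrative */

#ifdef CONFIG_PREEMPT_COUNT
# define PREEMPT_DISABLED	PREEMPT_OFFSET	/* boot with preemption off */
#else
# define PREEMPT_DISABLED	0	/* count compiled out: ends up 0! */
#endif

/* Old: PREEMPT_ACTIVE masked the fact that PREEMPT_DISABLED can be 0. */
#define INIT_PREEMPT_COUNT_OLD	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/* New: unconditionally non-zero, even on !PREEMPT_COUNT kernels. */
#define INIT_PREEMPT_COUNT_NEW	PREEMPT_OFFSET
```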
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d086cf0ca2c7..e5b8cbc4b8d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -606,19 +606,18 @@ struct task_cputime_atomic {
 #endif
 
 /*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
  *
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
  */
-#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic: atomic thread group interval timers.
  * @running: non-zero when there are timers running and
  *	@cputime receives updates.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
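The redundancy argument hinges on how cond_resched() is gated: it only reschedules when preempt_count() is zero, so any non-zero boot value suppresses it just as well as PREEMPT_ACTIVE did. A self-contained toy model of that gate (a userspace simulation, not kernel code; the real should_resched() is per-arch and, after this series, compares against an explicit preempt_offset):

```c
#include <stdio.h>

/*
 * Toy userspace model of the boot-time cond_resched() gate. The names
 * mirror the kernel's, but everything here is a simulation.
 */
#define PREEMPT_OFFSET		1
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET	/* the new definition */

static unsigned int preempt_count_val = INIT_PREEMPT_COUNT;
static int need_resched_flag = 1;	/* pretend a wakeup is pending */

static int should_resched(void)
{
	/*
	 * Any non-zero count suppresses rescheduling, which is why the
	 * extra PREEMPT_ACTIVE bit in the old INIT_PREEMPT_COUNT was
	 * redundant once the boot count is guaranteed non-zero.
	 */
	return preempt_count_val == 0 && need_resched_flag;
}

int main(void)
{
	printf("before init_idle(): should_resched() = %d\n", should_resched());

	/* init_idle()->init_idle_preempt_count() resets the count. */
	preempt_count_val = 0;

	printf("after  init_idle(): should_resched() = %d\n", should_resched());
	return 0;
}
```

Running it prints 0 before the reset and 1 after: cond_resched() stays inert until init_idle() runs, with no PREEMPT_ACTIVE needed.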