author		Thomas Gleixner <tglx@linutronix.de>	2019-07-26 17:19:37 -0400
committer	Ingo Molnar <mingo@kernel.org>		2019-07-31 13:03:34 -0400
commit		c1a280b68d4e6b6db4a65aa7865c22d8789ddf09 (patch)
tree		24c5d965843b44cbe654f5bde0cc68583e3e3321 /include
parent		2a11c76e5301dddefcb618dac04f74e6314df6bc (diff)
sched/preempt: Use CONFIG_PREEMPTION where appropriate
CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality which today depends on CONFIG_PREEMPT.
Switch the preemption code, scheduler and init task over to use
CONFIG_PREEMPTION.
That's the first step towards RT in that area. The more complex changes are
coming separately.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.117528401@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
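
As a rough illustration of the Kconfig relationship described in the message above (a sketch mirroring kernel/Kconfig.preempt, not part of this patch): both full-preemption models select the shared symbol, so a single preprocessor guard now covers both:

/*
 * Sketch of the Kconfig wiring (assumption, not from this diff):
 *
 *   config PREEMPT      ->  select PREEMPTION
 *   config PREEMPT_RT   ->  select PREEMPTION
 */
#ifdef CONFIG_PREEMPTION
	/* built for CONFIG_PREEMPT=y and for CONFIG_PREEMPT_RT=y */
#else
	/* voluntary/none preemption models */
#endif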
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/preempt.h	4
-rw-r--r--	include/linux/preempt.h		6
-rw-r--r--	include/linux/sched.h		6
3 files changed, 8 insertions, 8 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index c3046c920063..d683f5e6d791 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset)
 			tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd92b1a93919..bbb68dba37cc 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -182,7 +182,7 @@ do { \
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -203,7 +203,7 @@ do { \
 		__preempt_schedule(); \
 } while (0)
 
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -217,7 +217,7 @@ do { \
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
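
For context, a minimal usage sketch of the macros changed above (illustrative only, not part of this patch; example_count and example_percpu_path() are made-up names). With CONFIG_PREEMPTION, preempt_enable() re-checks the need-resched state and may call __preempt_schedule(); without it, it merely decrements the preempt count:

static DEFINE_PER_CPU(int, example_count);	/* hypothetical per-CPU counter */

static void example_percpu_path(void)		/* hypothetical helper */
{
	preempt_disable();			/* no preemption or migration past here */
	__this_cpu_inc(example_count);		/* safe: we cannot change CPUs */
	preempt_enable();			/* may enter __preempt_schedule() */
}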
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f51932bd543..6947516a2d3e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
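
To close, an illustrative lock-break loop (not from this patch; example_drain() and do_one() are made-up names) showing where spin_needbreak() takes effect: cond_resched_lock() consults it to decide whether to drop a contended lock, and after this change PREEMPT_RT kernels observe contention as well:

static void example_drain(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct list_head *node = head->next;

		list_del(node);
		do_one(node);		/* hypothetical per-item work */
		/* Drops the lock and reschedules if a waiter is spinning. */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}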