-rw-r--r--  include/asm-generic/preempt.h   4
-rw-r--r--  include/linux/preempt.h         6
-rw-r--r--  include/linux/sched.h           6
-rw-r--r--  init/init_task.c                2
-rw-r--r--  init/main.c                     2
-rw-r--r--  kernel/sched/core.c            14
-rw-r--r--  kernel/sched/fair.c             2
-rw-r--r--  kernel/sched/sched.h            4
8 files changed, 20 insertions, 20 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index c3046c920063..d683f5e6d791 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset)
 			tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd92b1a93919..bbb68dba37cc 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -182,7 +182,7 @@ do { \
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -203,7 +203,7 @@ do { \
 		__preempt_schedule(); \
 } while (0)
 
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -217,7 +217,7 @@ do { \
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
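
As a reading aid for the preempt_enable() hunks above, here is a minimal, illustrative sketch (not part of the patch) of how a caller experiences the CONFIG_PREEMPTION gate; example_critical_section() is an invented name:

/*
 * Illustration only: with CONFIG_PREEMPTION=y the final preempt_enable()
 * may call __preempt_schedule() right away if a reschedule became pending
 * inside the critical section; without it, the count is merely decremented
 * and rescheduling waits for an explicit scheduling point.
 */
#include <linux/preempt.h>

static void example_critical_section(void)
{
	preempt_disable();	/* preempt_count++; no preemption past here */
	/* ... short per-CPU work ... */
	preempt_enable();	/* preempt_count--; reschedules here only
				 * on CONFIG_PREEMPTION kernels */
}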
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f51932bd543..6947516a2d3e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
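
The spin_needbreak() hunk above only matters to callers that voluntarily break a contended lock; a hedged sketch of that pattern follows, with more_work() and do_one_unit() as hypothetical helpers:

/*
 * Sketch under stated assumptions: on CONFIG_PREEMPTION kernels
 * spin_needbreak() reports lock contention; otherwise it is always 0
 * and only need_resched() matters.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

static void drain_queue(spinlock_t *lock)
{
	spin_lock(lock);
	while (more_work()) {			/* hypothetical predicate */
		do_one_unit();			/* hypothetical unit of work */
		if (need_resched() || spin_needbreak(lock))
			cond_resched_lock(lock);  /* drop, schedule, reacquire */
	}
	spin_unlock(lock);
}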
diff --git a/init/init_task.c b/init/init_task.c
index 7ab773b9b3cd..bfe06c53b14e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -174,7 +174,7 @@ struct task_struct init_task
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.ret_stack	= NULL,
 #endif
-#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
 	.trace_recursion = 0,
 #endif
 #ifdef CONFIG_LIVEPATCH
diff --git a/init/main.c b/init/main.c
index 96f8d5af52d6..653693da8da6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -433,7 +433,7 @@ noinline void __ref rest_init(void)
 
 	/*
 	 * Enable might_sleep() and smp_processor_id() checks.
-	 * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+	 * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
 	 * kernel_thread() would trigger might_sleep() splats. With
 	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
 	 * already, but it's stuck on the kthreadd_done completion.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..604a5e137efe 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3581,7 +3581,7 @@ static inline void sched_tick_start(int cpu) { }
 static inline void sched_tick_stop(int cpu) { }
 #endif
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
 /*
  * If the value passed in is equal to the current preempt count
@@ -3782,7 +3782,7 @@ again:
  *     task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
  *     called on the nearest possible occasion:
  *
- *      - If the kernel is preemptible (CONFIG_PREEMPT=y):
+ *      - If the kernel is preemptible (CONFIG_PREEMPTION=y):
  *
  *        - in syscall or exception context, at the next outmost
  *          preempt_enable(). (this might be as soon as the wake_up()'s
@@ -3791,7 +3791,7 @@ again:
  *        - in IRQ context, return from interrupt-handler to
  *          preemptible context
  *
- *      - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
+ *      - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
  *        then at the next:
  *
  *         - cond_resched() call
@@ -4033,7 +4033,7 @@ static void __sched notrace preempt_schedule_common(void)
 	} while (need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 /*
  * this is the entry point to schedule() from in-kernel preemption
  * off of preempt_enable. Kernel preemptions off return from interrupt
@@ -4105,7 +4105,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -5416,7 +5416,7 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 int __sched _cond_resched(void)
 {
 	if (should_resched(0)) {
@@ -5433,7 +5433,7 @@ EXPORT_SYMBOL(_cond_resched);
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
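
The _cond_resched() hunk above keeps the out-of-line implementation only for !CONFIG_PREEMPTION builds; with CONFIG_PREEMPTION=y the inline stub returns 0, since preempt_enable() and interrupt return already provide the preemption points. A rough, illustrative caller follows, with scan_items() and process_item() invented for the example:

/*
 * Rough illustration only: on !CONFIG_PREEMPTION kernels this
 * cond_resched() bounds the latency of the loop; on CONFIG_PREEMPTION
 * kernels _cond_resched() is a stub and preemption happens elsewhere.
 */
#include <linux/sched.h>

static void scan_items(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		process_item(i);	/* hypothetical per-item work */
		cond_resched();		/* voluntary preemption point */
	}
}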
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..aff9d76d8d65 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7430,7 +7430,7 @@ static int detach_tasks(struct lb_env *env)
 		detached++;
 		env->imbalance -= load;
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 		/*
 		 * NEWIDLE balancing is a source of latency, so preemptible
 		 * kernels will stop after the first task is detached to minimize
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 802b1f3405f2..f2ce6ba1c5d5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1943,7 +1943,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 
@@ -1995,7 +1995,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.