Diffstat (limited to 'include'):
-rw-r--r--  include/asm-generic/preempt.h | 4 ++--
-rw-r--r--  include/linux/preempt.h       | 6 +++---
-rw-r--r--  include/linux/sched.h         | 6 +++---
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index c3046c920063..d683f5e6d791 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset)
 			tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
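
The hunk above only renames the preprocessor guard; the preempt_schedule()/preempt_schedule_notrace() declarations themselves are unchanged. As a rough illustration of the pattern, here is a minimal user-space sketch (not kernel code; the mock_* names are invented for this example) of how a header can expose a scheduling hook under CONFIG_PREEMPTION and compile it away otherwise:

/* demo.c - build with: gcc -DCONFIG_PREEMPTION demo.c   (or without the -D) */
#include <stdio.h>

#ifdef CONFIG_PREEMPTION
/* full preemption model: the hook points at a real function */
static void mock_preempt_schedule(void)
{
	puts("mock_preempt_schedule() called");
}
#define mock_schedule_hook()	mock_preempt_schedule()
#else /* !CONFIG_PREEMPTION */
/* no preemption model: the hook compiles away entirely */
#define mock_schedule_hook()	do { } while (0)
#endif

int main(void)
{
	mock_schedule_hook();	/* prints only when built with -DCONFIG_PREEMPTION */
	return 0;
}

Building the same translation unit with and without -DCONFIG_PREEMPTION shows the two configurations the guard selects between.
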
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd92b1a93919..bbb68dba37cc 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -182,7 +182,7 @@ do { \
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -203,7 +203,7 @@ do { \
 		__preempt_schedule(); \
 } while (0)
 
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -217,7 +217,7 @@ do { \
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
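
With CONFIG_PREEMPTION set, preempt_enable() re-checks the preempt count after the decrement and calls __preempt_schedule() when it reaches zero; without it, the count is simply dropped. A user-space sketch of the two expansions (the preempt count is mocked with a plain integer, and the mock_* names are invented for the example, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static int mock_preempt_count = 1;	/* pretend we are inside preempt_disable() */

static bool mock_preempt_count_dec_and_test(void)
{
	return --mock_preempt_count == 0;
}

static void mock_preempt_schedule(void)
{
	puts("rescheduling on preempt_enable()");
}

#ifdef CONFIG_PREEMPTION
/* full model: enabling preemption may reschedule immediately */
#define mock_preempt_enable() \
do { \
	if (mock_preempt_count_dec_and_test()) \
		mock_preempt_schedule(); \
} while (0)
#else
/* no/voluntary model: just drop the count, never reschedule here */
#define mock_preempt_enable() \
do { \
	mock_preempt_count_dec_and_test(); \
} while (0)
#endif

int main(void)
{
	mock_preempt_enable();
	printf("mock preempt_count is now %d\n", mock_preempt_count);
	return 0;
}
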
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f51932bd543..6947516a2d3e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
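
After this hunk, spin_needbreak() reports lock contention only under CONFIG_PREEMPTION; on non-preemptible kernels it stays 0, so critical sections are never broken early. A sketch of the usual caller pattern, with the lock mocked so the example is self-contained (the mock_* names are invented here, not kernel API):

#include <stdio.h>

struct mock_spinlock { int contended; };

static void mock_spin_lock(struct mock_spinlock *lock)   { (void)lock; }
static void mock_spin_unlock(struct mock_spinlock *lock) { (void)lock; }

static int mock_spin_needbreak(struct mock_spinlock *lock)
{
#ifdef CONFIG_PREEMPTION
	return lock->contended;		/* mirrors spin_is_contended() */
#else
	(void)lock;
	return 0;			/* never break the section */
#endif
}

int main(void)
{
	struct mock_spinlock lock = { .contended = 1 };
	int i;

	mock_spin_lock(&lock);
	for (i = 0; i < 1000; i++) {
		/* ... process one item while holding the lock ... */
		if (mock_spin_needbreak(&lock)) {
			/* let the waiter in, then re-take the lock */
			mock_spin_unlock(&lock);
			puts("dropped the lock for a waiter");
			lock.contended = 0;	/* waiter got its turn; keep output short */
			mock_spin_lock(&lock);
		}
	}
	mock_spin_unlock(&lock);
	return 0;
}

Built with -DCONFIG_PREEMPTION the loop yields the lock once to the (mock) waiter; built without it, mock_spin_needbreak() is always 0 and the section runs to completion.
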