author	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:33 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:33 -0500
commit	6478d8800b75253b2a934ddcb734e13ade023ad0 (patch)
tree	df4017269b8755735578445c0a8a9e8b3b2615e9 /include
parent	58b8a73ab8becfcaea84abc2a06038281efa4c8a (diff)
sched: remove the !PREEMPT_BKL code
remove the !PREEMPT_BKL code. this removes 160 lines of legacy code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/hardirq.h	6
-rw-r--r--	include/linux/smp_lock.h	14
2 files changed, 2 insertions(+), 18 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
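
For context: after this hunk, in_atomic() treats any preempt_count() bits outside PREEMPT_ACTIVE as meaning "atomic"; the deleted !PREEMPT_BKL variant instead compared against kernel_locked(), since holding the spinlock-based BKL itself raised preempt_count(). Below is a minimal userspace sketch of the surviving test; preempt_count_val and the PREEMPT_ACTIVE value are hypothetical stand-ins for this example, not the kernel's real state:

/*
 * Minimal sketch of the simplified in_atomic() test.
 * PREEMPT_ACTIVE's value and preempt_count_val are illustrative
 * stand-ins, not the kernel's real per-task layout.
 */
#include <stdio.h>

#define PREEMPT_ACTIVE	0x10000000u	/* hypothetical flag bit */

static unsigned int preempt_count_val;	/* stands in for the per-task count */

#define preempt_count()	(preempt_count_val)
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)

int main(void)
{
	preempt_count_val = 0;
	printf("%d\n", in_atomic());	/* 0: fully preemptible */

	preempt_count_val = PREEMPT_ACTIVE;
	printf("%d\n", in_atomic());	/* 0: the ACTIVE flag alone is masked off */

	preempt_count_val = 1;		/* e.g. one preempt_disable() */
	printf("%d\n", in_atomic());	/* 1: atomic context */
	return 0;
}
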
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee1..aab3a4cff4e1 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
 
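
The deleted return_value_on_smp macro let one inline function body compile whether __reacquire_kernel_lock() returned a value or not: judging from the removed #if, it expanded to "return" only on SMP kernels without PREEMPT_BKL, and to nothing elsewhere, so non-value configurations fell through to "return 0". With the !PREEMPT_BKL variant gone, a plain return suffices. A self-contained sketch of the removed pattern, with hypothetical names (reacquire_helper, HAVE_INT_REACQUIRE) standing in for the kernel's, follows:

/*
 * Sketch of the macro trick this patch removes.  When the helper
 * returns int, the macro expands to "return" and forwards its value;
 * when it returns void, the macro expands to nothing and the call
 * falls through to "return 0".  All names here are illustrative.
 */
#include <stdio.h>

#define HAVE_INT_REACQUIRE 1	/* flip to 0 for the void-returning variant */

#if HAVE_INT_REACQUIRE
static int reacquire_helper(void) { return -1; }
# define return_value_if_int return
#else
static void reacquire_helper(void) { }
# define return_value_if_int
#endif

static int reacquire(int lock_depth)
{
	if (lock_depth >= 0)
		return_value_if_int reacquire_helper();
	return 0;
}

int main(void)
{
	printf("%d\n", reacquire(1));	/* -1 when the helper returns int */
	printf("%d\n", reacquire(-1));	/* 0: nothing to reacquire */
	return 0;
}
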