diff options
author | Peter Zijlstra <peterz@infradead.org> | 2013-08-14 08:55:24 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-25 08:07:32 -0400 |
commit | 4a2b4b222743bb07fedf985b884550f2ca067ea9 (patch) | |
tree | 587e80512c6cdf727b27d0f806758833547a65ed | |
parent | ea8117478918a4734586d35ff530721b682425be (diff) |
sched: Introduce preempt_count accessor functions
Replace the single preempt_count() 'function' that's an lvalue with
two proper functions:
preempt_count() - returns the preempt_count value as rvalue
preempt_count_set() - Allows setting the preempt-count value
Also provide preempt_count_ptr() as a convenience wrapper to implement
all modifying operations.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-orxrbycjozopqfhb4dxdkdvb@git.kernel.org
[ Fixed build failure. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/preempt.h | 25 | ||||
-rw-r--r-- | init/main.c | 2 | ||||
-rw-r--r-- | kernel/sched/core.c | 4 | ||||
-rw-r--r-- | kernel/softirq.c | 4 | ||||
-rw-r--r-- | kernel/timer.c | 8 | ||||
-rw-r--r-- | lib/locking-selftest.c | 2 | ||||
-rw-r--r-- | lib/smp_processor_id.c | 3 |
7 files changed, 30 insertions, 18 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index f5d4723cdb3d..eaac52a8fe6a 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
@@ -10,19 +10,32 @@ | |||
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | 12 | ||
13 | static __always_inline int preempt_count(void) | ||
14 | { | ||
15 | return current_thread_info()->preempt_count; | ||
16 | } | ||
17 | |||
18 | static __always_inline int *preempt_count_ptr(void) | ||
19 | { | ||
20 | return &current_thread_info()->preempt_count; | ||
21 | } | ||
22 | |||
23 | static __always_inline void preempt_count_set(int pc) | ||
24 | { | ||
25 | *preempt_count_ptr() = pc; | ||
26 | } | ||
27 | |||
13 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) | 28 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
14 | extern void add_preempt_count(int val); | 29 | extern void add_preempt_count(int val); |
15 | extern void sub_preempt_count(int val); | 30 | extern void sub_preempt_count(int val); |
16 | #else | 31 | #else |
17 | # define add_preempt_count(val) do { preempt_count() += (val); } while (0) | 32 | # define add_preempt_count(val) do { *preempt_count_ptr() += (val); } while (0) |
18 | # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0) | 33 | # define sub_preempt_count(val) do { *preempt_count_ptr() -= (val); } while (0) |
19 | #endif | 34 | #endif |
20 | 35 | ||
21 | #define inc_preempt_count() add_preempt_count(1) | 36 | #define inc_preempt_count() add_preempt_count(1) |
22 | #define dec_preempt_count() sub_preempt_count(1) | 37 | #define dec_preempt_count() sub_preempt_count(1) |
23 | 38 | ||
24 | #define preempt_count() (current_thread_info()->preempt_count) | ||
25 | |||
26 | #ifdef CONFIG_PREEMPT | 39 | #ifdef CONFIG_PREEMPT |
27 | 40 | ||
28 | asmlinkage void preempt_schedule(void); | 41 | asmlinkage void preempt_schedule(void); |
@@ -81,9 +94,9 @@ do { \ | |||
81 | 94 | ||
82 | /* For debugging and tracer internals only! */ | 95 | /* For debugging and tracer internals only! */ |
83 | #define add_preempt_count_notrace(val) \ | 96 | #define add_preempt_count_notrace(val) \ |
84 | do { preempt_count() += (val); } while (0) | 97 | do { *preempt_count_ptr() += (val); } while (0) |
85 | #define sub_preempt_count_notrace(val) \ | 98 | #define sub_preempt_count_notrace(val) \ |
86 | do { preempt_count() -= (val); } while (0) | 99 | do { *preempt_count_ptr() -= (val); } while (0) |
87 | #define inc_preempt_count_notrace() add_preempt_count_notrace(1) | 100 | #define inc_preempt_count_notrace() add_preempt_count_notrace(1) |
88 | #define dec_preempt_count_notrace() sub_preempt_count_notrace(1) | 101 | #define dec_preempt_count_notrace() sub_preempt_count_notrace(1) |
89 | 102 | ||
diff --git a/init/main.c b/init/main.c index af310afbef28..7cc4b7889a88 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -692,7 +692,7 @@ int __init_or_module do_one_initcall(initcall_t fn) | |||
692 | 692 | ||
693 | if (preempt_count() != count) { | 693 | if (preempt_count() != count) { |
694 | sprintf(msgbuf, "preemption imbalance "); | 694 | sprintf(msgbuf, "preemption imbalance "); |
695 | preempt_count() = count; | 695 | preempt_count_set(count); |
696 | } | 696 | } |
697 | if (irqs_disabled()) { | 697 | if (irqs_disabled()) { |
698 | strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); | 698 | strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 242da0c03aba..fe89afac4d09 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -2219,7 +2219,7 @@ void __kprobes add_preempt_count(int val) | |||
2219 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) | 2219 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
2220 | return; | 2220 | return; |
2221 | #endif | 2221 | #endif |
2222 | preempt_count() += val; | 2222 | add_preempt_count_notrace(val); |
2223 | #ifdef CONFIG_DEBUG_PREEMPT | 2223 | #ifdef CONFIG_DEBUG_PREEMPT |
2224 | /* | 2224 | /* |
2225 | * Spinlock count overflowing soon? | 2225 | * Spinlock count overflowing soon? |
@@ -2250,7 +2250,7 @@ void __kprobes sub_preempt_count(int val) | |||
2250 | 2250 | ||
2251 | if (preempt_count() == val) | 2251 | if (preempt_count() == val) |
2252 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 2252 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
2253 | preempt_count() -= val; | 2253 | sub_preempt_count_notrace(val); |
2254 | } | 2254 | } |
2255 | EXPORT_SYMBOL(sub_preempt_count); | 2255 | EXPORT_SYMBOL(sub_preempt_count); |
2256 | 2256 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c index 53cc09ceb0b8..a90de70cf1f3 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -106,7 +106,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt) | |||
106 | * We must manually increment preempt_count here and manually | 106 | * We must manually increment preempt_count here and manually |
107 | * call the trace_preempt_off later. | 107 | * call the trace_preempt_off later. |
108 | */ | 108 | */ |
109 | preempt_count() += cnt; | 109 | add_preempt_count_notrace(cnt); |
110 | /* | 110 | /* |
111 | * Were softirqs turned off above: | 111 | * Were softirqs turned off above: |
112 | */ | 112 | */ |
@@ -256,7 +256,7 @@ restart: | |||
256 | " exited with %08x?\n", vec_nr, | 256 | " exited with %08x?\n", vec_nr, |
257 | softirq_to_name[vec_nr], h->action, | 257 | softirq_to_name[vec_nr], h->action, |
258 | prev_count, preempt_count()); | 258 | prev_count, preempt_count()); |
259 | preempt_count() = prev_count; | 259 | preempt_count_set(prev_count); |
260 | } | 260 | } |
261 | 261 | ||
262 | rcu_bh_qs(cpu); | 262 | rcu_bh_qs(cpu); |
diff --git a/kernel/timer.c b/kernel/timer.c index 4296d13db3d1..6582b82fa966 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1092,7 +1092,7 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index) | |||
1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | 1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), |
1093 | unsigned long data) | 1093 | unsigned long data) |
1094 | { | 1094 | { |
1095 | int preempt_count = preempt_count(); | 1095 | int count = preempt_count(); |
1096 | 1096 | ||
1097 | #ifdef CONFIG_LOCKDEP | 1097 | #ifdef CONFIG_LOCKDEP |
1098 | /* | 1098 | /* |
@@ -1119,16 +1119,16 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | |||
1119 | 1119 | ||
1120 | lock_map_release(&lockdep_map); | 1120 | lock_map_release(&lockdep_map); |
1121 | 1121 | ||
1122 | if (preempt_count != preempt_count()) { | 1122 | if (count != preempt_count()) { |
1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", | 1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", |
1124 | fn, preempt_count, preempt_count()); | 1124 | fn, count, preempt_count()); |
1125 | /* | 1125 | /* |
1126 | * Restore the preempt count. That gives us a decent | 1126 | * Restore the preempt count. That gives us a decent |
1127 | * chance to survive and extract information. If the | 1127 | * chance to survive and extract information. If the |
1128 | * callback kept a lock held, bad luck, but not worse | 1128 | * callback kept a lock held, bad luck, but not worse |
1129 | * than the BUG() we had. | 1129 | * than the BUG() we had. |
1130 | */ | 1130 | */ |
1131 | preempt_count() = preempt_count; | 1131 | preempt_count_set(count); |
1132 | } | 1132 | } |
1133 | } | 1133 | } |
1134 | 1134 | ||
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 6dc09d8f4c24..872a15a2a637 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
@@ -1002,7 +1002,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) | |||
1002 | * Some tests (e.g. double-unlock) might corrupt the preemption | 1002 | * Some tests (e.g. double-unlock) might corrupt the preemption |
1003 | * count, so restore it: | 1003 | * count, so restore it: |
1004 | */ | 1004 | */ |
1005 | preempt_count() = saved_preempt_count; | 1005 | preempt_count_set(saved_preempt_count); |
1006 | #ifdef CONFIG_TRACE_IRQFLAGS | 1006 | #ifdef CONFIG_TRACE_IRQFLAGS |
1007 | if (softirq_count()) | 1007 | if (softirq_count()) |
1008 | current->softirqs_enabled = 0; | 1008 | current->softirqs_enabled = 0; |
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4c0d0e51d49e..04abe53f12a1 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -9,10 +9,9 @@ | |||
9 | 9 | ||
10 | notrace unsigned int debug_smp_processor_id(void) | 10 | notrace unsigned int debug_smp_processor_id(void) |
11 | { | 11 | { |
12 | unsigned long preempt_count = preempt_count(); | ||
13 | int this_cpu = raw_smp_processor_id(); | 12 | int this_cpu = raw_smp_processor_id(); |
14 | 13 | ||
15 | if (likely(preempt_count)) | 14 | if (likely(preempt_count())) |
16 | goto out; | 15 | goto out; |
17 | 16 | ||
18 | if (irqs_disabled()) | 17 | if (irqs_disabled()) |