diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2013-08-14 08:55:24 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-09-25 08:07:32 -0400 |
| commit | 4a2b4b222743bb07fedf985b884550f2ca067ea9 (patch) | |
| tree | 587e80512c6cdf727b27d0f806758833547a65ed /kernel | |
| parent | ea8117478918a4734586d35ff530721b682425be (diff) | |
sched: Introduce preempt_count accessor functions
Replace the single preempt_count() 'function' that's an lvalue with
two proper functions:
preempt_count() - returns the preempt_count value as an rvalue
preempt_count_set() - allows setting the preempt_count value
Also provide preempt_count_ptr() as a convenience wrapper to implement
all modifying operations.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-orxrbycjozopqfhb4dxdkdvb@git.kernel.org
[ Fixed build failure. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 4 | ||||
| -rw-r--r-- | kernel/softirq.c | 4 | ||||
| -rw-r--r-- | kernel/timer.c | 8 |
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 242da0c03aba..fe89afac4d09 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -2219,7 +2219,7 @@ void __kprobes add_preempt_count(int val) | |||
| 2219 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) | 2219 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 2220 | return; | 2220 | return; |
| 2221 | #endif | 2221 | #endif |
| 2222 | preempt_count() += val; | 2222 | add_preempt_count_notrace(val); |
| 2223 | #ifdef CONFIG_DEBUG_PREEMPT | 2223 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2224 | /* | 2224 | /* |
| 2225 | * Spinlock count overflowing soon? | 2225 | * Spinlock count overflowing soon? |
| @@ -2250,7 +2250,7 @@ void __kprobes sub_preempt_count(int val) | |||
| 2250 | 2250 | ||
| 2251 | if (preempt_count() == val) | 2251 | if (preempt_count() == val) |
| 2252 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 2252 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
| 2253 | preempt_count() -= val; | 2253 | sub_preempt_count_notrace(val); |
| 2254 | } | 2254 | } |
| 2255 | EXPORT_SYMBOL(sub_preempt_count); | 2255 | EXPORT_SYMBOL(sub_preempt_count); |
| 2256 | 2256 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c index 53cc09ceb0b8..a90de70cf1f3 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -106,7 +106,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt) | |||
| 106 | * We must manually increment preempt_count here and manually | 106 | * We must manually increment preempt_count here and manually |
| 107 | * call the trace_preempt_off later. | 107 | * call the trace_preempt_off later. |
| 108 | */ | 108 | */ |
| 109 | preempt_count() += cnt; | 109 | add_preempt_count_notrace(cnt); |
| 110 | /* | 110 | /* |
| 111 | * Were softirqs turned off above: | 111 | * Were softirqs turned off above: |
| 112 | */ | 112 | */ |
| @@ -256,7 +256,7 @@ restart: | |||
| 256 | " exited with %08x?\n", vec_nr, | 256 | " exited with %08x?\n", vec_nr, |
| 257 | softirq_to_name[vec_nr], h->action, | 257 | softirq_to_name[vec_nr], h->action, |
| 258 | prev_count, preempt_count()); | 258 | prev_count, preempt_count()); |
| 259 | preempt_count() = prev_count; | 259 | preempt_count_set(prev_count); |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | rcu_bh_qs(cpu); | 262 | rcu_bh_qs(cpu); |
diff --git a/kernel/timer.c b/kernel/timer.c index 4296d13db3d1..6582b82fa966 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1092,7 +1092,7 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index) | |||
| 1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | 1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), |
| 1093 | unsigned long data) | 1093 | unsigned long data) |
| 1094 | { | 1094 | { |
| 1095 | int preempt_count = preempt_count(); | 1095 | int count = preempt_count(); |
| 1096 | 1096 | ||
| 1097 | #ifdef CONFIG_LOCKDEP | 1097 | #ifdef CONFIG_LOCKDEP |
| 1098 | /* | 1098 | /* |
| @@ -1119,16 +1119,16 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | |||
| 1119 | 1119 | ||
| 1120 | lock_map_release(&lockdep_map); | 1120 | lock_map_release(&lockdep_map); |
| 1121 | 1121 | ||
| 1122 | if (preempt_count != preempt_count()) { | 1122 | if (count != preempt_count()) { |
| 1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", | 1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", |
| 1124 | fn, preempt_count, preempt_count()); | 1124 | fn, count, preempt_count()); |
| 1125 | /* | 1125 | /* |
| 1126 | * Restore the preempt count. That gives us a decent | 1126 | * Restore the preempt count. That gives us a decent |
| 1127 | * chance to survive and extract information. If the | 1127 | * chance to survive and extract information. If the |
| 1128 | * callback kept a lock held, bad luck, but not worse | 1128 | * callback kept a lock held, bad luck, but not worse |
| 1129 | * than the BUG() we had. | 1129 | * than the BUG() we had. |
| 1130 | */ | 1130 | */ |
| 1131 | preempt_count() = preempt_count; | 1131 | preempt_count_set(count); |
| 1132 | } | 1132 | } |
| 1133 | } | 1133 | } |
| 1134 | 1134 | ||
