author     Oleg Nesterov <oleg@redhat.com>    2013-06-20 11:50:11 -0400
committer  Ingo Molnar <mingo@kernel.org>     2013-06-20 11:58:55 -0400
commit     e1ebe86203e6532eb5a0ae8f26ccae47aca548ae
tree       dc9464fa0779f4d45d79f360192a8cf533bb90aa /kernel/events/hw_breakpoint.c
parent     f070a4dba984975f6353c6f1d966da1a6ce4b86f
hw_breakpoint: Simplify list/idx mess in toggle_bp_slot() paths
The enable/disable logic in toggle_bp_slot() is not symmetrical
and imho very confusing: on the disable path, "old_count" in
toggle_bp_task_slot() is actually the new count, because by the
time it is computed this bp has already been removed from the list.
Change toggle_bp_slot() to always call list_add/list_del after
toggle_bp_task_slot(). This way old_idx is always
task_bp_pinned() - 1, the entry to decrement, and new_idx is
old_idx +/- weight, the entry to increment. The code/logic becomes
obvious on both paths (a userspace sketch follows the sign-off
tags below).
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155011.GA6330@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
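
For readers who want to poke at the index arithmetic, here is a minimal
userspace sketch of the reworked accounting. Everything around the index
math is an assumption made to keep it self-contained: per_cpu(),
task_bp_pinned() and the bp list are replaced by a plain array and a
task_pinned counter, and the final counter update stands in for the
list_add()/list_del() that the real caller performs afterwards.

/* Minimal userspace model of the reworked toggle_bp_task_slot().
 * Illustration only, not the kernel code: per_cpu(), task_bp_pinned()
 * and the bp list are replaced by a plain array and a counter. */
#include <assert.h>
#include <stdio.h>

#define MAX_SLOTS 8

/* tsk_pinned[n-1] = number of tasks pinning exactly n > 0 breakpoints */
static unsigned int tsk_pinned[MAX_SLOTS];

/* stand-in for task_bp_pinned(): breakpoints this task pins right now */
static int task_pinned;

static void toggle_bp_task_slot(int enable, int weight)
{
        int old_idx = task_pinned - 1;
        int new_idx = enable ? old_idx + weight : old_idx - weight;

        /* move the task from its old bucket to its new one; a task
         * pinning zero breakpoints has no bucket, hence the >= 0 checks */
        if (old_idx >= 0)
                tsk_pinned[old_idx]--;
        if (new_idx >= 0)
                tsk_pinned[new_idx]++;

        /* stands in for the list_add/list_del the caller does afterwards */
        task_pinned = new_idx + 1;
}

int main(void)
{
        toggle_bp_task_slot(1, 1);      /* 1st breakpoint: bucket n=1 */
        toggle_bp_task_slot(1, 2);      /* wide bp, weight 2: bucket n=3 */
        assert(tsk_pinned[2] == 1 && tsk_pinned[0] == 0);

        toggle_bp_task_slot(0, 2);      /* disabling is the exact mirror */
        assert(tsk_pinned[0] == 1 && tsk_pinned[2] == 0);

        puts("enable/disable are symmetrical, histogram stays balanced");
        return 0;
}

The point of moving list_add/list_del after the toggle is exactly that
task_bp_pinned() then returns the pre-toggle count on both paths, so
old_idx and new_idx mean the same thing whether the breakpoint is being
enabled or disabled.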
Diffstat (limited to 'kernel/events/hw_breakpoint.c')
-rw-r--r--  kernel/events/hw_breakpoint.c | 40 ++++++++++++++++------------------------
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index ef8ebe560949..dee0148dcf54 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -185,26 +185,20 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
                                 enum bp_type_idx type, int weight)
 {
-        unsigned int *tsk_pinned;
-        int old_count = 0;
-        int old_idx = 0;
-        int idx = 0;
-
-        old_count = task_bp_pinned(cpu, bp, type);
-        old_idx = old_count - 1;
-        idx = old_idx + weight;
-
-        /* tsk_pinned[n] is the number of tasks having n breakpoints */
-        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
-        if (enable) {
-                tsk_pinned[idx]++;
-                if (old_count > 0)
-                        tsk_pinned[old_idx]--;
-        } else {
-                tsk_pinned[idx]--;
-                if (old_count > 0)
-                        tsk_pinned[old_idx]++;
-        }
+        /* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */
+        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
+        int old_idx, new_idx;
+
+        old_idx = task_bp_pinned(cpu, bp, type) - 1;
+        if (enable)
+                new_idx = old_idx + weight;
+        else
+                new_idx = old_idx - weight;
+
+        if (old_idx >= 0)
+                tsk_pinned[old_idx]--;
+        if (new_idx >= 0)
+                tsk_pinned[new_idx]++;
 }
 
 /*
@@ -228,10 +222,6 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
         }
 
         /* Pinned counter task profiling */
-
-        if (!enable)
-                list_del(&bp->hw.bp_list);
-
         if (cpu >= 0) {
                 toggle_bp_task_slot(bp, cpu, enable, type, weight);
         } else {
@@ -241,6 +231,8 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 
         if (enable)
                 list_add_tail(&bp->hw.bp_list, &bp_task_head);
+        else
+                list_del(&bp->hw.bp_list);
 }
 
 /*
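
One boundary case of the new indexing is worth spelling out: a task that
pins no breakpoints yields task_bp_pinned() == 0, so old_idx is -1 and
names no bucket at all; likewise new_idx drops to -1 when the last
breakpoint goes away. The two >= 0 checks are what replace the old
"if (old_count > 0)" guards. A stripped-down sketch of just this case,
under the same standalone-model assumptions as the sketch above:

/* Boundary case of the new indexing: idx == -1 means "no bucket".
 * Standalone illustration; not the kernel's actual code path. */
#include <assert.h>

static unsigned int tsk_pinned[4];

/* same model as above, with the current pin count passed in directly */
static void toggle(int task_pinned_now, int enable, int weight)
{
        int old_idx = task_pinned_now - 1;
        int new_idx = enable ? old_idx + weight : old_idx - weight;

        if (old_idx >= 0)       /* skipped for a task pinning nothing yet */
                tsk_pinned[old_idx]--;
        if (new_idx >= 0)       /* skipped when the last breakpoint is removed */
                tsk_pinned[new_idx]++;
}

int main(void)
{
        toggle(0, 1, 1);        /* first bp: old_idx == -1, only the ++ runs */
        assert(tsk_pinned[0] == 1);
        toggle(1, 0, 1);        /* last bp: new_idx == -1, only the -- runs */
        assert(tsk_pinned[0] == 0);
        return 0;
}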