author     Oleg Nesterov <oleg@redhat.com>  2013-06-20 11:50:15 -0400
committer  Ingo Molnar <mingo@kernel.org>   2013-06-20 11:58:56 -0400
commit     1c10adbb929936316f71df089ace699fce037e24 (patch)
tree       31cb135dfa9f016724fe97e096153abe507ea073 /kernel/events
parent     7ab71f3244e9f970c29566c5a67e13d1fa38c387 (diff)
hw_breakpoint: Introduce cpumask_of_bp()
Add a trivial helper which simply returns cpumask_of() or
cpu_possible_mask depending on bp->cpu.

Change fetch_bp_busy_slots() and toggle_bp_slot() to always do
for_each_cpu(cpumask_of_bp) to simplify the code and avoid code
duplication.
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
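
[Editor's note: the idea is easy to model outside the kernel. Below is a
minimal userspace sketch of the pattern, not kernel code: struct fake_bp,
mask_of_bp() and the four-CPU bit mask are hypothetical stand-ins for
perf_event, cpumask_of_bp() and cpu_possible_mask. It shows how returning a
one-bit mask ("this CPU") or an all-bits mask ("any CPU") lets a single
loop replace the old per-cpu/all-cpu branches.]

#include <stdio.h>

#define NR_CPUS 4	/* hypothetical CPU count, for the demo only */

struct fake_bp { int cpu; };	/* stand-in for perf_event's bp->cpu */

/* Stand-in for cpu_possible_mask: all NR_CPUS bits set. */
static const unsigned long possible_mask = (1UL << NR_CPUS) - 1;

/* Stand-in for cpumask_of_bp(): one bit, or every bit. */
static unsigned long mask_of_bp(const struct fake_bp *bp)
{
	if (bp->cpu >= 0)
		return 1UL << bp->cpu;
	return possible_mask;
}

int main(void)
{
	struct fake_bp pinned = { .cpu = 2 };	/* per-cpu breakpoint */
	struct fake_bp any    = { .cpu = -1 };	/* system-wide breakpoint */
	const struct fake_bp *bps[] = { &pinned, &any };

	for (int i = 0; i < 2; i++) {
		unsigned long mask = mask_of_bp(bps[i]);

		/* Userspace analogue of for_each_cpu(): one loop, both cases. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (mask & (1UL << cpu))
				printf("bp %d -> cpu %d\n", i, cpu);
	}
	return 0;
}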
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/hw_breakpoint.c  43
1 file changed, 17 insertions, 26 deletions
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 5cd4f6d9652c..9c71445328af 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	return count;
 }
 
+static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
+{
+	if (bp->cpu >= 0)
+		return cpumask_of(bp->cpu);
+	return cpu_possible_mask;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
@@ -135,25 +142,13 @@ static void
 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		    enum bp_type_idx type)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
-
-	if (cpu >= 0) {
-		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
-			slots->pinned += max_task_bp_pinned(cpu, type);
-		else
-			slots->pinned += task_bp_pinned(cpu, bp, type);
-		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
-
-		return;
-	}
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		unsigned int nr;
+	for_each_cpu(cpu, cpumask) {
+		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 
-		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
+		if (!bp->hw.bp_target)
 			nr += max_task_bp_pinned(cpu, type);
 		else
 			nr += task_bp_pinned(cpu, bp, type);
@@ -205,25 +200,21 @@ static void
 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	       int weight)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
 	if (!enable)
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!tsk) {
-		per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
+	if (!bp->hw.bp_target) {
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
 		return;
 	}
 
 	/* Pinned counter task profiling */
-	if (cpu >= 0) {
-		toggle_bp_task_slot(bp, cpu, type, weight);
-	} else {
-		for_each_possible_cpu(cpu)
-			toggle_bp_task_slot(bp, cpu, type, weight);
-	}
+	for_each_cpu(cpu, cpumask)
+		toggle_bp_task_slot(bp, cpu, type, weight);
 
 	if (enable)
 		list_add_tail(&bp->hw.bp_list, &bp_task_head);
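
[Editor's note: a side remark on the toggle_bp_slot() hunk. The patch keeps
the existing sign-flip idiom, where disabling negates the weight so one
accounting path serves both directions; and the cpu-profiling branch can
index per_cpu(..., bp->cpu) directly because a counter with no task target
always comes with a concrete cpu (perf_event_open() rejects pid == -1
combined with cpu == -1). The userspace model below, with a hypothetical
toggle_slot() and a plain array standing in for
per_cpu(nr_cpu_bp_pinned[type], cpu), checks that an enable/disable pair
balances back to zero.]

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-in for per_cpu(nr_cpu_bp_pinned[type], cpu). */
static int nr_pinned[NR_CPUS];

/* Models toggle_bp_slot()'s sign flip: one path for enable and disable. */
static void toggle_slot(int cpu, int enable, int weight)
{
	if (!enable)
		weight = -weight;
	nr_pinned[cpu] += weight;
}

int main(void)
{
	toggle_slot(2, 1, 1);		/* enable: pinned count rises */
	assert(nr_pinned[2] == 1);
	toggle_slot(2, 0, 1);		/* disable: same call, negated weight */
	assert(nr_pinned[2] == 0);
	puts("enable/disable round-trip balances to zero");
	return 0;
}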