about | summary | refs | log | tree | commit | diff | stats
path: root/arch
diff options
context:
space:
mode:
authorRobert Richter <rric@kernel.org>2013-02-06 12:26:24 -0500
committerIngo Molnar <mingo@kernel.org>2013-02-06 13:45:22 -0500
commit2c53c3dd0b6497484b29fd49d34ef98acbc14577 (patch)
tree6aae3a8a73292f0a024cbd78de68ad7b0498d3b5 /arch
parentf2b4367a69c60c644a1df36f63a65e0e677d3b0f (diff)
perf/x86/amd: Rework northbridge event constraints handler
Code simplification. No functional changes.

Signed-off-by: Robert Richter <rric@kernel.org>
Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Acked-by: Stephane Eranian <eranian@google.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Robert Richter <rric@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1360171589-6381-2-git-send-email-jacob.shin@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 68
1 file changed, 26 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c93bc4e813a0..e7963c7af683 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -256,9 +256,8 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
256{ 256{
257 struct hw_perf_event *hwc = &event->hw; 257 struct hw_perf_event *hwc = &event->hw;
258 struct amd_nb *nb = cpuc->amd_nb; 258 struct amd_nb *nb = cpuc->amd_nb;
259 struct perf_event *old = NULL; 259 struct perf_event *old;
260 int max = x86_pmu.num_counters; 260 int idx, new = -1;
261 int i, j, k = -1;
262 261
263 /* 262 /*
264 * if not NB event or no NB, then no constraints 263 * if not NB event or no NB, then no constraints
@@ -276,48 +275,33 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
276 * because of successive calls to x86_schedule_events() from 275 * because of successive calls to x86_schedule_events() from
277 * hw_perf_group_sched_in() without hw_perf_enable() 276 * hw_perf_group_sched_in() without hw_perf_enable()
278 */ 277 */
279 for (i = 0; i < max; i++) { 278 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
280 /* 279 if (new == -1 || hwc->idx == idx)
281 * keep track of first free slot 280 /* assign free slot, prefer hwc->idx */
282 */ 281 old = cmpxchg(nb->owners + idx, NULL, event);
283 if (k == -1 && !nb->owners[i]) 282 else if (nb->owners[idx] == event)
284 k = i; 283 /* event already present */
284 old = event;
285 else
286 continue;
287
288 if (old && old != event)
289 continue;
290
291 /* reassign to this slot */
292 if (new != -1)
293 cmpxchg(nb->owners + new, event, NULL);
294 new = idx;
285 295
286 /* already present, reuse */ 296 /* already present, reuse */
287 if (nb->owners[i] == event) 297 if (old == event)
288 goto done;
289 }
290 /*
291 * not present, so grab a new slot
292 * starting either at:
293 */
294 if (hwc->idx != -1) {
295 /* previous assignment */
296 i = hwc->idx;
297 } else if (k != -1) {
298 /* start from free slot found */
299 i = k;
300 } else {
301 /*
302 * event not found, no slot found in
303 * first pass, try again from the
304 * beginning
305 */
306 i = 0;
307 }
308 j = i;
309 do {
310 old = cmpxchg(nb->owners+i, NULL, event);
311 if (!old)
312 break; 298 break;
313 if (++i == max) 299 }
314 i = 0; 300
315 } while (i != j); 301 if (new == -1)
316done: 302 return &emptyconstraint;
317 if (!old) 303
318 return &nb->event_constraints[i]; 304 return &nb->event_constraints[new];
319
320 return &emptyconstraint;
321} 305}
322 306
323static struct amd_nb *amd_alloc_nb(int cpu) 307static struct amd_nb *amd_alloc_nb(int cpu)