aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 16:15:24 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 16:15:24 -0500
commit88a57667f2990f00b019d46c8426441c9e516d51 (patch)
tree392f5dcb9724e688aa307e2ed2cc8ee13e66f570 /arch
parent34b85e3574424beb30e4cd163e6da2e2282d2683 (diff)
parentac931f87a647ca156f65a4c00e7297165e4fa2d8 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes and cleanups from Ingo Molnar: "A kernel fix plus mostly tooling fixes, but also some tooling restructuring and cleanups" * 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (39 commits) perf: Fix building warning on ARM 32 perf symbols: Fix use after free in filename__read_build_id perf evlist: Use roundup_pow_of_two tools: Adopt roundup_pow_of_two perf tools: Make the mmap length autotuning more robust tools: Adopt rounddown_pow_of_two and deps tools: Adopt fls_long and deps tools: Move bitops.h from tools/perf/util to tools/ tools: Introduce asm-generic/bitops.h tools lib: Move asm-generic/bitops/find.h code to tools/include and tools/lib tools: Whitespace prep patches for moving bitops.h tools: Move code originally from asm-generic/atomic.h into tools/include/asm-generic/ tools: Move code originally from linux/log2.h to tools/include/linux/ tools: Move __ffs implementation to tools/include/asm-generic/bitops/__ffs.h perf evlist: Do not use hard coded value for a mmap_pages default perf trace: Let the perf_evlist__mmap autosize the number of pages to use perf evlist: Improve the strerror_mmap method perf evlist: Clarify sterror_mmap variable names perf evlist: Fixup brown paper bag on "hint" for --mmap-pages cmdline arg perf trace: Provide a better explanation when mmap fails ...
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c22
1 files changed, 19 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 08f3fed2b0f2..10b8d3eaaf15 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
 	return box;
 }
 
+/*
+ * Using uncore_pmu_event_init pmu event_init callback
+ * as a detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+	return event->pmu->event_init == uncore_pmu_event_init;
+}
+
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {
@@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
 		return -EINVAL;
 
 	n = box->n_events;
-	box->event_list[n] = leader;
-	n++;
+
+	if (is_uncore_event(leader)) {
+		box->event_list[n] = leader;
+		n++;
+	}
+
 	if (!dogrp)
 		return n;
 
 	list_for_each_entry(event, &leader->sibling_list, group_entry) {
-		if (event->state <= PERF_EVENT_STATE_OFF)
+		if (!is_uncore_event(event) ||
+		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
 
 		if (n >= max_count)