author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:58:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:58:15 -0400
commit    9d9420f1209a1facea7110d549ac695f5aeeb503 (patch)
tree      7956d1c40420644830decbbc90b8bbdfeb194364 /arch/x86/kernel/cpu/perf_event_intel.c
parent    6d5f0ebfc0be9cbfeaafdd9258d5fa24b7975a36 (diff)
parent    cc6cd47e7395bc05c5077009808b820633eb3f18 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Kernel side updates:

   - Fix and enhance poll support (Jiri Olsa)

   - Re-enable inheritance optimization (Jiri Olsa)

   - Enhance Intel memory events support (Stephane Eranian)

   - Refactor the Intel uncore driver to be more maintainable (Zheng Yan)

   - Enhance and fix Intel CPU and uncore PMU drivers (Peter Zijlstra,
     Andi Kleen)

   - [ plus various smaller fixes/cleanups ]

  User visible tooling updates:

   - Add +field argument support for the --field option, so that one can
     add fields to the default list of fields to show, i.e. now one can
     just do:

         perf report --fields +pid

     and the pid will appear in addition to the default fields (Jiri Olsa)

   - Add +field argument support for the --sort option (Jiri Olsa)

   - Honour -w in the report tools (report, top), allowing to specify the
     widths for the histogram entries columns (Namhyung Kim)

   - Properly show submicrosecond times in 'perf kvm stat' (Christian
     Borntraeger)

   - Add beautifier for the mremap flags param in 'trace' (Alex Snast)

   - perf script: Allow callchains if any event samples them

   - Don't truncate Intel style addresses in 'annotate' (Alex Converse)

   - Allow profiling when kptr_restrict == 1 for non-root users; kernel
     samples will just remain unresolved (Andi Kleen)

   - Allow configuring default options for callchains in the config file
     (Namhyung Kim)

   - Support operations for shared futexes (Davidlohr Bueso)

   - "perf kvm stat report" improvements by Alexander Yarygin:
       - Save pid string in opts.target.pid
       - Enable the target.system_wide flag
       - Unify the title bar output

   - [ plus lots of other fixes and small improvements ]

  Tooling infrastructure changes:

   - Refactor unit and scale function parameters for PMU parsing routines
     (Matt Fleming)

   - Improve DSO long names lookup with an rbtree, resulting in a great
     speedup for workloads with lots of DSOs (Waiman Long)

   - We were not handling POLLHUP notifications for event file
     descriptors.  Fix it by filtering entries in the events file
     descriptor array after poll() returns, refcounting mmaps so that
     when the last fd pointing to a perf mmap goes away we do the unmap
     (Arnaldo Carvalho de Melo)

   - Intel PT prep work, from Adrian Hunter, including:
       - Let a user specify a PMU event without any config terms
       - Add perf-with-kcore script
       - Let a default config be defined for a PMU
       - Add perf_pmu__scan_file()

   - Add a 'perf test' for tracking with sched_switch

   - Add a 'flush' callback to the scripting API

   - Use the ring buffer consume method to look like other tools
     (Arnaldo Carvalho de Melo)

   - hists browser (used in top and report) refactorings, getting rid of
     unused variables and reducing source code size by handling similar
     cases in fewer functions (Namhyung Kim)

   - Replace thread-unsafe strerror() with strerror_r() across the whole
     tools/perf/ tree (Masami Hiramatsu)

   - Rename ordered_samples to ordered_events and allow setting a queue
     size for ordering events (Jiri Olsa)

   - [ plus lots of fixes, cleanups and other improvements ]"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (198 commits)
  perf/x86: Tone down kernel messages when the PMU check fails in a virtual environment
  perf/x86/intel/uncore: Fix minor race in box set up
  perf record: Fix error message for --filter option not coming after tracepoint
  perf tools: Fix build breakage on arm64 targets
  perf symbols: Improve DSO long names lookup speed with rbtree
  perf symbols: Encapsulate dsos list head into struct dsos
  perf bench futex: Sanitize -q option in requeue
  perf bench futex: Support operations for shared futexes
  perf trace: Fix mmap return address truncation to 32-bit
  perf tools: Refactor unit and scale function parameters
  perf tools: Fix line number in the config file error message
  perf tools: Convert {record,top}.call-graph option to call-graph.record-mode
  perf tools: Introduce perf_callchain_config()
  perf callchain: Move some parser functions to callchain.c
  perf tools: Move callchain config from record_opts to callchain_param
  perf hists browser: Fix callchain print bug on TUI
  perf tools: Use ACCESS_ONCE() instead of volatile cast
  perf tools: Modify error code for when perf_session__new() fails
  perf tools: Fix perf record as non root with kptr_restrict == 1
  perf stat: Fix --per-core on multi socket systems
  ...
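
Note: the POLLHUP handling described above lives in tools/perf, not in the
file diffed below. As a rough sketch of the approach (a hypothetical helper
in plain C, not the actual tools/perf code): after poll() returns, entries
whose peer hung up are filtered out of the descriptor array, and the mmap
tied to the last such fd is released.

	#include <poll.h>

	/* Compact a pollfd array, dropping hung-up entries (illustrative only). */
	static int filter_hangups(struct pollfd *fds, int nfds)
	{
		int i, kept = 0;

		for (i = 0; i < nfds; i++) {
			if (fds[i].revents & POLLHUP)
				continue;	/* last user gone: unmap/close would happen here */
			fds[kept++] = fds[i];
		}
		return kept;	/* new length; 0 means nothing left to poll */
	}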
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  229
1 file changed, 199 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2502d0d9d246..3851def5057c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,6 +220,15 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_bdw_event_constraints[] = {
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
+	INTEL_EVENT_CONSTRAINT(0xa3, 0x4),	/* CYCLE_ACTIVITY.* */
+	EVENT_CONSTRAINT_END
+};
+
 static u64 intel_pmu_event_map(int hw_event)
 {
 	return intel_perfmon_event_map[hw_event];
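
Note: in these constraint macros the second argument is a counter bitmask
naming which generic counters the event may use; 0x4 (bit 2) pins
L1D_PEND_MISS.PENDING and CYCLE_ACTIVITY.* to counter 2. A standalone
sketch of how such a mask reads (plain userspace C, illustrative only, not
kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned cntr_mask = 0x4;	/* bit n set => generic counter n allowed */
		int i;

		for (i = 0; i < 8; i++)
			if (cntr_mask & (1u << i))
				printf("may schedule on counter %d\n", i);
		return 0;	/* prints: may schedule on counter 2 */
	}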
@@ -415,6 +424,126 @@ static __initconst const u64 snb_hw_cache_event_ids
 
 };
 
+static __initconst const u64 hsw_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(L1I ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
+		[ C(RESULT_ACCESS) ] = 0x1b7,
+		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
+		   L3_MISS|ANY_SNOOP */
+		[ C(RESULT_MISS)   ] = 0x1b7,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE:ALL_RFO */
+		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
+		[ C(RESULT_MISS)   ] = 0x1b7,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(DTLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(ITLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
+		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+ [ C(BPU ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
+		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+};
+
+static __initconst const u64 hsw_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
+		[ C(RESULT_ACCESS) ] = 0x2d5,
+		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
+		   L3_MISS|ANY_SNOOP */
+		[ C(RESULT_MISS)   ] = 0x3fbc0202d5ull,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x122,	/* OFFCORE_RESPONSE:ALL_RFO */
+		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
+		[ C(RESULT_MISS)   ] = 0x3fbc020122ull,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+};
+
 static __initconst const u64 westmere_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
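
Note: these tables are indexed [cache][op][result]; as a rule of thumb, a 0
entry means no event is available for that combination, while -1 marks
combinations the hardware cannot count (e.g. L1I writes). Entries of 0x1b7
select OFFCORE_RESPONSE, whose actual MSR value comes from the matching
slot in hsw_hw_cache_extra_regs above. A toy lookup mirroring that shape
(dimensions and error values are paraphrased assumptions, not copied from
the kernel):

	typedef unsigned long long u64;

	#define CACHE_MAX  7	/* stand-in for PERF_COUNT_HW_CACHE_MAX */
	#define OP_MAX     3
	#define RESULT_MAX 2

	static int resolve(const u64 t[CACHE_MAX][OP_MAX][RESULT_MAX],
			   int cache, int op, int result, u64 *config)
	{
		u64 val = t[cache][op][result];

		if (val == 0)
			return -1;	/* no event for this combination */
		if (val == (u64)-1)
			return -2;	/* combination not supported by hardware */
		*config = val;		/* 0x1b7 also needs an extra-reg value */
		return 0;
	}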
@@ -1905,6 +2034,24 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	return c;
 }
 
+/*
+ * Broadwell:
+ * The INST_RETIRED.ALL period always needs to have lowest
+ * 6bits cleared (BDM57). It shall not use a period smaller
+ * than 100 (BDM11). We combine the two to enforce
+ * a min-period of 128.
+ */
+static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+{
+	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+			X86_CONFIG(.event=0xc0, .umask=0x01)) {
+		if (left < 128)
+			left = 128;
+		left &= ~0x3fu;
+	}
+	return left;
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
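
Note: worked through, the clamp above behaves as follows for
INST_RETIRED.ALL periods: 100 is raised to the minimum of 128; 200 has its
low 6 bits cleared and becomes 192; 128 passes through unchanged. A
standalone demo of the same arithmetic (plain userspace C, not kernel
code):

	#include <stdio.h>

	static unsigned limit(unsigned left)
	{
		if (left < 128)
			left = 128;
		return left & ~0x3fu;	/* clear the low 6 bits (BDM57) */
	}

	int main(void)
	{
		printf("%u -> %u\n", 100u, limit(100));	/* 100 -> 128 */
		printf("%u -> %u\n", 200u, limit(200));	/* 200 -> 192 */
		printf("%u -> %u\n", 128u, limit(128));	/* 128 -> 128 */
		return 0;
	}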
@@ -2367,15 +2514,15 @@ __init int intel_pmu_init(void)
 	 * Install the hw-cache-events table:
 	 */
 	switch (boot_cpu_data.x86_model) {
-	case 14: /* 65 nm core solo/duo, "Yonah" */
+	case 14: /* 65nm Core "Yonah" */
 		pr_cont("Core events, ");
 		break;
 
-	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+	case 15: /* 65nm Core2 "Merom" */
 		x86_add_quirk(intel_clovertown_quirk);
-	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
-	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
-	case 29: /* six-core 45 nm xeon "Dunnington" */
+	case 22: /* 65nm Core2 "Merom-L" */
+	case 23: /* 45nm Core2 "Penryn" */
+	case 29: /* 45nm Core2 "Dunnington (MP) */
 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -2386,9 +2533,9 @@ __init int intel_pmu_init(void)
 		pr_cont("Core2 events, ");
 		break;
 
-	case 26: /* 45 nm nehalem, "Bloomfield" */
-	case 30: /* 45 nm nehalem, "Lynnfield" */
-	case 46: /* 45 nm nehalem-ex, "Beckton" */
+	case 30: /* 45nm Nehalem    */
+	case 26: /* 45nm Nehalem-EP */
+	case 46: /* 45nm Nehalem-EX */
 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -2415,11 +2562,11 @@ __init int intel_pmu_init(void)
 		pr_cont("Nehalem events, ");
 		break;
 
-	case 28: /* Atom */
-	case 38: /* Lincroft */
-	case 39: /* Penwell */
-	case 53: /* Cloverview */
-	case 54: /* Cedarview */
+	case 28: /* 45nm Atom "Pineview"   */
+	case 38: /* 45nm Atom "Lincroft"   */
+	case 39: /* 32nm Atom "Penwell"    */
+	case 53: /* 32nm Atom "Cloverview" */
+	case 54: /* 32nm Atom "Cedarview"  */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -2430,8 +2577,8 @@ __init int intel_pmu_init(void)
 		pr_cont("Atom events, ");
 		break;
 
-	case 55: /* Atom 22nm "Silvermont" */
-	case 77: /* Avoton "Silvermont" */
+	case 55: /* 22nm Atom "Silvermont"                */
+	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 			sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -2446,9 +2593,9 @@ __init int intel_pmu_init(void)
 		pr_cont("Silvermont events, ");
 		break;
 
-	case 37: /* 32 nm nehalem, "Clarkdale" */
-	case 44: /* 32 nm nehalem, "Gulftown" */
-	case 47: /* 32 nm Xeon E7 */
+	case 37: /* 32nm Westmere    */
+	case 44: /* 32nm Westmere-EP */
+	case 47: /* 32nm Westmere-EX */
 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -2474,8 +2621,8 @@ __init int intel_pmu_init(void)
 		pr_cont("Westmere events, ");
 		break;
 
-	case 42: /* SandyBridge */
-	case 45: /* SandyBridge, "Romely-EP" */
+	case 42: /* 32nm SandyBridge         */
+	case 45: /* 32nm SandyBridge-E/EN/EP */
 		x86_add_quirk(intel_sandybridge_quirk);
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
@@ -2506,8 +2653,9 @@ __init int intel_pmu_init(void)
 
 		pr_cont("SandyBridge events, ");
 		break;
-	case 58: /* IvyBridge */
-	case 62: /* IvyBridge EP */
+
+	case 58: /* 22nm IvyBridge       */
+	case 62: /* 22nm IvyBridge-EP/EX */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		/* dTLB-load-misses on IVB is different than SNB */
@@ -2539,20 +2687,19 @@ __init int intel_pmu_init(void)
 		break;
 
 
-	case 60: /* Haswell Client */
-	case 70:
-	case 71:
-	case 63:
-	case 69:
+	case 60: /* 22nm Haswell Core */
+	case 63: /* 22nm Haswell Server */
+	case 69: /* 22nm Haswell ULT */
+	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
 		x86_pmu.late_ack = true;
-		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
 
 		intel_pmu_lbr_init_snb();
 
 		x86_pmu.event_constraints = intel_hsw_event_constraints;
 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		x86_pmu.extra_regs = intel_snbep_extra_regs;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -2565,6 +2712,28 @@ __init int intel_pmu_init(void)
 		pr_cont("Haswell events, ");
 		break;
 
+	case 61: /* 14nm Broadwell Core-M */
+		x86_pmu.late_ack = true;
+		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+
+		intel_pmu_lbr_init_snb();
+
+		x86_pmu.event_constraints = intel_bdw_event_constraints;
+		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
+		x86_pmu.extra_regs = intel_snbep_extra_regs;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+		/* all extra regs are per-cpu when HT is on */
+		x86_pmu.er_flags |= ERF_HAS_RSP_1;
+		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+		x86_pmu.hw_config = hsw_hw_config;
+		x86_pmu.get_event_constraints = hsw_get_event_constraints;
+		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.limit_period = bdw_limit_period;
+		pr_cont("Broadwell events, ");
+		break;
+
 	default:
 		switch (x86_pmu.version) {
 		case 1: