Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r-- | arch/x86/kernel/cpu/Makefile | 4
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 14
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.h | 49
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel.c | 229
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_ds.c | 185
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_lbr.c | 8
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore.c | 3175
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore.h | 439
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c | 1221
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c | 636
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 2258
11 files changed, 4580 insertions, 3638 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 77dcab277710..01d5453b5502 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,9 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o | |||
39 | endif | 39 | endif |
40 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o | 40 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o |
41 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o | 41 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o |
42 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o | 42 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o |
43 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o | ||
44 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o | ||
43 | endif | 45 | endif |
44 | 46 | ||
45 | 47 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2879ecdaac43..16c73022306e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,7 +243,8 @@ static bool check_hw_exists(void) | |||
243 | 243 | ||
244 | msr_fail: | 244 | msr_fail: |
245 | printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); | 245 | printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); |
246 | printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); | 246 | printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR |
247 | "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); | ||
247 | 248 | ||
248 | return false; | 249 | return false; |
249 | } | 250 | } |
@@ -387,7 +388,7 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
387 | precise++; | 388 | precise++; |
388 | 389 | ||
389 | /* Support for IP fixup */ | 390 | /* Support for IP fixup */ |
390 | if (x86_pmu.lbr_nr) | 391 | if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) |
391 | precise++; | 392 | precise++; |
392 | } | 393 | } |
393 | 394 | ||
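(Context for the relaxed check above: PEBS format 2, first seen on Haswell, records the precise eventing IP in the PEBS record itself, so precise_ip == 2 no longer requires an LBR-based IP fixup.)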
@@ -443,6 +444,12 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
443 | if (event->attr.type == PERF_TYPE_RAW) | 444 | if (event->attr.type == PERF_TYPE_RAW) |
444 | event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; | 445 | event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; |
445 | 446 | ||
447 | if (event->attr.sample_period && x86_pmu.limit_period) { | ||
448 | if (x86_pmu.limit_period(event, event->attr.sample_period) > | ||
449 | event->attr.sample_period) | ||
450 | return -EINVAL; | ||
451 | } | ||
452 | |||
446 | return x86_setup_perfctr(event); | 453 | return x86_setup_perfctr(event); |
447 | } | 454 | } |
448 | 455 | ||
@@ -980,6 +987,9 @@ int x86_perf_event_set_period(struct perf_event *event) | |||
980 | if (left > x86_pmu.max_period) | 987 | if (left > x86_pmu.max_period) |
981 | left = x86_pmu.max_period; | 988 | left = x86_pmu.max_period; |
982 | 989 | ||
990 | if (x86_pmu.limit_period) | ||
991 | left = x86_pmu.limit_period(event, left); | ||
992 | |||
983 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; | 993 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; |
984 | 994 | ||
985 | /* | 995 | /* |
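The two hunks above define the contract for the new x86_pmu.limit_period hook: the callback may only round a period up, never down. x86_pmu_hw_config() rejects a user-supplied sample_period that the callback would have to raise, while x86_perf_event_set_period() silently clamps periods computed in frequency mode. A minimal sketch of a conforming callback, for a hypothetical PMU that only accepts even periods (illustration only, not part of this patch):

	static unsigned example_limit_period(struct perf_event *event, unsigned left)
	{
		/* round odd periods up; even ones pass through unchanged */
		return (left + 1) & ~1u;
	}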
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8ade93111e03..d98a34d435d7 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -67,8 +67,10 @@ struct event_constraint { | |||
67 | */ | 67 | */ |
68 | #define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ | 68 | #define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ |
69 | #define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ | 69 | #define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ |
70 | #define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style st data sampling */ | 70 | #define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style datala, store */ |
71 | #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ | 71 | #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ |
72 | #define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */ | ||
73 | #define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */ | ||
72 | 74 | ||
73 | struct amd_nb { | 75 | struct amd_nb { |
74 | int nb_id; /* NorthBridge id */ | 76 | int nb_id; /* NorthBridge id */ |
@@ -252,18 +254,52 @@ struct cpu_hw_events { | |||
252 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | 254 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) |
253 | 255 | ||
254 | #define INTEL_PLD_CONSTRAINT(c, n) \ | 256 | #define INTEL_PLD_CONSTRAINT(c, n) \ |
255 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 257 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
256 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) | 258 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) |
257 | 259 | ||
258 | #define INTEL_PST_CONSTRAINT(c, n) \ | 260 | #define INTEL_PST_CONSTRAINT(c, n) \ |
259 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 261 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
260 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) | 262 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) |
261 | 263 | ||
262 | /* DataLA version of store sampling without extra enable bit. */ | 264 | /* Event constraint, but match on all event flags too. */ |
263 | #define INTEL_PST_HSW_CONSTRAINT(c, n) \ | 265 | #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ |
264 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 266 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) |
267 | |||
268 | /* Check only flags, but allow all event/umask */ | ||
269 | #define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ | ||
270 | EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) | ||
271 | |||
272 | /* Check flags and event code, and set the HSW store flag */ | ||
273 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \ | ||
274 | __EVENT_CONSTRAINT(code, n, \ | ||
275 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ | ||
276 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | ||
277 | |||
278 | /* Check flags and event code, and set the HSW load flag */ | ||
279 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \ | ||
280 | __EVENT_CONSTRAINT(code, n, \ | ||
281 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ | ||
282 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) | ||
283 | |||
284 | /* Check flags and event code/umask, and set the HSW store flag */ | ||
285 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \ | ||
286 | __EVENT_CONSTRAINT(code, n, \ | ||
287 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ | ||
265 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | 288 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
266 | 289 | ||
290 | /* Check flags and event code/umask, and set the HSW load flag */ | ||
291 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \ | ||
292 | __EVENT_CONSTRAINT(code, n, \ | ||
293 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ | ||
294 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) | ||
295 | |||
296 | /* Check flags and event code/umask, and set the HSW N/A flag */ | ||
297 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ | ||
298 | __EVENT_CONSTRAINT(code, n, \ | ||
299 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ | ||
300 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) | ||
301 | |||
302 | |||
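For orientation, roughly what one of the new wrappers above expands to (a paraphrase of __EVENT_CONSTRAINT with abbreviated field names, not a new definition):

	/*
	 * INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf) yields:
	 *   .code   = 0x81d0   (event 0xd0, umask 0x81)
	 *   .cmask  = INTEL_ARCH_EVENT_MASK | X86_ALL_EVENT_FLAGS
	 *   .idxmsk = 0xf      (general-purpose counters 0-3)
	 *   .flags  = PERF_X86_EVENT_PEBS_LD_HSW
	 * i.e. it matches the full event/umask plus all event flags and
	 * tags the event so the PEBS handler decodes its DataLA as a load.
	 */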
267 | /* | 303 | /* |
268 | * We define the end marker as having a weight of -1 | 304 | * We define the end marker as having a weight of -1 |
269 | * to enable blacklisting of events using a counter bitmask | 305 | * to enable blacklisting of events using a counter bitmask |
@@ -409,6 +445,7 @@ struct x86_pmu { | |||
409 | struct x86_pmu_quirk *quirks; | 445 | struct x86_pmu_quirk *quirks; |
410 | int perfctr_second_write; | 446 | int perfctr_second_write; |
411 | bool late_ack; | 447 | bool late_ack; |
448 | unsigned (*limit_period)(struct perf_event *event, unsigned l); | ||
412 | 449 | ||
413 | /* | 450 | /* |
414 | * sysfs attrs | 451 | * sysfs attrs |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2502d0d9d246..3851def5057c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,6 +220,15 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
221 | }; | 221 | }; |
222 | 222 | ||
223 | static struct event_constraint intel_bdw_event_constraints[] = { | ||
224 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
225 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
226 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | ||
227 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ | ||
228 | INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ | ||
229 | EVENT_CONSTRAINT_END | ||
230 | }; | ||
231 | |||
223 | static u64 intel_pmu_event_map(int hw_event) | 232 | static u64 intel_pmu_event_map(int hw_event) |
224 | { | 233 | { |
225 | return intel_perfmon_event_map[hw_event]; | 234 | return intel_perfmon_event_map[hw_event]; |
@@ -415,6 +424,126 @@ static __initconst const u64 snb_hw_cache_event_ids | |||
415 | 424 | ||
416 | }; | 425 | }; |
417 | 426 | ||
427 | static __initconst const u64 hsw_hw_cache_event_ids | ||
428 | [PERF_COUNT_HW_CACHE_MAX] | ||
429 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
430 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
431 | { | ||
432 | [ C(L1D ) ] = { | ||
433 | [ C(OP_READ) ] = { | ||
434 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ | ||
435 | [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ | ||
436 | }, | ||
437 | [ C(OP_WRITE) ] = { | ||
438 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
439 | [ C(RESULT_MISS) ] = 0x0, | ||
440 | }, | ||
441 | [ C(OP_PREFETCH) ] = { | ||
442 | [ C(RESULT_ACCESS) ] = 0x0, | ||
443 | [ C(RESULT_MISS) ] = 0x0, | ||
444 | }, | ||
445 | }, | ||
446 | [ C(L1I ) ] = { | ||
447 | [ C(OP_READ) ] = { | ||
448 | [ C(RESULT_ACCESS) ] = 0x0, | ||
449 | [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ | ||
450 | }, | ||
451 | [ C(OP_WRITE) ] = { | ||
452 | [ C(RESULT_ACCESS) ] = -1, | ||
453 | [ C(RESULT_MISS) ] = -1, | ||
454 | }, | ||
455 | [ C(OP_PREFETCH) ] = { | ||
456 | [ C(RESULT_ACCESS) ] = 0x0, | ||
457 | [ C(RESULT_MISS) ] = 0x0, | ||
458 | }, | ||
459 | }, | ||
460 | [ C(LL ) ] = { | ||
461 | [ C(OP_READ) ] = { | ||
462 | /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ | ||
463 | [ C(RESULT_ACCESS) ] = 0x1b7, | ||
464 | /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| | ||
465 | L3_MISS|ANY_SNOOP */ | ||
466 | [ C(RESULT_MISS) ] = 0x1b7, | ||
467 | }, | ||
468 | [ C(OP_WRITE) ] = { | ||
469 | [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */ | ||
470 | /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ | ||
471 | [ C(RESULT_MISS) ] = 0x1b7, | ||
472 | }, | ||
473 | [ C(OP_PREFETCH) ] = { | ||
474 | [ C(RESULT_ACCESS) ] = 0x0, | ||
475 | [ C(RESULT_MISS) ] = 0x0, | ||
476 | }, | ||
477 | }, | ||
478 | [ C(DTLB) ] = { | ||
479 | [ C(OP_READ) ] = { | ||
480 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ | ||
481 | [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ | ||
482 | }, | ||
483 | [ C(OP_WRITE) ] = { | ||
484 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
485 | [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | ||
486 | }, | ||
487 | [ C(OP_PREFETCH) ] = { | ||
488 | [ C(RESULT_ACCESS) ] = 0x0, | ||
489 | [ C(RESULT_MISS) ] = 0x0, | ||
490 | }, | ||
491 | }, | ||
492 | [ C(ITLB) ] = { | ||
493 | [ C(OP_READ) ] = { | ||
494 | [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ | ||
495 | [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ | ||
496 | }, | ||
497 | [ C(OP_WRITE) ] = { | ||
498 | [ C(RESULT_ACCESS) ] = -1, | ||
499 | [ C(RESULT_MISS) ] = -1, | ||
500 | }, | ||
501 | [ C(OP_PREFETCH) ] = { | ||
502 | [ C(RESULT_ACCESS) ] = -1, | ||
503 | [ C(RESULT_MISS) ] = -1, | ||
504 | }, | ||
505 | }, | ||
506 | [ C(BPU ) ] = { | ||
507 | [ C(OP_READ) ] = { | ||
508 | [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
509 | [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
510 | }, | ||
511 | [ C(OP_WRITE) ] = { | ||
512 | [ C(RESULT_ACCESS) ] = -1, | ||
513 | [ C(RESULT_MISS) ] = -1, | ||
514 | }, | ||
515 | [ C(OP_PREFETCH) ] = { | ||
516 | [ C(RESULT_ACCESS) ] = -1, | ||
517 | [ C(RESULT_MISS) ] = -1, | ||
518 | }, | ||
519 | }, | ||
520 | }; | ||
521 | |||
522 | static __initconst const u64 hsw_hw_cache_extra_regs | ||
523 | [PERF_COUNT_HW_CACHE_MAX] | ||
524 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
525 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
526 | { | ||
527 | [ C(LL ) ] = { | ||
528 | [ C(OP_READ) ] = { | ||
529 | /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ | ||
530 | [ C(RESULT_ACCESS) ] = 0x2d5, | ||
531 | /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| | ||
532 | L3_MISS|ANY_SNOOP */ | ||
533 | [ C(RESULT_MISS) ] = 0x3fbc0202d5ull, | ||
534 | }, | ||
535 | [ C(OP_WRITE) ] = { | ||
536 | [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */ | ||
537 | /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ | ||
538 | [ C(RESULT_MISS) ] = 0x3fbc020122ull, | ||
539 | }, | ||
540 | [ C(OP_PREFETCH) ] = { | ||
541 | [ C(RESULT_ACCESS) ] = 0x0, | ||
542 | [ C(RESULT_MISS) ] = 0x0, | ||
543 | }, | ||
544 | }, | ||
545 | }; | ||
546 | |||
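The 0x1b7 entries in hsw_hw_cache_event_ids select the OFFCORE_RESPONSE event; the request/response bits come from the matching slot of hsw_hw_cache_extra_regs and reach the offcore-response MSR through the extra_reg machinery (intel_snbep_extra_regs, selected further down). A simplified sketch of the pairing performed by the generic cache-event setup (set_ext_hw_attr() in perf_event.c):

	u64 config = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)];
	/* config == 0x1b7, the OFFCORE_RESPONSE event encoding */
	u64 extra  = hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)];
	/* extra == 0x3fbc0202d5ull; it lands in attr->config1 and is
	 * later written to the MSR_OFFCORE_RSP_x extra register */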
418 | static __initconst const u64 westmere_hw_cache_event_ids | 547 | static __initconst const u64 westmere_hw_cache_event_ids |
419 | [PERF_COUNT_HW_CACHE_MAX] | 548 | [PERF_COUNT_HW_CACHE_MAX] |
420 | [PERF_COUNT_HW_CACHE_OP_MAX] | 549 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -1905,6 +2034,24 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
1905 | return c; | 2034 | return c; |
1906 | } | 2035 | } |
1907 | 2036 | ||
2037 | /* | ||
2038 | * Broadwell: | ||
2039 | * The INST_RETIRED.ALL period always needs to have lowest | ||
2040 | * 6 bits cleared (BDM57). It shall not use a period smaller | ||
2041 | * than 100 (BDM11). We combine the two to enforce | ||
2042 | * a min-period of 128. | ||
2043 | */ | ||
2044 | static unsigned bdw_limit_period(struct perf_event *event, unsigned left) | ||
2045 | { | ||
2046 | if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == | ||
2047 | X86_CONFIG(.event=0xc0, .umask=0x01)) { | ||
2048 | if (left < 128) | ||
2049 | left = 128; | ||
2050 | left &= ~0x3fu; | ||
2051 | } | ||
2052 | return left; | ||
2053 | } | ||
2054 | |||
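Worked examples for the clamp above (illustrative arithmetic only): a requested period of 100 is first raised to 128, and 128 & ~0x3f stays 128; 130 & ~0x3f becomes 128; 200 & ~0x3f becomes 192. The effective period is therefore always at least 128 and a multiple of 64, satisfying both BDM57 (low 6 bits clear) and BDM11 (period not below 100).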
1908 | PMU_FORMAT_ATTR(event, "config:0-7" ); | 2055 | PMU_FORMAT_ATTR(event, "config:0-7" ); |
1909 | PMU_FORMAT_ATTR(umask, "config:8-15" ); | 2056 | PMU_FORMAT_ATTR(umask, "config:8-15" ); |
1910 | PMU_FORMAT_ATTR(edge, "config:18" ); | 2057 | PMU_FORMAT_ATTR(edge, "config:18" ); |
@@ -2367,15 +2514,15 @@ __init int intel_pmu_init(void) | |||
2367 | * Install the hw-cache-events table: | 2514 | * Install the hw-cache-events table: |
2368 | */ | 2515 | */ |
2369 | switch (boot_cpu_data.x86_model) { | 2516 | switch (boot_cpu_data.x86_model) { |
2370 | case 14: /* 65 nm core solo/duo, "Yonah" */ | 2517 | case 14: /* 65nm Core "Yonah" */ |
2371 | pr_cont("Core events, "); | 2518 | pr_cont("Core events, "); |
2372 | break; | 2519 | break; |
2373 | 2520 | ||
2374 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | 2521 | case 15: /* 65nm Core2 "Merom" */ |
2375 | x86_add_quirk(intel_clovertown_quirk); | 2522 | x86_add_quirk(intel_clovertown_quirk); |
2376 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | 2523 | case 22: /* 65nm Core2 "Merom-L" */ |
2377 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | 2524 | case 23: /* 45nm Core2 "Penryn" */ |
2378 | case 29: /* six-core 45 nm xeon "Dunnington" */ | 2525 | case 29: /* 45nm Core2 "Dunnington" (MP) */
2379 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, | 2526 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, |
2380 | sizeof(hw_cache_event_ids)); | 2527 | sizeof(hw_cache_event_ids)); |
2381 | 2528 | ||
@@ -2386,9 +2533,9 @@ __init int intel_pmu_init(void) | |||
2386 | pr_cont("Core2 events, "); | 2533 | pr_cont("Core2 events, "); |
2387 | break; | 2534 | break; |
2388 | 2535 | ||
2389 | case 26: /* 45 nm nehalem, "Bloomfield" */ | 2536 | case 30: /* 45nm Nehalem */ |
2390 | case 30: /* 45 nm nehalem, "Lynnfield" */ | 2537 | case 26: /* 45nm Nehalem-EP */ |
2391 | case 46: /* 45 nm nehalem-ex, "Beckton" */ | 2538 | case 46: /* 45nm Nehalem-EX */ |
2392 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | 2539 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
2393 | sizeof(hw_cache_event_ids)); | 2540 | sizeof(hw_cache_event_ids)); |
2394 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | 2541 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, |
@@ -2415,11 +2562,11 @@ __init int intel_pmu_init(void) | |||
2415 | pr_cont("Nehalem events, "); | 2562 | pr_cont("Nehalem events, "); |
2416 | break; | 2563 | break; |
2417 | 2564 | ||
2418 | case 28: /* Atom */ | 2565 | case 28: /* 45nm Atom "Pineview" */ |
2419 | case 38: /* Lincroft */ | 2566 | case 38: /* 45nm Atom "Lincroft" */ |
2420 | case 39: /* Penwell */ | 2567 | case 39: /* 32nm Atom "Penwell" */ |
2421 | case 53: /* Cloverview */ | 2568 | case 53: /* 32nm Atom "Cloverview" */ |
2422 | case 54: /* Cedarview */ | 2569 | case 54: /* 32nm Atom "Cedarview" */ |
2423 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | 2570 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, |
2424 | sizeof(hw_cache_event_ids)); | 2571 | sizeof(hw_cache_event_ids)); |
2425 | 2572 | ||
@@ -2430,8 +2577,8 @@ __init int intel_pmu_init(void) | |||
2430 | pr_cont("Atom events, "); | 2577 | pr_cont("Atom events, "); |
2431 | break; | 2578 | break; |
2432 | 2579 | ||
2433 | case 55: /* Atom 22nm "Silvermont" */ | 2580 | case 55: /* 22nm Atom "Silvermont" */ |
2434 | case 77: /* Avoton "Silvermont" */ | 2581 | case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ |
2435 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, | 2582 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, |
2436 | sizeof(hw_cache_event_ids)); | 2583 | sizeof(hw_cache_event_ids)); |
2437 | memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, | 2584 | memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, |
@@ -2446,9 +2593,9 @@ __init int intel_pmu_init(void) | |||
2446 | pr_cont("Silvermont events, "); | 2593 | pr_cont("Silvermont events, "); |
2447 | break; | 2594 | break; |
2448 | 2595 | ||
2449 | case 37: /* 32 nm nehalem, "Clarkdale" */ | 2596 | case 37: /* 32nm Westmere */ |
2450 | case 44: /* 32 nm nehalem, "Gulftown" */ | 2597 | case 44: /* 32nm Westmere-EP */ |
2451 | case 47: /* 32 nm Xeon E7 */ | 2598 | case 47: /* 32nm Westmere-EX */ |
2452 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, | 2599 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, |
2453 | sizeof(hw_cache_event_ids)); | 2600 | sizeof(hw_cache_event_ids)); |
2454 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | 2601 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, |
@@ -2474,8 +2621,8 @@ __init int intel_pmu_init(void) | |||
2474 | pr_cont("Westmere events, "); | 2621 | pr_cont("Westmere events, "); |
2475 | break; | 2622 | break; |
2476 | 2623 | ||
2477 | case 42: /* SandyBridge */ | 2624 | case 42: /* 32nm SandyBridge */ |
2478 | case 45: /* SandyBridge, "Romely-EP" */ | 2625 | case 45: /* 32nm SandyBridge-E/EN/EP */ |
2479 | x86_add_quirk(intel_sandybridge_quirk); | 2626 | x86_add_quirk(intel_sandybridge_quirk); |
2480 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 2627 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
2481 | sizeof(hw_cache_event_ids)); | 2628 | sizeof(hw_cache_event_ids)); |
@@ -2506,8 +2653,9 @@ __init int intel_pmu_init(void) | |||
2506 | 2653 | ||
2507 | pr_cont("SandyBridge events, "); | 2654 | pr_cont("SandyBridge events, "); |
2508 | break; | 2655 | break; |
2509 | case 58: /* IvyBridge */ | 2656 | |
2510 | case 62: /* IvyBridge EP */ | 2657 | case 58: /* 22nm IvyBridge */ |
2658 | case 62: /* 22nm IvyBridge-EP/EX */ | ||
2511 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 2659 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
2512 | sizeof(hw_cache_event_ids)); | 2660 | sizeof(hw_cache_event_ids)); |
2513 | /* dTLB-load-misses on IVB is different than SNB */ | 2661 | /* dTLB-load-misses on IVB is different than SNB */ |
@@ -2539,20 +2687,19 @@ __init int intel_pmu_init(void) | |||
2539 | break; | 2687 | break; |
2540 | 2688 | ||
2541 | 2689 | ||
2542 | case 60: /* Haswell Client */ | 2690 | case 60: /* 22nm Haswell Core */ |
2543 | case 70: | 2691 | case 63: /* 22nm Haswell Server */ |
2544 | case 71: | 2692 | case 69: /* 22nm Haswell ULT */ |
2545 | case 63: | 2693 | case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ |
2546 | case 69: | ||
2547 | x86_pmu.late_ack = true; | 2694 | x86_pmu.late_ack = true; |
2548 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2695 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2549 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2696 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
2550 | 2697 | ||
2551 | intel_pmu_lbr_init_snb(); | 2698 | intel_pmu_lbr_init_snb(); |
2552 | 2699 | ||
2553 | x86_pmu.event_constraints = intel_hsw_event_constraints; | 2700 | x86_pmu.event_constraints = intel_hsw_event_constraints; |
2554 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | 2701 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; |
2555 | x86_pmu.extra_regs = intel_snb_extra_regs; | 2702 | x86_pmu.extra_regs = intel_snbep_extra_regs; |
2556 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | 2703 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; |
2557 | /* all extra regs are per-cpu when HT is on */ | 2704 | /* all extra regs are per-cpu when HT is on */ |
2558 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 2705 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
@@ -2565,6 +2712,28 @@ __init int intel_pmu_init(void) | |||
2565 | pr_cont("Haswell events, "); | 2712 | pr_cont("Haswell events, "); |
2566 | break; | 2713 | break; |
2567 | 2714 | ||
2715 | case 61: /* 14nm Broadwell Core-M */ | ||
2716 | x86_pmu.late_ack = true; | ||
2717 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | ||
2718 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | ||
2719 | |||
2720 | intel_pmu_lbr_init_snb(); | ||
2721 | |||
2722 | x86_pmu.event_constraints = intel_bdw_event_constraints; | ||
2723 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | ||
2724 | x86_pmu.extra_regs = intel_snbep_extra_regs; | ||
2725 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | ||
2726 | /* all extra regs are per-cpu when HT is on */ | ||
2727 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
2728 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | ||
2729 | |||
2730 | x86_pmu.hw_config = hsw_hw_config; | ||
2731 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | ||
2732 | x86_pmu.cpu_events = hsw_events_attrs; | ||
2733 | x86_pmu.limit_period = bdw_limit_period; | ||
2734 | pr_cont("Broadwell events, "); | ||
2735 | break; | ||
2736 | |||
2568 | default: | 2737 | default: |
2569 | switch (x86_pmu.version) { | 2738 | switch (x86_pmu.version) { |
2570 | case 1: | 2739 | case 1: |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 696ade311ded..b1553d05a5cb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -108,14 +108,16 @@ static u64 precise_store_data(u64 status) | |||
108 | return val; | 108 | return val; |
109 | } | 109 | } |
110 | 110 | ||
111 | static u64 precise_store_data_hsw(struct perf_event *event, u64 status) | 111 | static u64 precise_datala_hsw(struct perf_event *event, u64 status) |
112 | { | 112 | { |
113 | union perf_mem_data_src dse; | 113 | union perf_mem_data_src dse; |
114 | u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK; | ||
115 | 114 | ||
116 | dse.val = 0; | 115 | dse.val = PERF_MEM_NA; |
117 | dse.mem_op = PERF_MEM_OP_STORE; | 116 | |
118 | dse.mem_lvl = PERF_MEM_LVL_NA; | 117 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) |
118 | dse.mem_op = PERF_MEM_OP_STORE; | ||
119 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) | ||
120 | dse.mem_op = PERF_MEM_OP_LOAD; | ||
119 | 121 | ||
120 | /* | 122 | /* |
121 | * L1 info only valid for following events: | 123 | * L1 info only valid for following events: |
@@ -125,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status) | |||
125 | * MEM_UOPS_RETIRED.SPLIT_STORES | 127 | * MEM_UOPS_RETIRED.SPLIT_STORES |
126 | * MEM_UOPS_RETIRED.ALL_STORES | 128 | * MEM_UOPS_RETIRED.ALL_STORES |
127 | */ | 129 | */ |
128 | if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0) | 130 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { |
129 | return dse.mem_lvl; | 131 | if (status & 1) |
130 | 132 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; | |
131 | if (status & 1) | 133 | else |
132 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; | 134 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; |
133 | else | 135 | } |
134 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; | ||
135 | |||
136 | /* Nothing else supported. Sorry. */ | ||
137 | return dse.val; | 136 | return dse.val; |
138 | } | 137 | } |
139 | 138 | ||
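With the rewrite, the event flags rather than the raw config decide the decode. A sketch of the resulting perf_mem_data_src values (restating the code above, for illustration):

	/* store events (PERF_X86_EVENT_PEBS_ST_HSW), e.g. 0x82d0:
	 *   pebs->dse bit 0 set   -> mem_op = STORE, mem_lvl = L1 | HIT
	 *   pebs->dse bit 0 clear -> mem_op = STORE, mem_lvl = L1 | MISS
	 * load events (PERF_X86_EVENT_PEBS_LD_HSW) only set
	 *   mem_op = LOAD; mem_lvl keeps the PERF_MEM_NA default
	 */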
@@ -569,28 +568,10 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { | |||
569 | }; | 568 | }; |
570 | 569 | ||
571 | struct event_constraint intel_slm_pebs_event_constraints[] = { | 570 | struct event_constraint intel_slm_pebs_event_constraints[] = { |
572 | INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */ | 571 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
573 | INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */ | 572 | INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), |
574 | INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */ | 573 | /* Allow all events as PEBS with no flags */ |
575 | INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */ | 574 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), |
576 | INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */ | ||
577 | INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */ | ||
578 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */ | ||
579 | INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */ | ||
580 | INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */ | ||
581 | INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */ | ||
582 | INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */ | ||
583 | INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */ | ||
584 | INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */ | ||
585 | INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */ | ||
586 | INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */ | ||
587 | INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */ | ||
588 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */ | ||
589 | INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */ | ||
590 | INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */ | ||
591 | INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */ | ||
592 | INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */ | ||
593 | INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */ | ||
594 | EVENT_CONSTRAINT_END | 575 | EVENT_CONSTRAINT_END |
595 | }; | 576 | }; |
596 | 577 | ||
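Decoding the raw constraint code 0x108001c2 used above: bits 0-7 are the event (0xc2, UOPS_RETIRED), bits 8-15 the umask (0x01), bit 23 is inv=1 and bits 24-31 are cmask=16. That is exactly the encoding the cycles:p alias programs, so it keeps a dedicated entry while every other event falls through to the catch-all INTEL_ALL_EVENT_CONSTRAINT below it.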
@@ -626,68 +607,44 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { | |||
626 | 607 | ||
627 | struct event_constraint intel_snb_pebs_event_constraints[] = { | 608 | struct event_constraint intel_snb_pebs_event_constraints[] = { |
628 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | 609 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
629 | INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | ||
630 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | ||
631 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | ||
632 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | ||
633 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ | 610 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
634 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ | 611 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
635 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ | 612 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
636 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 613 | INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), |
637 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 614 | /* Allow all events as PEBS with no flags */ |
638 | INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | 615 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
639 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ | ||
640 | EVENT_CONSTRAINT_END | 616 | EVENT_CONSTRAINT_END |
641 | }; | 617 | }; |
642 | 618 | ||
643 | struct event_constraint intel_ivb_pebs_event_constraints[] = { | 619 | struct event_constraint intel_ivb_pebs_event_constraints[] = { |
644 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | 620 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
645 | INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | ||
646 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | ||
647 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | ||
648 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | ||
649 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ | 621 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
650 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ | 622 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
651 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ | 623 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
652 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 624 | INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), |
653 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 625 | /* Allow all events as PEBS with no flags */ |
654 | INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | 626 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
655 | EVENT_CONSTRAINT_END | 627 | EVENT_CONSTRAINT_END |
656 | }; | 628 | }; |
657 | 629 | ||
658 | struct event_constraint intel_hsw_pebs_event_constraints[] = { | 630 | struct event_constraint intel_hsw_pebs_event_constraints[] = { |
659 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | 631 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
660 | INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | 632 | INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
661 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | 633 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
662 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | 634 | INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), |
663 | INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */ | 635 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
664 | INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */ | 636 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ |
665 | INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.NEAR_TAKEN */ | 637 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ |
666 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.* */ | 638 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ |
667 | /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ | 639 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ |
668 | INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), | 640 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ |
669 | /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ | 641 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ |
670 | INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), | 642 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ |
671 | INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ | 643 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
672 | INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ | 644 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ |
673 | /* MEM_UOPS_RETIRED.SPLIT_STORES */ | 645 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ |
674 | INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), | 646 | /* Allow all events as PEBS with no flags */ |
675 | INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ | 647 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
676 | INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
677 | INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */ | ||
678 | INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */ | ||
679 | INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L3_HIT */ | ||
680 | /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */ | ||
681 | INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf), | ||
682 | /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */ | ||
683 | INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf), | ||
684 | /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */ | ||
685 | INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf), | ||
686 | /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */ | ||
687 | INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf), | ||
688 | INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf), /* HLE_RETIRED.Abort */ | ||
689 | INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf), /* RTM_RETIRED.Abort */ | ||
690 | |||
691 | EVENT_CONSTRAINT_END | 648 | EVENT_CONSTRAINT_END |
692 | }; | 649 | }; |
693 | 650 | ||
@@ -864,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs) | |||
864 | static void __intel_pmu_pebs_event(struct perf_event *event, | 821 | static void __intel_pmu_pebs_event(struct perf_event *event, |
865 | struct pt_regs *iregs, void *__pebs) | 822 | struct pt_regs *iregs, void *__pebs) |
866 | { | 823 | { |
824 | #define PERF_X86_EVENT_PEBS_HSW_PREC \ | ||
825 | (PERF_X86_EVENT_PEBS_ST_HSW | \ | ||
826 | PERF_X86_EVENT_PEBS_LD_HSW | \ | ||
827 | PERF_X86_EVENT_PEBS_NA_HSW) | ||
867 | /* | 828 | /* |
868 | * We cast to the biggest pebs_record but are careful not to | 829 | * We cast to the biggest pebs_record but are careful not to |
869 | * unconditionally access the 'extra' entries. | 830 | * unconditionally access the 'extra' entries. |
@@ -873,42 +834,40 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
873 | struct perf_sample_data data; | 834 | struct perf_sample_data data; |
874 | struct pt_regs regs; | 835 | struct pt_regs regs; |
875 | u64 sample_type; | 836 | u64 sample_type; |
876 | int fll, fst; | 837 | int fll, fst, dsrc; |
838 | int fl = event->hw.flags; | ||
877 | 839 | ||
878 | if (!intel_pmu_save_and_restart(event)) | 840 | if (!intel_pmu_save_and_restart(event)) |
879 | return; | 841 | return; |
880 | 842 | ||
881 | fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; | 843 | sample_type = event->attr.sample_type; |
882 | fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST | | 844 | dsrc = sample_type & PERF_SAMPLE_DATA_SRC; |
883 | PERF_X86_EVENT_PEBS_ST_HSW); | 845 | |
846 | fll = fl & PERF_X86_EVENT_PEBS_LDLAT; | ||
847 | fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); | ||
884 | 848 | ||
885 | perf_sample_data_init(&data, 0, event->hw.last_period); | 849 | perf_sample_data_init(&data, 0, event->hw.last_period); |
886 | 850 | ||
887 | data.period = event->hw.last_period; | 851 | data.period = event->hw.last_period; |
888 | sample_type = event->attr.sample_type; | ||
889 | 852 | ||
890 | /* | 853 | /* |
891 | * if PEBS-LL or PreciseStore | 854 | * Use latency for weight (only avail with PEBS-LL) |
892 | */ | 855 | */ |
893 | if (fll || fst) { | 856 | if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) |
894 | /* | 857 | data.weight = pebs->lat; |
895 | * Use latency for weight (only avail with PEBS-LL) | 858 | |
896 | */ | 859 | /* |
897 | if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) | 860 | * data.data_src encodes the data source |
898 | data.weight = pebs->lat; | 861 | */ |
899 | 862 | if (dsrc) { | |
900 | /* | 863 | u64 val = PERF_MEM_NA; |
901 | * data.data_src encodes the data source | 864 | if (fll) |
902 | */ | 865 | val = load_latency_data(pebs->dse); |
903 | if (sample_type & PERF_SAMPLE_DATA_SRC) { | 866 | else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) |
904 | if (fll) | 867 | val = precise_datala_hsw(event, pebs->dse); |
905 | data.data_src.val = load_latency_data(pebs->dse); | 868 | else if (fst) |
906 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) | 869 | val = precise_store_data(pebs->dse); |
907 | data.data_src.val = | 870 | data.data_src.val = val; |
908 | precise_store_data_hsw(event, pebs->dse); | ||
909 | else | ||
910 | data.data_src.val = precise_store_data(pebs->dse); | ||
911 | } | ||
912 | } | 871 | } |
913 | 872 | ||
914 | /* | 873 | /* |
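To summarize the data-source dispatch above: PERF_X86_EVENT_PEBS_LDLAT events decode pebs->dse via load_latency_data(), any of the three Haswell DataLA flags route to precise_datala_hsw(), a plain PERF_X86_EVENT_PEBS_ST keeps the older precise_store_data(), and everything else leaves data_src at the PERF_MEM_NA default.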
@@ -935,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
935 | else | 894 | else |
936 | regs.flags &= ~PERF_EFLAGS_EXACT; | 895 | regs.flags &= ~PERF_EFLAGS_EXACT; |
937 | 896 | ||
938 | if ((event->attr.sample_type & PERF_SAMPLE_ADDR) && | 897 | if ((sample_type & PERF_SAMPLE_ADDR) && |
939 | x86_pmu.intel_cap.pebs_format >= 1) | 898 | x86_pmu.intel_cap.pebs_format >= 1) |
940 | data.addr = pebs->dla; | 899 | data.addr = pebs->dla; |
941 | 900 | ||
942 | if (x86_pmu.intel_cap.pebs_format >= 2) { | 901 | if (x86_pmu.intel_cap.pebs_format >= 2) { |
943 | /* Only set the TSX weight when no memory weight. */ | 902 | /* Only set the TSX weight when no memory weight. */ |
944 | if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll) | 903 | if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) |
945 | data.weight = intel_hsw_weight(pebs); | 904 | data.weight = intel_hsw_weight(pebs); |
946 | 905 | ||
947 | if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION) | 906 | if (sample_type & PERF_SAMPLE_TRANSACTION) |
948 | data.txn = intel_hsw_transaction(pebs); | 907 | data.txn = intel_hsw_transaction(pebs); |
949 | } | 908 | } |
950 | 909 | ||
@@ -1055,7 +1014,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
1055 | * BTS, PEBS probe and setup | 1014 | * BTS, PEBS probe and setup |
1056 | */ | 1015 | */ |
1057 | 1016 | ||
1058 | void intel_ds_init(void) | 1017 | void __init intel_ds_init(void) |
1059 | { | 1018 | { |
1060 | /* | 1019 | /* |
1061 | * No support for 32bit formats | 1020 | * No support for 32bit formats |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 9dd2459a4c73..4af10617de33 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -697,7 +697,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { | |||
697 | }; | 697 | }; |
698 | 698 | ||
699 | /* core */ | 699 | /* core */ |
700 | void intel_pmu_lbr_init_core(void) | 700 | void __init intel_pmu_lbr_init_core(void) |
701 | { | 701 | { |
702 | x86_pmu.lbr_nr = 4; | 702 | x86_pmu.lbr_nr = 4; |
703 | x86_pmu.lbr_tos = MSR_LBR_TOS; | 703 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
@@ -712,7 +712,7 @@ void intel_pmu_lbr_init_core(void) | |||
712 | } | 712 | } |
713 | 713 | ||
714 | /* nehalem/westmere */ | 714 | /* nehalem/westmere */ |
715 | void intel_pmu_lbr_init_nhm(void) | 715 | void __init intel_pmu_lbr_init_nhm(void) |
716 | { | 716 | { |
717 | x86_pmu.lbr_nr = 16; | 717 | x86_pmu.lbr_nr = 16; |
718 | x86_pmu.lbr_tos = MSR_LBR_TOS; | 718 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
@@ -733,7 +733,7 @@ void intel_pmu_lbr_init_nhm(void) | |||
733 | } | 733 | } |
734 | 734 | ||
735 | /* sandy bridge */ | 735 | /* sandy bridge */ |
736 | void intel_pmu_lbr_init_snb(void) | 736 | void __init intel_pmu_lbr_init_snb(void) |
737 | { | 737 | { |
738 | x86_pmu.lbr_nr = 16; | 738 | x86_pmu.lbr_nr = 16; |
739 | x86_pmu.lbr_tos = MSR_LBR_TOS; | 739 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
@@ -753,7 +753,7 @@ void intel_pmu_lbr_init_snb(void) | |||
753 | } | 753 | } |
754 | 754 | ||
755 | /* atom */ | 755 | /* atom */ |
756 | void intel_pmu_lbr_init_atom(void) | 756 | void __init intel_pmu_lbr_init_atom(void) |
757 | { | 757 | { |
758 | /* | 758 | /* |
759 | * only models starting at stepping 10 seem | 759 | * only models starting at stepping 10 seem
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 0939f86f543d..9762dbd9f3f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -1,83 +1,39 @@ | |||
1 | #include "perf_event_intel_uncore.h" | 1 | #include "perf_event_intel_uncore.h" |
2 | 2 | ||
3 | static struct intel_uncore_type *empty_uncore[] = { NULL, }; | 3 | static struct intel_uncore_type *empty_uncore[] = { NULL, }; |
4 | static struct intel_uncore_type **msr_uncores = empty_uncore; | 4 | struct intel_uncore_type **uncore_msr_uncores = empty_uncore; |
5 | static struct intel_uncore_type **pci_uncores = empty_uncore; | 5 | struct intel_uncore_type **uncore_pci_uncores = empty_uncore; |
6 | /* pci bus to socket mapping */ | ||
7 | static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; | ||
8 | 6 | ||
9 | static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; | 7 | static bool pcidrv_registered; |
8 | struct pci_driver *uncore_pci_driver; | ||
9 | /* pci bus to socket mapping */ | ||
10 | int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, }; | ||
11 | struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; | ||
10 | 12 | ||
11 | static DEFINE_RAW_SPINLOCK(uncore_box_lock); | 13 | static DEFINE_RAW_SPINLOCK(uncore_box_lock); |
12 | |||
13 | /* mask of cpus that collect uncore events */ | 14 | /* mask of cpus that collect uncore events */ |
14 | static cpumask_t uncore_cpu_mask; | 15 | static cpumask_t uncore_cpu_mask; |
15 | 16 | ||
16 | /* constraint for the fixed counter */ | 17 | /* constraint for the fixed counter */ |
17 | static struct event_constraint constraint_fixed = | 18 | static struct event_constraint uncore_constraint_fixed = |
18 | EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); | 19 | EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); |
19 | static struct event_constraint constraint_empty = | 20 | struct event_constraint uncore_constraint_empty = |
20 | EVENT_CONSTRAINT(0, 0, 0); | 21 | EVENT_CONSTRAINT(0, 0, 0); |
21 | 22 | ||
22 | #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ | 23 | ssize_t uncore_event_show(struct kobject *kobj, |
23 | ((1ULL << (n)) - 1))) | 24 | struct kobj_attribute *attr, char *buf) |
24 | 25 | { | |
25 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | 26 | struct uncore_event_desc *event = |
26 | DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); | 27 | container_of(attr, struct uncore_event_desc, attr); |
27 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | 28 | return sprintf(buf, "%s", event->config); |
28 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | 29 | } |
29 | DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); | 30 | |
30 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | 31 | struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) |
31 | DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); | ||
32 | DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); | ||
33 | DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); | ||
34 | DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); | ||
35 | DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); | ||
36 | DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); | ||
37 | DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); | ||
38 | DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); | ||
39 | DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); | ||
40 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); | ||
41 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); | ||
42 | DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); | ||
43 | DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); | ||
44 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); | ||
45 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); | ||
46 | DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); | ||
47 | DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); | ||
48 | DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); | ||
49 | DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); | ||
50 | DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); | ||
51 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); | ||
52 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); | ||
53 | DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); | ||
54 | DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); | ||
55 | DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); | ||
56 | DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); | ||
57 | DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); | ||
58 | DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); | ||
59 | DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); | ||
60 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); | ||
61 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); | ||
62 | DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); | ||
63 | DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); | ||
64 | DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); | ||
65 | DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); | ||
66 | DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); | ||
67 | DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); | ||
68 | |||
69 | static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); | ||
70 | static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); | ||
71 | static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); | ||
72 | static void uncore_pmu_event_read(struct perf_event *event); | ||
73 | |||
74 | static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) | ||
75 | { | 32 | { |
76 | return container_of(event->pmu, struct intel_uncore_pmu, pmu); | 33 | return container_of(event->pmu, struct intel_uncore_pmu, pmu); |
77 | } | 34 | } |
78 | 35 | ||
79 | static struct intel_uncore_box * | 36 | struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) |
80 | uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) | ||
81 | { | 37 | { |
82 | struct intel_uncore_box *box; | 38 | struct intel_uncore_box *box; |
83 | 39 | ||
@@ -86,6 +42,9 @@ uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) | |||
86 | return box; | 42 | return box; |
87 | 43 | ||
88 | raw_spin_lock(&uncore_box_lock); | 44 | raw_spin_lock(&uncore_box_lock); |
45 | /* Recheck in lock to handle races. */ | ||
46 | if (*per_cpu_ptr(pmu->box, cpu)) | ||
47 | goto out; | ||
89 | list_for_each_entry(box, &pmu->box_list, list) { | 48 | list_for_each_entry(box, &pmu->box_list, list) { |
90 | if (box->phys_id == topology_physical_package_id(cpu)) { | 49 | if (box->phys_id == topology_physical_package_id(cpu)) { |
91 | atomic_inc(&box->refcnt); | 50 | atomic_inc(&box->refcnt); |
@@ -93,12 +52,13 @@ uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) | |||
93 | break; | 52 | break; |
94 | } | 53 | } |
95 | } | 54 | } |
55 | out: | ||
96 | raw_spin_unlock(&uncore_box_lock); | 56 | raw_spin_unlock(&uncore_box_lock); |
97 | 57 | ||
98 | return *per_cpu_ptr(pmu->box, cpu); | 58 | return *per_cpu_ptr(pmu->box, cpu); |
99 | } | 59 | } |
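The recheck added under uncore_box_lock is classic double-checked locking: without it, two CPUs that both observed a NULL per-cpu pointer would serialize on the lock, each find the same box on pmu->box_list, and each increment box->refcnt, leaving the count one too high. With the goto, the loser of the race simply returns the pointer the winner already published.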
100 | 60 | ||
101 | static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) | 61 | struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) |
102 | { | 62 | { |
103 | /* | 63 | /* |
104 | * perf core schedules event on the basis of cpu, uncore events are | 64 | * perf core schedules event on the basis of cpu, uncore events are |
@@ -107,7 +67,7 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) | |||
107 | return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); | 67 | return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); |
108 | } | 68 | } |
109 | 69 | ||
110 | static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) | 70 | u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
111 | { | 71 | { |
112 | u64 count; | 72 | u64 count; |
113 | 73 | ||
@@ -119,7 +79,7 @@ static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_eve | |||
119 | /* | 79 | /* |
120 | * generic get constraint function for shared match/mask registers. | 80 | * generic get constraint function for shared match/mask registers. |
121 | */ | 81 | */ |
122 | static struct event_constraint * | 82 | struct event_constraint * |
123 | uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | 83 | uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) |
124 | { | 84 | { |
125 | struct intel_uncore_extra_reg *er; | 85 | struct intel_uncore_extra_reg *er; |
@@ -154,10 +114,10 @@ uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | |||
154 | return NULL; | 114 | return NULL; |
155 | } | 115 | } |
156 | 116 | ||
157 | return &constraint_empty; | 117 | return &uncore_constraint_empty; |
158 | } | 118 | } |
159 | 119 | ||
160 | static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | 120 | void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) |
161 | { | 121 | { |
162 | struct intel_uncore_extra_reg *er; | 122 | struct intel_uncore_extra_reg *er; |
163 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | 123 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; |
@@ -178,7 +138,7 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even | |||
178 | reg1->alloc = 0; | 138 | reg1->alloc = 0; |
179 | } | 139 | } |
180 | 140 | ||
181 | static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) | 141 | u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) |
182 | { | 142 | { |
183 | struct intel_uncore_extra_reg *er; | 143 | struct intel_uncore_extra_reg *er; |
184 | unsigned long flags; | 144 | unsigned long flags; |
@@ -193,2936 +153,6 @@ static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) | |||
193 | return config; | 153 | return config; |
194 | } | 154 | } |
195 | 155 | ||
196 | /* Sandy Bridge-EP uncore support */ | ||
197 | static struct intel_uncore_type snbep_uncore_cbox; | ||
198 | static struct intel_uncore_type snbep_uncore_pcu; | ||
199 | |||
200 | static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) | ||
201 | { | ||
202 | struct pci_dev *pdev = box->pci_dev; | ||
203 | int box_ctl = uncore_pci_box_ctl(box); | ||
204 | u32 config = 0; | ||
205 | |||
206 | if (!pci_read_config_dword(pdev, box_ctl, &config)) { | ||
207 | config |= SNBEP_PMON_BOX_CTL_FRZ; | ||
208 | pci_write_config_dword(pdev, box_ctl, config); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) | ||
213 | { | ||
214 | struct pci_dev *pdev = box->pci_dev; | ||
215 | int box_ctl = uncore_pci_box_ctl(box); | ||
216 | u32 config = 0; | ||
217 | |||
218 | if (!pci_read_config_dword(pdev, box_ctl, &config)) { | ||
219 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; | ||
220 | pci_write_config_dword(pdev, box_ctl, config); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
225 | { | ||
226 | struct pci_dev *pdev = box->pci_dev; | ||
227 | struct hw_perf_event *hwc = &event->hw; | ||
228 | |||
229 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
230 | } | ||
231 | |||
232 | static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
233 | { | ||
234 | struct pci_dev *pdev = box->pci_dev; | ||
235 | struct hw_perf_event *hwc = &event->hw; | ||
236 | |||
237 | pci_write_config_dword(pdev, hwc->config_base, hwc->config); | ||
238 | } | ||
239 | |||
240 | static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
241 | { | ||
242 | struct pci_dev *pdev = box->pci_dev; | ||
243 | struct hw_perf_event *hwc = &event->hw; | ||
244 | u64 count = 0; | ||
245 | |||
246 | pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); | ||
247 | pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); | ||
248 | |||
249 | return count; | ||
250 | } | ||
251 | |||
252 | static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) | ||
253 | { | ||
254 | struct pci_dev *pdev = box->pci_dev; | ||
255 | |||
256 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); | ||
257 | } | ||
258 | |||
259 | static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
260 | { | ||
261 | u64 config; | ||
262 | unsigned msr; | ||
263 | |||
264 | msr = uncore_msr_box_ctl(box); | ||
265 | if (msr) { | ||
266 | rdmsrl(msr, config); | ||
267 | config |= SNBEP_PMON_BOX_CTL_FRZ; | ||
268 | wrmsrl(msr, config); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
273 | { | ||
274 | u64 config; | ||
275 | unsigned msr; | ||
276 | |||
277 | msr = uncore_msr_box_ctl(box); | ||
278 | if (msr) { | ||
279 | rdmsrl(msr, config); | ||
280 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; | ||
281 | wrmsrl(msr, config); | ||
282 | } | ||
283 | } | ||
284 | |||
285 | static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
286 | { | ||
287 | struct hw_perf_event *hwc = &event->hw; | ||
288 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
289 | |||
290 | if (reg1->idx != EXTRA_REG_NONE) | ||
291 | wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); | ||
292 | |||
293 | wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
294 | } | ||
295 | |||
296 | static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, | ||
297 | struct perf_event *event) | ||
298 | { | ||
299 | struct hw_perf_event *hwc = &event->hw; | ||
300 | |||
301 | wrmsrl(hwc->config_base, hwc->config); | ||
302 | } | ||
303 | |||
304 | static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) | ||
305 | { | ||
306 | unsigned msr = uncore_msr_box_ctl(box); | ||
307 | |||
308 | if (msr) | ||
309 | wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); | ||
310 | } | ||
311 | |||
312 | static struct attribute *snbep_uncore_formats_attr[] = { | ||
313 | &format_attr_event.attr, | ||
314 | &format_attr_umask.attr, | ||
315 | &format_attr_edge.attr, | ||
316 | &format_attr_inv.attr, | ||
317 | &format_attr_thresh8.attr, | ||
318 | NULL, | ||
319 | }; | ||
320 | |||
321 | static struct attribute *snbep_uncore_ubox_formats_attr[] = { | ||
322 | &format_attr_event.attr, | ||
323 | &format_attr_umask.attr, | ||
324 | &format_attr_edge.attr, | ||
325 | &format_attr_inv.attr, | ||
326 | &format_attr_thresh5.attr, | ||
327 | NULL, | ||
328 | }; | ||
329 | |||
330 | static struct attribute *snbep_uncore_cbox_formats_attr[] = { | ||
331 | &format_attr_event.attr, | ||
332 | &format_attr_umask.attr, | ||
333 | &format_attr_edge.attr, | ||
334 | &format_attr_tid_en.attr, | ||
335 | &format_attr_inv.attr, | ||
336 | &format_attr_thresh8.attr, | ||
337 | &format_attr_filter_tid.attr, | ||
338 | &format_attr_filter_nid.attr, | ||
339 | &format_attr_filter_state.attr, | ||
340 | &format_attr_filter_opc.attr, | ||
341 | NULL, | ||
342 | }; | ||
343 | |||
344 | static struct attribute *snbep_uncore_pcu_formats_attr[] = { | ||
345 | &format_attr_event_ext.attr, | ||
346 | &format_attr_occ_sel.attr, | ||
347 | &format_attr_edge.attr, | ||
348 | &format_attr_inv.attr, | ||
349 | &format_attr_thresh5.attr, | ||
350 | &format_attr_occ_invert.attr, | ||
351 | &format_attr_occ_edge.attr, | ||
352 | &format_attr_filter_band0.attr, | ||
353 | &format_attr_filter_band1.attr, | ||
354 | &format_attr_filter_band2.attr, | ||
355 | &format_attr_filter_band3.attr, | ||
356 | NULL, | ||
357 | }; | ||
358 | |||
359 | static struct attribute *snbep_uncore_qpi_formats_attr[] = { | ||
360 | &format_attr_event_ext.attr, | ||
361 | &format_attr_umask.attr, | ||
362 | &format_attr_edge.attr, | ||
363 | &format_attr_inv.attr, | ||
364 | &format_attr_thresh8.attr, | ||
365 | &format_attr_match_rds.attr, | ||
366 | &format_attr_match_rnid30.attr, | ||
367 | &format_attr_match_rnid4.attr, | ||
368 | &format_attr_match_dnid.attr, | ||
369 | &format_attr_match_mc.attr, | ||
370 | &format_attr_match_opc.attr, | ||
371 | &format_attr_match_vnw.attr, | ||
372 | &format_attr_match0.attr, | ||
373 | &format_attr_match1.attr, | ||
374 | &format_attr_mask_rds.attr, | ||
375 | &format_attr_mask_rnid30.attr, | ||
376 | &format_attr_mask_rnid4.attr, | ||
377 | &format_attr_mask_dnid.attr, | ||
378 | &format_attr_mask_mc.attr, | ||
379 | &format_attr_mask_opc.attr, | ||
380 | &format_attr_mask_vnw.attr, | ||
381 | &format_attr_mask0.attr, | ||
382 | &format_attr_mask1.attr, | ||
383 | NULL, | ||
384 | }; | ||
385 | |||
386 | static struct uncore_event_desc snbep_uncore_imc_events[] = { | ||
387 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
388 | INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), | ||
389 | INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), | ||
390 | { /* end: all zeroes */ }, | ||
391 | }; | ||
392 | |||
393 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | ||
394 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | ||
395 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | ||
396 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), | ||
397 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), | ||
398 | { /* end: all zeroes */ }, | ||
399 | }; | ||
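
Each uncore_event_desc entry publishes a named sysfs alias whose value is a canned config string such as "event=0x04,umask=0x03"; the perf tool reads the alias and parses it against the format attributes defined above. A simplified model of that name-to-config binding (the toy_* struct and macro are illustrative, not the kernel's definitions):

    #include <stdio.h>

    /* Simplified model of the kernel's uncore_event_desc: a named
     * alias for a canned config string. */
    struct toy_event_desc {
            const char *name;
            const char *config;
    };

    #define TOY_EVENT_DESC(n, c) { .name = #n, .config = (c) }

    static const struct toy_event_desc imc_events[] = {
            TOY_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
            TOY_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
            TOY_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
            { NULL, NULL },         /* end marker, like the all-zeroes entry */
    };

    int main(void)
    {
            const struct toy_event_desc *d;

            /* what ends up visible under .../events/<name> in sysfs */
            for (d = imc_events; d->name; d++)
                    printf("%-16s -> %s\n", d->name, d->config);
            return 0;
    }

With such an alias in place, a counting session would look something like perf stat -a -e uncore_imc_0/cas_count_read/, assuming the kernel registers the box under that PMU name.
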
400 | |||
401 | static struct attribute_group snbep_uncore_format_group = { | ||
402 | .name = "format", | ||
403 | .attrs = snbep_uncore_formats_attr, | ||
404 | }; | ||
405 | |||
406 | static struct attribute_group snbep_uncore_ubox_format_group = { | ||
407 | .name = "format", | ||
408 | .attrs = snbep_uncore_ubox_formats_attr, | ||
409 | }; | ||
410 | |||
411 | static struct attribute_group snbep_uncore_cbox_format_group = { | ||
412 | .name = "format", | ||
413 | .attrs = snbep_uncore_cbox_formats_attr, | ||
414 | }; | ||
415 | |||
416 | static struct attribute_group snbep_uncore_pcu_format_group = { | ||
417 | .name = "format", | ||
418 | .attrs = snbep_uncore_pcu_formats_attr, | ||
419 | }; | ||
420 | |||
421 | static struct attribute_group snbep_uncore_qpi_format_group = { | ||
422 | .name = "format", | ||
423 | .attrs = snbep_uncore_qpi_formats_attr, | ||
424 | }; | ||
425 | |||
426 | #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ | ||
427 | .init_box = snbep_uncore_msr_init_box, \ | ||
428 | .disable_box = snbep_uncore_msr_disable_box, \ | ||
429 | .enable_box = snbep_uncore_msr_enable_box, \ | ||
430 | .disable_event = snbep_uncore_msr_disable_event, \ | ||
431 | .enable_event = snbep_uncore_msr_enable_event, \ | ||
432 | .read_counter = uncore_msr_read_counter | ||
433 | |||
434 | static struct intel_uncore_ops snbep_uncore_msr_ops = { | ||
435 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
436 | }; | ||
437 | |||
438 | #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ | ||
439 | .init_box = snbep_uncore_pci_init_box, \ | ||
440 | .disable_box = snbep_uncore_pci_disable_box, \ | ||
441 | .enable_box = snbep_uncore_pci_enable_box, \ | ||
442 | .disable_event = snbep_uncore_pci_disable_event, \ | ||
443 | .read_counter = snbep_uncore_pci_read_counter | ||
444 | |||
445 | static struct intel_uncore_ops snbep_uncore_pci_ops = { | ||
446 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), | ||
447 | .enable_event = snbep_uncore_pci_enable_event, | ||
448 | }; | ||
449 | |||
450 | static struct event_constraint snbep_uncore_cbox_constraints[] = { | ||
451 | UNCORE_EVENT_CONSTRAINT(0x01, 0x1), | ||
452 | UNCORE_EVENT_CONSTRAINT(0x02, 0x3), | ||
453 | UNCORE_EVENT_CONSTRAINT(0x04, 0x3), | ||
454 | UNCORE_EVENT_CONSTRAINT(0x05, 0x3), | ||
455 | UNCORE_EVENT_CONSTRAINT(0x07, 0x3), | ||
456 | UNCORE_EVENT_CONSTRAINT(0x09, 0x3), | ||
457 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), | ||
458 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), | ||
459 | UNCORE_EVENT_CONSTRAINT(0x13, 0x3), | ||
460 | UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), | ||
461 | UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), | ||
462 | UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), | ||
463 | UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), | ||
464 | EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), | ||
465 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), | ||
466 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
467 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), | ||
468 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
469 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
470 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
471 | UNCORE_EVENT_CONSTRAINT(0x35, 0x3), | ||
472 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), | ||
473 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), | ||
474 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
475 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), | ||
476 | UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), | ||
477 | EVENT_CONSTRAINT_END | ||
478 | }; | ||
479 | |||
480 | static struct event_constraint snbep_uncore_r2pcie_constraints[] = { | ||
481 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
482 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
483 | UNCORE_EVENT_CONSTRAINT(0x12, 0x1), | ||
484 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
485 | UNCORE_EVENT_CONSTRAINT(0x24, 0x3), | ||
486 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), | ||
487 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
488 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
489 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
490 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
491 | EVENT_CONSTRAINT_END | ||
492 | }; | ||
493 | |||
494 | static struct event_constraint snbep_uncore_r3qpi_constraints[] = { | ||
495 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
496 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
497 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), | ||
498 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), | ||
499 | UNCORE_EVENT_CONSTRAINT(0x20, 0x3), | ||
500 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), | ||
501 | UNCORE_EVENT_CONSTRAINT(0x22, 0x3), | ||
502 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
503 | UNCORE_EVENT_CONSTRAINT(0x24, 0x3), | ||
504 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), | ||
505 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
506 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), | ||
507 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), | ||
508 | UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), | ||
509 | UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), | ||
510 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), | ||
511 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), | ||
512 | UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), | ||
513 | UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), | ||
514 | UNCORE_EVENT_CONSTRAINT(0x30, 0x3), | ||
515 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), | ||
516 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
517 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
518 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
519 | UNCORE_EVENT_CONSTRAINT(0x36, 0x3), | ||
520 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), | ||
521 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
522 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), | ||
523 | EVENT_CONSTRAINT_END | ||
524 | }; | ||
525 | |||
526 | static struct intel_uncore_type snbep_uncore_ubox = { | ||
527 | .name = "ubox", | ||
528 | .num_counters = 2, | ||
529 | .num_boxes = 1, | ||
530 | .perf_ctr_bits = 44, | ||
531 | .fixed_ctr_bits = 48, | ||
532 | .perf_ctr = SNBEP_U_MSR_PMON_CTR0, | ||
533 | .event_ctl = SNBEP_U_MSR_PMON_CTL0, | ||
534 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, | ||
535 | .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, | ||
536 | .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, | ||
537 | .ops = &snbep_uncore_msr_ops, | ||
538 | .format_group = &snbep_uncore_ubox_format_group, | ||
539 | }; | ||
540 | |||
541 | static struct extra_reg snbep_uncore_cbox_extra_regs[] = { | ||
542 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | ||
543 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | ||
544 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), | ||
545 | SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), | ||
546 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), | ||
547 | SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), | ||
548 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), | ||
549 | SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), | ||
550 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), | ||
551 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), | ||
552 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), | ||
553 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), | ||
554 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), | ||
555 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), | ||
556 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), | ||
557 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), | ||
558 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), | ||
559 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), | ||
560 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), | ||
561 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), | ||
562 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), | ||
563 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), | ||
564 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), | ||
565 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), | ||
566 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), | ||
567 | EVENT_EXTRA_END | ||
568 | }; | ||
569 | |||
570 | static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
571 | { | ||
572 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
573 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
574 | int i; | ||
575 | |||
576 | if (uncore_box_is_fake(box)) | ||
577 | return; | ||
578 | |||
579 | for (i = 0; i < 5; i++) { | ||
580 | if (reg1->alloc & (0x1 << i)) | ||
581 | atomic_sub(1 << (i * 6), &er->ref); | ||
582 | } | ||
583 | reg1->alloc = 0; | ||
584 | } | ||
585 | |||
586 | static struct event_constraint * | ||
587 | __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, | ||
588 | u64 (*cbox_filter_mask)(int fields)) | ||
589 | { | ||
590 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
591 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
592 | int i, alloc = 0; | ||
593 | unsigned long flags; | ||
594 | u64 mask; | ||
595 | |||
596 | if (reg1->idx == EXTRA_REG_NONE) | ||
597 | return NULL; | ||
598 | |||
599 | raw_spin_lock_irqsave(&er->lock, flags); | ||
600 | for (i = 0; i < 5; i++) { | ||
601 | if (!(reg1->idx & (0x1 << i))) | ||
602 | continue; | ||
603 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
604 | continue; | ||
605 | |||
606 | mask = cbox_filter_mask(0x1 << i); | ||
607 | if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || | ||
608 | !((reg1->config ^ er->config) & mask)) { | ||
609 | atomic_add(1 << (i * 6), &er->ref); | ||
610 | er->config &= ~mask; | ||
611 | er->config |= reg1->config & mask; | ||
612 | alloc |= (0x1 << i); | ||
613 | } else { | ||
614 | break; | ||
615 | } | ||
616 | } | ||
617 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
618 | if (i < 5) | ||
619 | goto fail; | ||
620 | |||
621 | if (!uncore_box_is_fake(box)) | ||
622 | reg1->alloc |= alloc; | ||
623 | |||
624 | return NULL; | ||
625 | fail: | ||
626 | for (; i >= 0; i--) { | ||
627 | if (alloc & (0x1 << i)) | ||
628 | atomic_sub(1 << (i * 6), &er->ref); | ||
629 | } | ||
630 | return &constraint_empty; | ||
631 | } | ||
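
The C-box filter register is shared by all four counters in a box, so usage counts for its sub-fields are packed into a single atomic_t, six bits per field; that is what the 1 << (i * 6) add/sub arithmetic and the __BITS_VALUE() extraction above implement. A self-contained sketch of the packing, with a plain unsigned int standing in for the atomic_t:

    #include <stdio.h>

    /* Extract field 'idx' of width 'bits' from a packed counter word --
     * the same arithmetic the kernel's __BITS_VALUE() helper performs. */
    static int bits_value(unsigned int packed, int idx, int bits)
    {
            return (packed >> (idx * bits)) & ((1u << bits) - 1);
    }

    int main(void)
    {
            unsigned int ref = 0;   /* stand-in for atomic_read(&er->ref) */
            int i;

            ref += 1u << (0 * 6);   /* one user of field 0 (the TID field)  */
            ref += 1u << (3 * 6);   /* two users of field 3 (the OPC field) */
            ref += 1u << (3 * 6);

            for (i = 0; i < 5; i++)
                    printf("field %d refcount = %d\n", i, bits_value(ref, i, 6));
            return 0;
    }
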
632 | |||
633 | static u64 snbep_cbox_filter_mask(int fields) | ||
634 | { | ||
635 | u64 mask = 0; | ||
636 | |||
637 | if (fields & 0x1) | ||
638 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; | ||
639 | if (fields & 0x2) | ||
640 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; | ||
641 | if (fields & 0x4) | ||
642 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; | ||
643 | if (fields & 0x8) | ||
644 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; | ||
645 | |||
646 | return mask; | ||
647 | } | ||
648 | |||
649 | static struct event_constraint * | ||
650 | snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
651 | { | ||
652 | return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); | ||
653 | } | ||
654 | |||
655 | static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
656 | { | ||
657 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
658 | struct extra_reg *er; | ||
659 | int idx = 0; | ||
660 | |||
661 | for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { | ||
662 | if (er->event != (event->hw.config & er->config_mask)) | ||
663 | continue; | ||
664 | idx |= er->idx; | ||
665 | } | ||
666 | |||
667 | if (idx) { | ||
668 | reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + | ||
669 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | ||
670 | reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); | ||
671 | reg1->idx = idx; | ||
672 | } | ||
673 | return 0; | ||
674 | } | ||
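
snbep_cbox_hw_config() decides which filter fields an event needs by scanning the extra-register table: every entry whose event/umask pattern matches contributes its idx bits, and the union selects the filter mask. A condensed version of that walk, with two entries lifted from snbep_uncore_cbox_extra_regs above and the kernel's er->msr loop sentinel replaced by a zero config_mask:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_extra_reg {
            uint64_t event;         /* event+umask pattern to match     */
            uint64_t config_mask;   /* which config bits take part      */
            int idx;                /* filter fields this pattern needs */
    };

    static const struct toy_extra_reg regs[] = {
            { 0x0334, 0xffff, 0x4 },        /* needs the STATE field */
            { 0x4334, 0xffff, 0x6 },        /* needs NID + STATE     */
            { 0, 0, 0 },
    };

    /* OR together the filter fields of every matching table entry,
     * as the hw_config() loop above does. */
    static int fields_for(uint64_t config)
    {
            const struct toy_extra_reg *er;
            int idx = 0;

            for (er = regs; er->config_mask; er++)
                    if (er->event == (config & er->config_mask))
                            idx |= er->idx;
            return idx;
    }

    int main(void)
    {
            /* 0x4334 matches the second entry: NID + STATE needed */
            printf("fields = %#x\n", fields_for(0x4334));
            return 0;
    }
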
675 | |||
676 | static struct intel_uncore_ops snbep_uncore_cbox_ops = { | ||
677 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
678 | .hw_config = snbep_cbox_hw_config, | ||
679 | .get_constraint = snbep_cbox_get_constraint, | ||
680 | .put_constraint = snbep_cbox_put_constraint, | ||
681 | }; | ||
682 | |||
683 | static struct intel_uncore_type snbep_uncore_cbox = { | ||
684 | .name = "cbox", | ||
685 | .num_counters = 4, | ||
686 | .num_boxes = 8, | ||
687 | .perf_ctr_bits = 44, | ||
688 | .event_ctl = SNBEP_C0_MSR_PMON_CTL0, | ||
689 | .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, | ||
690 | .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, | ||
691 | .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, | ||
692 | .msr_offset = SNBEP_CBO_MSR_OFFSET, | ||
693 | .num_shared_regs = 1, | ||
694 | .constraints = snbep_uncore_cbox_constraints, | ||
695 | .ops = &snbep_uncore_cbox_ops, | ||
696 | .format_group = &snbep_uncore_cbox_format_group, | ||
697 | }; | ||
698 | |||
699 | static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
700 | { | ||
701 | struct hw_perf_event *hwc = &event->hw; | ||
702 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
703 | u64 config = reg1->config; | ||
704 | |||
705 | if (new_idx > reg1->idx) | ||
706 | config <<= 8 * (new_idx - reg1->idx); | ||
707 | else | ||
708 | config >>= 8 * (reg1->idx - new_idx); | ||
709 | |||
710 | if (modify) { | ||
711 | hwc->config += new_idx - reg1->idx; | ||
712 | reg1->config = config; | ||
713 | reg1->idx = new_idx; | ||
714 | } | ||
715 | return config; | ||
716 | } | ||
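
Each of the four PCU occupancy filter bands lives in its own byte of config1, and event selects 0xb-0xe map to bands 0-3. When the shared register already holds an incompatible value, the constraint code retries a neighbouring band, and snbep_pcu_alter_er() shifts the programmed byte into the new slot (bumping the event select by the same delta). The byte relocation on its own, as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Relocate an 8-bit filter-band value from byte slot old_idx to
     * byte slot new_idx of the packed config -- the shift that
     * snbep_pcu_alter_er() applies before retrying the allocation. */
    static uint64_t move_band(uint64_t config, int old_idx, int new_idx)
    {
            if (new_idx > old_idx)
                    return config << (8 * (new_idx - old_idx));
            return config >> (8 * (old_idx - new_idx));
    }

    int main(void)
    {
            uint64_t config = 0x32ULL << 8; /* band value 0x32 in slot 1 */

            /* moving to slot 3 shifts the byte up by 16 bits */
            printf("slot 1 -> slot 3: %#llx\n",
                   (unsigned long long)move_band(config, 1, 3));
            return 0;
    }
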
717 | |||
718 | static struct event_constraint * | ||
719 | snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
720 | { | ||
721 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
722 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
723 | unsigned long flags; | ||
724 | int idx = reg1->idx; | ||
725 | u64 mask, config1 = reg1->config; | ||
726 | bool ok = false; | ||
727 | |||
728 | if (reg1->idx == EXTRA_REG_NONE || | ||
729 | (!uncore_box_is_fake(box) && reg1->alloc)) | ||
730 | return NULL; | ||
731 | again: | ||
732 | mask = 0xffULL << (idx * 8); | ||
733 | raw_spin_lock_irqsave(&er->lock, flags); | ||
734 | if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || | ||
735 | !((config1 ^ er->config) & mask)) { | ||
736 | atomic_add(1 << (idx * 8), &er->ref); | ||
737 | er->config &= ~mask; | ||
738 | er->config |= config1 & mask; | ||
739 | ok = true; | ||
740 | } | ||
741 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
742 | |||
743 | if (!ok) { | ||
744 | idx = (idx + 1) % 4; | ||
745 | if (idx != reg1->idx) { | ||
746 | config1 = snbep_pcu_alter_er(event, idx, false); | ||
747 | goto again; | ||
748 | } | ||
749 | return &constraint_empty; | ||
750 | } | ||
751 | |||
752 | if (!uncore_box_is_fake(box)) { | ||
753 | if (idx != reg1->idx) | ||
754 | snbep_pcu_alter_er(event, idx, true); | ||
755 | reg1->alloc = 1; | ||
756 | } | ||
757 | return NULL; | ||
758 | } | ||
759 | |||
760 | static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
761 | { | ||
762 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
763 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
764 | |||
765 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
766 | return; | ||
767 | |||
768 | atomic_sub(1 << (reg1->idx * 8), &er->ref); | ||
769 | reg1->alloc = 0; | ||
770 | } | ||
771 | |||
772 | static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
773 | { | ||
774 | struct hw_perf_event *hwc = &event->hw; | ||
775 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
776 | int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; | ||
777 | |||
778 | if (ev_sel >= 0xb && ev_sel <= 0xe) { | ||
779 | reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; | ||
780 | reg1->idx = ev_sel - 0xb; | ||
781 | reg1->config = event->attr.config1 & (0xffULL << (reg1->idx * 8)); | ||
782 | } | ||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static struct intel_uncore_ops snbep_uncore_pcu_ops = { | ||
787 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
788 | .hw_config = snbep_pcu_hw_config, | ||
789 | .get_constraint = snbep_pcu_get_constraint, | ||
790 | .put_constraint = snbep_pcu_put_constraint, | ||
791 | }; | ||
792 | |||
793 | static struct intel_uncore_type snbep_uncore_pcu = { | ||
794 | .name = "pcu", | ||
795 | .num_counters = 4, | ||
796 | .num_boxes = 1, | ||
797 | .perf_ctr_bits = 48, | ||
798 | .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, | ||
799 | .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, | ||
800 | .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, | ||
801 | .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, | ||
802 | .num_shared_regs = 1, | ||
803 | .ops = &snbep_uncore_pcu_ops, | ||
804 | .format_group = &snbep_uncore_pcu_format_group, | ||
805 | }; | ||
806 | |||
807 | static struct intel_uncore_type *snbep_msr_uncores[] = { | ||
808 | &snbep_uncore_ubox, | ||
809 | &snbep_uncore_cbox, | ||
810 | &snbep_uncore_pcu, | ||
811 | NULL, | ||
812 | }; | ||
813 | |||
814 | enum { | ||
815 | SNBEP_PCI_QPI_PORT0_FILTER, | ||
816 | SNBEP_PCI_QPI_PORT1_FILTER, | ||
817 | }; | ||
818 | |||
819 | static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
820 | { | ||
821 | struct hw_perf_event *hwc = &event->hw; | ||
822 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
823 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
824 | |||
825 | if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { | ||
826 | reg1->idx = 0; | ||
827 | reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; | ||
828 | reg1->config = event->attr.config1; | ||
829 | reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; | ||
830 | reg2->config = event->attr.config2; | ||
831 | } | ||
832 | return 0; | ||
833 | } | ||
834 | |||
835 | static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
836 | { | ||
837 | struct pci_dev *pdev = box->pci_dev; | ||
838 | struct hw_perf_event *hwc = &event->hw; | ||
839 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
840 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
841 | |||
842 | if (reg1->idx != EXTRA_REG_NONE) { | ||
843 | int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; | ||
844 | struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx]; | ||
845 | WARN_ON_ONCE(!filter_pdev); | ||
846 | if (filter_pdev) { | ||
847 | pci_write_config_dword(filter_pdev, reg1->reg, | ||
848 | (u32)reg1->config); | ||
849 | pci_write_config_dword(filter_pdev, reg1->reg + 4, | ||
850 | (u32)(reg1->config >> 32)); | ||
851 | pci_write_config_dword(filter_pdev, reg2->reg, | ||
852 | (u32)reg2->config); | ||
853 | pci_write_config_dword(filter_pdev, reg2->reg + 4, | ||
854 | (u32)(reg2->config >> 32)); | ||
855 | } | ||
856 | } | ||
857 | |||
858 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
859 | } | ||
860 | |||
861 | static struct intel_uncore_ops snbep_uncore_qpi_ops = { | ||
862 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), | ||
863 | .enable_event = snbep_qpi_enable_event, | ||
864 | .hw_config = snbep_qpi_hw_config, | ||
865 | .get_constraint = uncore_get_constraint, | ||
866 | .put_constraint = uncore_put_constraint, | ||
867 | }; | ||
868 | |||
869 | #define SNBEP_UNCORE_PCI_COMMON_INIT() \ | ||
870 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ | ||
871 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ | ||
872 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ | ||
873 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ | ||
874 | .ops = &snbep_uncore_pci_ops, \ | ||
875 | .format_group = &snbep_uncore_format_group | ||
876 | |||
877 | static struct intel_uncore_type snbep_uncore_ha = { | ||
878 | .name = "ha", | ||
879 | .num_counters = 4, | ||
880 | .num_boxes = 1, | ||
881 | .perf_ctr_bits = 48, | ||
882 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
883 | }; | ||
884 | |||
885 | static struct intel_uncore_type snbep_uncore_imc = { | ||
886 | .name = "imc", | ||
887 | .num_counters = 4, | ||
888 | .num_boxes = 4, | ||
889 | .perf_ctr_bits = 48, | ||
890 | .fixed_ctr_bits = 48, | ||
891 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, | ||
892 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, | ||
893 | .event_descs = snbep_uncore_imc_events, | ||
894 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
895 | }; | ||
896 | |||
897 | static struct intel_uncore_type snbep_uncore_qpi = { | ||
898 | .name = "qpi", | ||
899 | .num_counters = 4, | ||
900 | .num_boxes = 2, | ||
901 | .perf_ctr_bits = 48, | ||
902 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
903 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
904 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
905 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
906 | .num_shared_regs = 1, | ||
907 | .ops = &snbep_uncore_qpi_ops, | ||
908 | .event_descs = snbep_uncore_qpi_events, | ||
909 | .format_group = &snbep_uncore_qpi_format_group, | ||
910 | }; | ||
911 | |||
912 | |||
913 | static struct intel_uncore_type snbep_uncore_r2pcie = { | ||
914 | .name = "r2pcie", | ||
915 | .num_counters = 4, | ||
916 | .num_boxes = 1, | ||
917 | .perf_ctr_bits = 44, | ||
918 | .constraints = snbep_uncore_r2pcie_constraints, | ||
919 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
920 | }; | ||
921 | |||
922 | static struct intel_uncore_type snbep_uncore_r3qpi = { | ||
923 | .name = "r3qpi", | ||
924 | .num_counters = 3, | ||
925 | .num_boxes = 2, | ||
926 | .perf_ctr_bits = 44, | ||
927 | .constraints = snbep_uncore_r3qpi_constraints, | ||
928 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
929 | }; | ||
930 | |||
931 | enum { | ||
932 | SNBEP_PCI_UNCORE_HA, | ||
933 | SNBEP_PCI_UNCORE_IMC, | ||
934 | SNBEP_PCI_UNCORE_QPI, | ||
935 | SNBEP_PCI_UNCORE_R2PCIE, | ||
936 | SNBEP_PCI_UNCORE_R3QPI, | ||
937 | }; | ||
938 | |||
939 | static struct intel_uncore_type *snbep_pci_uncores[] = { | ||
940 | [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, | ||
941 | [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, | ||
942 | [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, | ||
943 | [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, | ||
944 | [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, | ||
945 | NULL, | ||
946 | }; | ||
947 | |||
948 | static const struct pci_device_id snbep_uncore_pci_ids[] = { | ||
949 | { /* Home Agent */ | ||
950 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), | ||
951 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), | ||
952 | }, | ||
953 | { /* MC Channel 0 */ | ||
954 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), | ||
955 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), | ||
956 | }, | ||
957 | { /* MC Channel 1 */ | ||
958 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), | ||
959 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), | ||
960 | }, | ||
961 | { /* MC Channel 2 */ | ||
962 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), | ||
963 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), | ||
964 | }, | ||
965 | { /* MC Channel 3 */ | ||
966 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), | ||
967 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), | ||
968 | }, | ||
969 | { /* QPI Port 0 */ | ||
970 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), | ||
971 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), | ||
972 | }, | ||
973 | { /* QPI Port 1 */ | ||
974 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), | ||
975 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), | ||
976 | }, | ||
977 | { /* R2PCIe */ | ||
978 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), | ||
979 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), | ||
980 | }, | ||
981 | { /* R3QPI Link 0 */ | ||
982 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), | ||
983 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), | ||
984 | }, | ||
985 | { /* R3QPI Link 1 */ | ||
986 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), | ||
987 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), | ||
988 | }, | ||
989 | { /* QPI Port 0 filter */ | ||
990 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), | ||
991 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
992 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
993 | }, | ||
994 | { /* QPI Port 1 filter */ | ||
995 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), | ||
996 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
997 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
998 | }, | ||
999 | { /* end: all zeroes */ } | ||
1000 | }; | ||
1001 | |||
1002 | static struct pci_driver snbep_uncore_pci_driver = { | ||
1003 | .name = "snbep_uncore", | ||
1004 | .id_table = snbep_uncore_pci_ids, | ||
1005 | }; | ||
1006 | |||
1007 | /* | ||
1008 | * build PCI bus to socket mapping | ||
1009 | */ | ||
1010 | static int snbep_pci2phy_map_init(int devid) | ||
1011 | { | ||
1012 | struct pci_dev *ubox_dev = NULL; | ||
1013 | int i, bus, nodeid; | ||
1014 | int err = 0; | ||
1015 | u32 config = 0; | ||
1016 | |||
1017 | while (1) { | ||
1018 | /* find the UBOX device */ | ||
1019 | ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); | ||
1020 | if (!ubox_dev) | ||
1021 | break; | ||
1022 | bus = ubox_dev->bus->number; | ||
1023 | /* get the Node ID from the local register */ | ||
1024 | err = pci_read_config_dword(ubox_dev, 0x40, &config); | ||
1025 | if (err) | ||
1026 | break; | ||
1027 | nodeid = config; | ||
1028 | /* get the Node ID mapping */ | ||
1029 | err = pci_read_config_dword(ubox_dev, 0x54, &config); | ||
1030 | if (err) | ||
1031 | break; | ||
1032 | /* | ||
1033 | * each three-bit field in the Node ID mapping register | ||
1034 | * maps to a particular node. | ||
1035 | */ | ||
1036 | for (i = 0; i < 8; i++) { | ||
1037 | if (nodeid == ((config >> (3 * i)) & 0x7)) { | ||
1038 | pcibus_to_physid[bus] = i; | ||
1039 | break; | ||
1040 | } | ||
1041 | } | ||
1042 | } | ||
1043 | |||
1044 | if (!err) { | ||
1045 | /* | ||
1046 | * For PCI buses without a UBOX device, find the next bus | ||
1047 | * that has one and reuse its mapping. | ||
1048 | */ | ||
1049 | i = -1; | ||
1050 | for (bus = 255; bus >= 0; bus--) { | ||
1051 | if (pcibus_to_physid[bus] >= 0) | ||
1052 | i = pcibus_to_physid[bus]; | ||
1053 | else | ||
1054 | pcibus_to_physid[bus] = i; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | if (ubox_dev) | ||
1059 | pci_dev_put(ubox_dev); | ||
1060 | |||
1061 | return err ? pcibios_err_to_errno(err) : 0; | ||
1062 | } | ||
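
The mapping register read at offset 0x54 encodes one node per 3-bit field, so the physical socket id is the index of the field whose value equals the local node id read from offset 0x40. The decode extracted into a standalone sketch (the register values below are invented for illustration):

    #include <stdio.h>

    /* Scan the eight 3-bit fields of the Node ID mapping register for
     * the one matching the local node id, as the loop above does. */
    static int nodeid_to_physid(unsigned int nodeid, unsigned int mapping)
    {
            int i;

            for (i = 0; i < 8; i++)
                    if (nodeid == ((mapping >> (3 * i)) & 0x7))
                            return i;
            return -1;
    }

    int main(void)
    {
            unsigned int nodeid = 2;                /* from config offset 0x40 */
            unsigned int mapping = 2u << (3 * 5);   /* from config offset 0x54 */

            printf("physid = %d\n", nodeid_to_physid(nodeid, mapping));
            return 0;
    }
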
1063 | /* end of Sandy Bridge-EP uncore support */ | ||
1064 | |||
1065 | /* IvyTown uncore support */ | ||
1066 | static void ivt_uncore_msr_init_box(struct intel_uncore_box *box) | ||
1067 | { | ||
1068 | unsigned msr = uncore_msr_box_ctl(box); | ||
1069 | if (msr) | ||
1070 | wrmsrl(msr, IVT_PMON_BOX_CTL_INT); | ||
1071 | } | ||
1072 | |||
1073 | static void ivt_uncore_pci_init_box(struct intel_uncore_box *box) | ||
1074 | { | ||
1075 | struct pci_dev *pdev = box->pci_dev; | ||
1076 | |||
1077 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT); | ||
1078 | } | ||
1079 | |||
1080 | #define IVT_UNCORE_MSR_OPS_COMMON_INIT() \ | ||
1081 | .init_box = ivt_uncore_msr_init_box, \ | ||
1082 | .disable_box = snbep_uncore_msr_disable_box, \ | ||
1083 | .enable_box = snbep_uncore_msr_enable_box, \ | ||
1084 | .disable_event = snbep_uncore_msr_disable_event, \ | ||
1085 | .enable_event = snbep_uncore_msr_enable_event, \ | ||
1086 | .read_counter = uncore_msr_read_counter | ||
1087 | |||
1088 | static struct intel_uncore_ops ivt_uncore_msr_ops = { | ||
1089 | IVT_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1090 | }; | ||
1091 | |||
1092 | static struct intel_uncore_ops ivt_uncore_pci_ops = { | ||
1093 | .init_box = ivt_uncore_pci_init_box, | ||
1094 | .disable_box = snbep_uncore_pci_disable_box, | ||
1095 | .enable_box = snbep_uncore_pci_enable_box, | ||
1096 | .disable_event = snbep_uncore_pci_disable_event, | ||
1097 | .enable_event = snbep_uncore_pci_enable_event, | ||
1098 | .read_counter = snbep_uncore_pci_read_counter, | ||
1099 | }; | ||
1100 | |||
1101 | #define IVT_UNCORE_PCI_COMMON_INIT() \ | ||
1102 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ | ||
1103 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ | ||
1104 | .event_mask = IVT_PMON_RAW_EVENT_MASK, \ | ||
1105 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ | ||
1106 | .ops = &ivt_uncore_pci_ops, \ | ||
1107 | .format_group = &ivt_uncore_format_group | ||
1108 | |||
1109 | static struct attribute *ivt_uncore_formats_attr[] = { | ||
1110 | &format_attr_event.attr, | ||
1111 | &format_attr_umask.attr, | ||
1112 | &format_attr_edge.attr, | ||
1113 | &format_attr_inv.attr, | ||
1114 | &format_attr_thresh8.attr, | ||
1115 | NULL, | ||
1116 | }; | ||
1117 | |||
1118 | static struct attribute *ivt_uncore_ubox_formats_attr[] = { | ||
1119 | &format_attr_event.attr, | ||
1120 | &format_attr_umask.attr, | ||
1121 | &format_attr_edge.attr, | ||
1122 | &format_attr_inv.attr, | ||
1123 | &format_attr_thresh5.attr, | ||
1124 | NULL, | ||
1125 | }; | ||
1126 | |||
1127 | static struct attribute *ivt_uncore_cbox_formats_attr[] = { | ||
1128 | &format_attr_event.attr, | ||
1129 | &format_attr_umask.attr, | ||
1130 | &format_attr_edge.attr, | ||
1131 | &format_attr_tid_en.attr, | ||
1132 | &format_attr_thresh8.attr, | ||
1133 | &format_attr_filter_tid.attr, | ||
1134 | &format_attr_filter_link.attr, | ||
1135 | &format_attr_filter_state2.attr, | ||
1136 | &format_attr_filter_nid2.attr, | ||
1137 | &format_attr_filter_opc2.attr, | ||
1138 | NULL, | ||
1139 | }; | ||
1140 | |||
1141 | static struct attribute *ivt_uncore_pcu_formats_attr[] = { | ||
1142 | &format_attr_event_ext.attr, | ||
1143 | &format_attr_occ_sel.attr, | ||
1144 | &format_attr_edge.attr, | ||
1145 | &format_attr_thresh5.attr, | ||
1146 | &format_attr_occ_invert.attr, | ||
1147 | &format_attr_occ_edge.attr, | ||
1148 | &format_attr_filter_band0.attr, | ||
1149 | &format_attr_filter_band1.attr, | ||
1150 | &format_attr_filter_band2.attr, | ||
1151 | &format_attr_filter_band3.attr, | ||
1152 | NULL, | ||
1153 | }; | ||
1154 | |||
1155 | static struct attribute *ivt_uncore_qpi_formats_attr[] = { | ||
1156 | &format_attr_event_ext.attr, | ||
1157 | &format_attr_umask.attr, | ||
1158 | &format_attr_edge.attr, | ||
1159 | &format_attr_thresh8.attr, | ||
1160 | &format_attr_match_rds.attr, | ||
1161 | &format_attr_match_rnid30.attr, | ||
1162 | &format_attr_match_rnid4.attr, | ||
1163 | &format_attr_match_dnid.attr, | ||
1164 | &format_attr_match_mc.attr, | ||
1165 | &format_attr_match_opc.attr, | ||
1166 | &format_attr_match_vnw.attr, | ||
1167 | &format_attr_match0.attr, | ||
1168 | &format_attr_match1.attr, | ||
1169 | &format_attr_mask_rds.attr, | ||
1170 | &format_attr_mask_rnid30.attr, | ||
1171 | &format_attr_mask_rnid4.attr, | ||
1172 | &format_attr_mask_dnid.attr, | ||
1173 | &format_attr_mask_mc.attr, | ||
1174 | &format_attr_mask_opc.attr, | ||
1175 | &format_attr_mask_vnw.attr, | ||
1176 | &format_attr_mask0.attr, | ||
1177 | &format_attr_mask1.attr, | ||
1178 | NULL, | ||
1179 | }; | ||
1180 | |||
1181 | static struct attribute_group ivt_uncore_format_group = { | ||
1182 | .name = "format", | ||
1183 | .attrs = ivt_uncore_formats_attr, | ||
1184 | }; | ||
1185 | |||
1186 | static struct attribute_group ivt_uncore_ubox_format_group = { | ||
1187 | .name = "format", | ||
1188 | .attrs = ivt_uncore_ubox_formats_attr, | ||
1189 | }; | ||
1190 | |||
1191 | static struct attribute_group ivt_uncore_cbox_format_group = { | ||
1192 | .name = "format", | ||
1193 | .attrs = ivt_uncore_cbox_formats_attr, | ||
1194 | }; | ||
1195 | |||
1196 | static struct attribute_group ivt_uncore_pcu_format_group = { | ||
1197 | .name = "format", | ||
1198 | .attrs = ivt_uncore_pcu_formats_attr, | ||
1199 | }; | ||
1200 | |||
1201 | static struct attribute_group ivt_uncore_qpi_format_group = { | ||
1202 | .name = "format", | ||
1203 | .attrs = ivt_uncore_qpi_formats_attr, | ||
1204 | }; | ||
1205 | |||
1206 | static struct intel_uncore_type ivt_uncore_ubox = { | ||
1207 | .name = "ubox", | ||
1208 | .num_counters = 2, | ||
1209 | .num_boxes = 1, | ||
1210 | .perf_ctr_bits = 44, | ||
1211 | .fixed_ctr_bits = 48, | ||
1212 | .perf_ctr = SNBEP_U_MSR_PMON_CTR0, | ||
1213 | .event_ctl = SNBEP_U_MSR_PMON_CTL0, | ||
1214 | .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK, | ||
1215 | .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, | ||
1216 | .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, | ||
1217 | .ops = &ivt_uncore_msr_ops, | ||
1218 | .format_group = &ivt_uncore_ubox_format_group, | ||
1219 | }; | ||
1220 | |||
1221 | static struct extra_reg ivt_uncore_cbox_extra_regs[] = { | ||
1222 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | ||
1223 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | ||
1224 | SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), | ||
1225 | |||
1226 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), | ||
1227 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), | ||
1228 | SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), | ||
1229 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), | ||
1230 | SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), | ||
1231 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), | ||
1232 | SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), | ||
1233 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), | ||
1234 | SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), | ||
1235 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), | ||
1236 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), | ||
1237 | SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), | ||
1238 | SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), | ||
1239 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), | ||
1240 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), | ||
1241 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), | ||
1242 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), | ||
1243 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), | ||
1244 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), | ||
1245 | SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), | ||
1246 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), | ||
1247 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), | ||
1248 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), | ||
1249 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), | ||
1250 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), | ||
1251 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), | ||
1252 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), | ||
1253 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), | ||
1254 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), | ||
1255 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), | ||
1256 | SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), | ||
1257 | SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), | ||
1258 | SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), | ||
1259 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), | ||
1260 | EVENT_EXTRA_END | ||
1261 | }; | ||
1262 | |||
1263 | static u64 ivt_cbox_filter_mask(int fields) | ||
1264 | { | ||
1265 | u64 mask = 0; | ||
1266 | |||
1267 | if (fields & 0x1) | ||
1268 | mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID; | ||
1269 | if (fields & 0x2) | ||
1270 | mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK; | ||
1271 | if (fields & 0x4) | ||
1272 | mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE; | ||
1273 | if (fields & 0x8) | ||
1274 | mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID; | ||
1275 | if (fields & 0x10) | ||
1276 | mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC; | ||
1277 | |||
1278 | return mask; | ||
1279 | } | ||
1280 | |||
1281 | static struct event_constraint * | ||
1282 | ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1283 | { | ||
1284 | return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask); | ||
1285 | } | ||
1286 | |||
1287 | static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1288 | { | ||
1289 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1290 | struct extra_reg *er; | ||
1291 | int idx = 0; | ||
1292 | |||
1293 | for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) { | ||
1294 | if (er->event != (event->hw.config & er->config_mask)) | ||
1295 | continue; | ||
1296 | idx |= er->idx; | ||
1297 | } | ||
1298 | |||
1299 | if (idx) { | ||
1300 | reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + | ||
1301 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | ||
1302 | reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx); | ||
1303 | reg1->idx = idx; | ||
1304 | } | ||
1305 | return 0; | ||
1306 | } | ||
1307 | |||
1308 | static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1309 | { | ||
1310 | struct hw_perf_event *hwc = &event->hw; | ||
1311 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1312 | |||
1313 | if (reg1->idx != EXTRA_REG_NONE) { | ||
1314 | u64 filter = uncore_shared_reg_config(box, 0); | ||
1315 | wrmsrl(reg1->reg, filter & 0xffffffff); | ||
1316 | wrmsrl(reg1->reg + 6, filter >> 32); | ||
1317 | } | ||
1318 | |||
1319 | wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
1320 | } | ||
1321 | |||
1322 | static struct intel_uncore_ops ivt_uncore_cbox_ops = { | ||
1323 | .init_box = ivt_uncore_msr_init_box, | ||
1324 | .disable_box = snbep_uncore_msr_disable_box, | ||
1325 | .enable_box = snbep_uncore_msr_enable_box, | ||
1326 | .disable_event = snbep_uncore_msr_disable_event, | ||
1327 | .enable_event = ivt_cbox_enable_event, | ||
1328 | .read_counter = uncore_msr_read_counter, | ||
1329 | .hw_config = ivt_cbox_hw_config, | ||
1330 | .get_constraint = ivt_cbox_get_constraint, | ||
1331 | .put_constraint = snbep_cbox_put_constraint, | ||
1332 | }; | ||
1333 | |||
1334 | static struct intel_uncore_type ivt_uncore_cbox = { | ||
1335 | .name = "cbox", | ||
1336 | .num_counters = 4, | ||
1337 | .num_boxes = 15, | ||
1338 | .perf_ctr_bits = 44, | ||
1339 | .event_ctl = SNBEP_C0_MSR_PMON_CTL0, | ||
1340 | .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, | ||
1341 | .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK, | ||
1342 | .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, | ||
1343 | .msr_offset = SNBEP_CBO_MSR_OFFSET, | ||
1344 | .num_shared_regs = 1, | ||
1345 | .constraints = snbep_uncore_cbox_constraints, | ||
1346 | .ops = &ivt_uncore_cbox_ops, | ||
1347 | .format_group = &ivt_uncore_cbox_format_group, | ||
1348 | }; | ||
1349 | |||
1350 | static struct intel_uncore_ops ivt_uncore_pcu_ops = { | ||
1351 | IVT_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1352 | .hw_config = snbep_pcu_hw_config, | ||
1353 | .get_constraint = snbep_pcu_get_constraint, | ||
1354 | .put_constraint = snbep_pcu_put_constraint, | ||
1355 | }; | ||
1356 | |||
1357 | static struct intel_uncore_type ivt_uncore_pcu = { | ||
1358 | .name = "pcu", | ||
1359 | .num_counters = 4, | ||
1360 | .num_boxes = 1, | ||
1361 | .perf_ctr_bits = 48, | ||
1362 | .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, | ||
1363 | .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, | ||
1364 | .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK, | ||
1365 | .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, | ||
1366 | .num_shared_regs = 1, | ||
1367 | .ops = &ivt_uncore_pcu_ops, | ||
1368 | .format_group = &ivt_uncore_pcu_format_group, | ||
1369 | }; | ||
1370 | |||
1371 | static struct intel_uncore_type *ivt_msr_uncores[] = { | ||
1372 | &ivt_uncore_ubox, | ||
1373 | &ivt_uncore_cbox, | ||
1374 | &ivt_uncore_pcu, | ||
1375 | NULL, | ||
1376 | }; | ||
1377 | |||
1378 | static struct intel_uncore_type ivt_uncore_ha = { | ||
1379 | .name = "ha", | ||
1380 | .num_counters = 4, | ||
1381 | .num_boxes = 2, | ||
1382 | .perf_ctr_bits = 48, | ||
1383 | IVT_UNCORE_PCI_COMMON_INIT(), | ||
1384 | }; | ||
1385 | |||
1386 | static struct intel_uncore_type ivt_uncore_imc = { | ||
1387 | .name = "imc", | ||
1388 | .num_counters = 4, | ||
1389 | .num_boxes = 8, | ||
1390 | .perf_ctr_bits = 48, | ||
1391 | .fixed_ctr_bits = 48, | ||
1392 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, | ||
1393 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, | ||
1394 | IVT_UNCORE_PCI_COMMON_INIT(), | ||
1395 | }; | ||
1396 | |||
1397 | /* registers in IRP boxes are not properly aligned */ | ||
1398 | static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; | ||
1399 | static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; | ||
1400 | |||
1401 | static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1402 | { | ||
1403 | struct pci_dev *pdev = box->pci_dev; | ||
1404 | struct hw_perf_event *hwc = &event->hw; | ||
1405 | |||
1406 | pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], | ||
1407 | hwc->config | SNBEP_PMON_CTL_EN); | ||
1408 | } | ||
1409 | |||
1410 | static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1411 | { | ||
1412 | struct pci_dev *pdev = box->pci_dev; | ||
1413 | struct hw_perf_event *hwc = &event->hw; | ||
1414 | |||
1415 | pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config); | ||
1416 | } | ||
1417 | |||
1418 | static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
1419 | { | ||
1420 | struct pci_dev *pdev = box->pci_dev; | ||
1421 | struct hw_perf_event *hwc = &event->hw; | ||
1422 | u64 count = 0; | ||
1423 | |||
1424 | pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count); | ||
1425 | pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); | ||
1426 | |||
1427 | return count; | ||
1428 | } | ||
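
As the comment above notes, the IRP box's control and counter registers do not follow the usual base + idx * stride layout, so these ops index into the ivt_uncore_irp_ctls[]/ivt_uncore_irp_ctrs[] tables instead. A quick sketch contrasting what a regular stride-8 layout would give with the actual offsets (copied from the array above):

    #include <stdio.h>

    static const unsigned irp_ctrs[] = { 0xa0, 0xb0, 0xb8, 0xc0 };

    int main(void)
    {
            int idx;

            /* a stride-8 layout would put counter idx at 0xa0 + idx * 8 */
            for (idx = 0; idx < 4; idx++)
                    printf("counter %d: regular %#x, actual %#x\n",
                           idx, 0xa0 + idx * 8, irp_ctrs[idx]);
            return 0;
    }
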
1429 | |||
1430 | static struct intel_uncore_ops ivt_uncore_irp_ops = { | ||
1431 | .init_box = ivt_uncore_pci_init_box, | ||
1432 | .disable_box = snbep_uncore_pci_disable_box, | ||
1433 | .enable_box = snbep_uncore_pci_enable_box, | ||
1434 | .disable_event = ivt_uncore_irp_disable_event, | ||
1435 | .enable_event = ivt_uncore_irp_enable_event, | ||
1436 | .read_counter = ivt_uncore_irp_read_counter, | ||
1437 | }; | ||
1438 | |||
1439 | static struct intel_uncore_type ivt_uncore_irp = { | ||
1440 | .name = "irp", | ||
1441 | .num_counters = 4, | ||
1442 | .num_boxes = 1, | ||
1443 | .perf_ctr_bits = 48, | ||
1444 | .event_mask = IVT_PMON_RAW_EVENT_MASK, | ||
1445 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
1446 | .ops = &ivt_uncore_irp_ops, | ||
1447 | .format_group = &ivt_uncore_format_group, | ||
1448 | }; | ||
1449 | |||
1450 | static struct intel_uncore_ops ivt_uncore_qpi_ops = { | ||
1451 | .init_box = ivt_uncore_pci_init_box, | ||
1452 | .disable_box = snbep_uncore_pci_disable_box, | ||
1453 | .enable_box = snbep_uncore_pci_enable_box, | ||
1454 | .disable_event = snbep_uncore_pci_disable_event, | ||
1455 | .enable_event = snbep_qpi_enable_event, | ||
1456 | .read_counter = snbep_uncore_pci_read_counter, | ||
1457 | .hw_config = snbep_qpi_hw_config, | ||
1458 | .get_constraint = uncore_get_constraint, | ||
1459 | .put_constraint = uncore_put_constraint, | ||
1460 | }; | ||
1461 | |||
1462 | static struct intel_uncore_type ivt_uncore_qpi = { | ||
1463 | .name = "qpi", | ||
1464 | .num_counters = 4, | ||
1465 | .num_boxes = 3, | ||
1466 | .perf_ctr_bits = 48, | ||
1467 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
1468 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
1469 | .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
1470 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
1471 | .num_shared_regs = 1, | ||
1472 | .ops = &ivt_uncore_qpi_ops, | ||
1473 | .format_group = &ivt_uncore_qpi_format_group, | ||
1474 | }; | ||
1475 | |||
1476 | static struct intel_uncore_type ivt_uncore_r2pcie = { | ||
1477 | .name = "r2pcie", | ||
1478 | .num_counters = 4, | ||
1479 | .num_boxes = 1, | ||
1480 | .perf_ctr_bits = 44, | ||
1481 | .constraints = snbep_uncore_r2pcie_constraints, | ||
1482 | IVT_UNCORE_PCI_COMMON_INIT(), | ||
1483 | }; | ||
1484 | |||
1485 | static struct intel_uncore_type ivt_uncore_r3qpi = { | ||
1486 | .name = "r3qpi", | ||
1487 | .num_counters = 3, | ||
1488 | .num_boxes = 2, | ||
1489 | .perf_ctr_bits = 44, | ||
1490 | .constraints = snbep_uncore_r3qpi_constraints, | ||
1491 | IVT_UNCORE_PCI_COMMON_INIT(), | ||
1492 | }; | ||
1493 | |||
1494 | enum { | ||
1495 | IVT_PCI_UNCORE_HA, | ||
1496 | IVT_PCI_UNCORE_IMC, | ||
1497 | IVT_PCI_UNCORE_IRP, | ||
1498 | IVT_PCI_UNCORE_QPI, | ||
1499 | IVT_PCI_UNCORE_R2PCIE, | ||
1500 | IVT_PCI_UNCORE_R3QPI, | ||
1501 | }; | ||
1502 | |||
1503 | static struct intel_uncore_type *ivt_pci_uncores[] = { | ||
1504 | [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha, | ||
1505 | [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc, | ||
1506 | [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp, | ||
1507 | [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi, | ||
1508 | [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie, | ||
1509 | [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi, | ||
1510 | NULL, | ||
1511 | }; | ||
1512 | |||
1513 | static const struct pci_device_id ivt_uncore_pci_ids[] = { | ||
1514 | { /* Home Agent 0 */ | ||
1515 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), | ||
1516 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0), | ||
1517 | }, | ||
1518 | { /* Home Agent 1 */ | ||
1519 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), | ||
1520 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1), | ||
1521 | }, | ||
1522 | { /* MC0 Channel 0 */ | ||
1523 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), | ||
1524 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0), | ||
1525 | }, | ||
1526 | { /* MC0 Channel 1 */ | ||
1527 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), | ||
1528 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1), | ||
1529 | }, | ||
1530 | { /* MC0 Channel 3 */ | ||
1531 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), | ||
1532 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2), | ||
1533 | }, | ||
1534 | { /* MC0 Channel 4 */ | ||
1535 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), | ||
1536 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3), | ||
1537 | }, | ||
1538 | { /* MC1 Channel 0 */ | ||
1539 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), | ||
1540 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4), | ||
1541 | }, | ||
1542 | { /* MC1 Channel 1 */ | ||
1543 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), | ||
1544 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5), | ||
1545 | }, | ||
1546 | { /* MC1 Channel 3 */ | ||
1547 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), | ||
1548 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6), | ||
1549 | }, | ||
1550 | { /* MC1 Channel 4 */ | ||
1551 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), | ||
1552 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7), | ||
1553 | }, | ||
1554 | { /* IRP */ | ||
1555 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), | ||
1556 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0), | ||
1557 | }, | ||
1558 | { /* QPI0 Port 0 */ | ||
1559 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), | ||
1560 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0), | ||
1561 | }, | ||
1562 | { /* QPI0 Port 1 */ | ||
1563 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), | ||
1564 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1), | ||
1565 | }, | ||
1566 | { /* QPI1 Port 2 */ | ||
1567 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), | ||
1568 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2), | ||
1569 | }, | ||
1570 | { /* R2PCIe */ | ||
1571 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), | ||
1572 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0), | ||
1573 | }, | ||
1574 | { /* R3QPI0 Link 0 */ | ||
1575 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), | ||
1576 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0), | ||
1577 | }, | ||
1578 | { /* R3QPI0 Link 1 */ | ||
1579 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), | ||
1580 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1), | ||
1581 | }, | ||
1582 | { /* R3QPI1 Link 2 */ | ||
1583 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), | ||
1584 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2), | ||
1585 | }, | ||
1586 | { /* QPI Port 0 filter */ | ||
1587 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), | ||
1588 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1589 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
1590 | }, | ||
1591 | { /* QPI Port 1 filter */ | ||
1592 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), | ||
1593 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1594 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
1595 | }, | ||
1596 | { /* end: all zeroes */ } | ||
1597 | }; | ||
1598 | |||
1599 | static struct pci_driver ivt_uncore_pci_driver = { | ||
1600 | .name = "ivt_uncore", | ||
1601 | .id_table = ivt_uncore_pci_ids, | ||
1602 | }; | ||
1603 | /* end of IvyTown uncore support */ | ||
1604 | |||
1605 | /* Sandy Bridge uncore support */ | ||
1606 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1607 | { | ||
1608 | struct hw_perf_event *hwc = &event->hw; | ||
1609 | |||
1610 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | ||
1611 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | ||
1612 | else | ||
1613 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); | ||
1614 | } | ||
1615 | |||
1616 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1617 | { | ||
1618 | wrmsrl(event->hw.config_base, 0); | ||
1619 | } | ||
1620 | |||
1621 | static void snb_uncore_msr_init_box(struct intel_uncore_box *box) | ||
1622 | { | ||
1623 | if (box->pmu->pmu_idx == 0) { | ||
1624 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, | ||
1625 | SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | static struct uncore_event_desc snb_uncore_events[] = { | ||
1630 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
1631 | { /* end: all zeroes */ }, | ||
1632 | }; | ||
1633 | |||
1634 | static struct attribute *snb_uncore_formats_attr[] = { | ||
1635 | &format_attr_event.attr, | ||
1636 | &format_attr_umask.attr, | ||
1637 | &format_attr_edge.attr, | ||
1638 | &format_attr_inv.attr, | ||
1639 | &format_attr_cmask5.attr, | ||
1640 | NULL, | ||
1641 | }; | ||
1642 | |||
1643 | static struct attribute_group snb_uncore_format_group = { | ||
1644 | .name = "format", | ||
1645 | .attrs = snb_uncore_formats_attr, | ||
1646 | }; | ||
1647 | |||
1648 | static struct intel_uncore_ops snb_uncore_msr_ops = { | ||
1649 | .init_box = snb_uncore_msr_init_box, | ||
1650 | .disable_event = snb_uncore_msr_disable_event, | ||
1651 | .enable_event = snb_uncore_msr_enable_event, | ||
1652 | .read_counter = uncore_msr_read_counter, | ||
1653 | }; | ||
1654 | |||
1655 | static struct event_constraint snb_uncore_cbox_constraints[] = { | ||
1656 | UNCORE_EVENT_CONSTRAINT(0x80, 0x1), | ||
1657 | UNCORE_EVENT_CONSTRAINT(0x83, 0x1), | ||
1658 | EVENT_CONSTRAINT_END | ||
1659 | }; | ||
1660 | |||
1661 | static struct intel_uncore_type snb_uncore_cbox = { | ||
1662 | .name = "cbox", | ||
1663 | .num_counters = 2, | ||
1664 | .num_boxes = 4, | ||
1665 | .perf_ctr_bits = 44, | ||
1666 | .fixed_ctr_bits = 48, | ||
1667 | .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, | ||
1668 | .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, | ||
1669 | .fixed_ctr = SNB_UNC_FIXED_CTR, | ||
1670 | .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, | ||
1671 | .single_fixed = 1, | ||
1672 | .event_mask = SNB_UNC_RAW_EVENT_MASK, | ||
1673 | .msr_offset = SNB_UNC_CBO_MSR_OFFSET, | ||
1674 | .constraints = snb_uncore_cbox_constraints, | ||
1675 | .ops = &snb_uncore_msr_ops, | ||
1676 | .format_group = &snb_uncore_format_group, | ||
1677 | .event_descs = snb_uncore_events, | ||
1678 | }; | ||
1679 | |||
1680 | static struct intel_uncore_type *snb_msr_uncores[] = { | ||
1681 | &snb_uncore_cbox, | ||
1682 | NULL, | ||
1683 | }; | ||
1684 | |||
1685 | enum { | ||
1686 | SNB_PCI_UNCORE_IMC, | ||
1687 | }; | ||
1688 | |||
1689 | static struct uncore_event_desc snb_uncore_imc_events[] = { | ||
1690 | INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"), | ||
1691 | INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"), | ||
1692 | INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"), | ||
1693 | |||
1694 | INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"), | ||
1695 | INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"), | ||
1696 | INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"), | ||
1697 | |||
1698 | { /* end: all zeroes */ }, | ||
1699 | }; | ||
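
The .scale strings let perf report these free-running counters in MiB: reading the constant back, one data_reads or data_writes count corresponds to one 64-byte cache line, since 64 / 2^20 = 6.103515625e-05 is exactly that line size expressed in MiB (the 64-bytes-per-count interpretation is inferred from the constant, not stated in this file). A one-liner verifying the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* one count = one 64-byte cache line; in MiB that is
             * 64 / 2^20 = 6.103515625e-05, the .scale value above */
            printf("%.9e\n", 64.0 / (1 << 20));
            return 0;
    }
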
1700 | |||
1701 | #define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff | ||
1702 | #define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 | ||
1703 | |||
1704 | /* page size multiple covering all config regs */ | ||
1705 | #define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 | ||
1706 | |||
1707 | #define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 | ||
1708 | #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050 | ||
1709 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 | ||
1710 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 | ||
1711 | #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE | ||
1712 | |||
1713 | static struct attribute *snb_uncore_imc_formats_attr[] = { | ||
1714 | &format_attr_event.attr, | ||
1715 | NULL, | ||
1716 | }; | ||
1717 | |||
1718 | static struct attribute_group snb_uncore_imc_format_group = { | ||
1719 | .name = "format", | ||
1720 | .attrs = snb_uncore_imc_formats_attr, | ||
1721 | }; | ||
1722 | |||
1723 | static void snb_uncore_imc_init_box(struct intel_uncore_box *box) | ||
1724 | { | ||
1725 | struct pci_dev *pdev = box->pci_dev; | ||
1726 | int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET; | ||
1727 | resource_size_t addr; | ||
1728 | u32 pci_dword; | ||
1729 | |||
1730 | pci_read_config_dword(pdev, where, &pci_dword); | ||
1731 | addr = pci_dword; | ||
1732 | |||
1733 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
1734 | pci_read_config_dword(pdev, where + 4, &pci_dword); | ||
1735 | addr |= ((resource_size_t)pci_dword << 32); | ||
1736 | #endif | ||
1737 | |||
1738 | addr &= ~(PAGE_SIZE - 1); | ||
1739 | |||
1740 | box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE); | ||
1741 | box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; | ||
1742 | } | ||
1743 | |||
1744 | static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) | ||
1745 | {} | ||
1746 | |||
1747 | static void snb_uncore_imc_disable_box(struct intel_uncore_box *box) | ||
1748 | {} | ||
1749 | |||
1750 | static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1751 | {} | ||
1752 | |||
1753 | static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1754 | {} | ||
1755 | |||
1756 | static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
1757 | { | ||
1758 | struct hw_perf_event *hwc = &event->hw; | ||
1759 | |||
1760 | return (u64)*(unsigned int *)(box->io_addr + hwc->event_base); | ||
1761 | } | ||
1762 | |||
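The raw value read above is only 32 bits wide (fixed_ctr_bits = 32 in
snb_uncore_imc below), so wraparound is left to the generic update path.
A minimal sketch of that wrap-safe delta, assuming an N-bit free-running
counter:

	/* sketch: wrap-safe delta of an N-bit free-running counter */
	static u64 counter_delta(u64 prev, u64 now, int bits)
	{
		int shift = 64 - bits;

		return ((now << shift) - (prev << shift)) >> shift;
	}
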
1763 | /* | ||
1764 | * Custom event_init() function: we define our own fixed, free- | ||
1765 | * running counters, so we must not conflict with the generic uncore | ||
1766 | * logic. This also simplifies processing. | ||
1767 | */ | ||
1768 | static int snb_uncore_imc_event_init(struct perf_event *event) | ||
1769 | { | ||
1770 | struct intel_uncore_pmu *pmu; | ||
1771 | struct intel_uncore_box *box; | ||
1772 | struct hw_perf_event *hwc = &event->hw; | ||
1773 | u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; | ||
1774 | int idx, base; | ||
1775 | |||
1776 | if (event->attr.type != event->pmu->type) | ||
1777 | return -ENOENT; | ||
1778 | |||
1779 | pmu = uncore_event_to_pmu(event); | ||
1780 | /* no device found for this pmu */ | ||
1781 | if (pmu->func_id < 0) | ||
1782 | return -ENOENT; | ||
1783 | |||
1784 | /* Sampling not supported yet */ | ||
1785 | if (hwc->sample_period) | ||
1786 | return -EINVAL; | ||
1787 | |||
1788 | /* unsupported modes and filters */ | ||
1789 | if (event->attr.exclude_user || | ||
1790 | event->attr.exclude_kernel || | ||
1791 | event->attr.exclude_hv || | ||
1792 | event->attr.exclude_idle || | ||
1793 | event->attr.exclude_host || | ||
1794 | event->attr.exclude_guest || | ||
1795 | event->attr.sample_period) /* no sampling */ | ||
1796 | return -EINVAL; | ||
1797 | |||
1798 | /* | ||
1799 | * Place all uncore events for a particular physical package | ||
1800 | * onto a single cpu | ||
1801 | */ | ||
1802 | if (event->cpu < 0) | ||
1803 | return -EINVAL; | ||
1804 | |||
1805 | /* check only supported bits are set */ | ||
1806 | if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) | ||
1807 | return -EINVAL; | ||
1808 | |||
1809 | box = uncore_pmu_to_box(pmu, event->cpu); | ||
1810 | if (!box || box->cpu < 0) | ||
1811 | return -EINVAL; | ||
1812 | |||
1813 | event->cpu = box->cpu; | ||
1814 | |||
1815 | event->hw.idx = -1; | ||
1816 | event->hw.last_tag = ~0ULL; | ||
1817 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | ||
1818 | event->hw.branch_reg.idx = EXTRA_REG_NONE; | ||
1819 | /* | ||
1820 | * check that the event is known (the whitelist also determines the counter) | ||
1821 | */ | ||
1822 | switch (cfg) { | ||
1823 | case SNB_UNCORE_PCI_IMC_DATA_READS: | ||
1824 | base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; | ||
1825 | idx = UNCORE_PMC_IDX_FIXED; | ||
1826 | break; | ||
1827 | case SNB_UNCORE_PCI_IMC_DATA_WRITES: | ||
1828 | base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; | ||
1829 | idx = UNCORE_PMC_IDX_FIXED + 1; | ||
1830 | break; | ||
1831 | default: | ||
1832 | return -EINVAL; | ||
1833 | } | ||
1834 | |||
1835 | /* must be done before validate_group */ | ||
1836 | event->hw.event_base = base; | ||
1837 | event->hw.config = cfg; | ||
1838 | event->hw.idx = idx; | ||
1839 | |||
1840 | /* no group validation needed, we have free running counters */ | ||
1841 | |||
1842 | return 0; | ||
1843 | } | ||
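
Since sampling is rejected and event->cpu must be valid, these events can
only be opened as counting, per-CPU events. A minimal userspace sketch
(an assumption, not part of this patch: the helper name is ours, and the
dynamic PMU type must be read from sysfs at runtime):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* pmu_type comes from /sys/bus/event_source/devices/uncore_imc/type */
	static int open_imc_data_reads(int pmu_type, int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = pmu_type;
		attr.config = 0x01;	/* data_reads, from the event list above */
		/* pid == -1, cpu >= 0: a system-wide counting event */
		return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	}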
1844 | |||
1845 | static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1846 | { | ||
1847 | return 0; | ||
1848 | } | ||
1849 | |||
1850 | static void snb_uncore_imc_event_start(struct perf_event *event, int flags) | ||
1851 | { | ||
1852 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
1853 | u64 count; | ||
1854 | |||
1855 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
1856 | return; | ||
1857 | |||
1858 | event->hw.state = 0; | ||
1859 | box->n_active++; | ||
1860 | |||
1861 | list_add_tail(&event->active_entry, &box->active_list); | ||
1862 | |||
1863 | count = snb_uncore_imc_read_counter(box, event); | ||
1864 | local64_set(&event->hw.prev_count, count); | ||
1865 | |||
1866 | if (box->n_active == 1) | ||
1867 | uncore_pmu_start_hrtimer(box); | ||
1868 | } | ||
1869 | |||
1870 | static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) | ||
1871 | { | ||
1872 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
1873 | struct hw_perf_event *hwc = &event->hw; | ||
1874 | |||
1875 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
1876 | box->n_active--; | ||
1877 | |||
1878 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | ||
1879 | hwc->state |= PERF_HES_STOPPED; | ||
1880 | |||
1881 | list_del(&event->active_entry); | ||
1882 | |||
1883 | if (box->n_active == 0) | ||
1884 | uncore_pmu_cancel_hrtimer(box); | ||
1885 | } | ||
1886 | |||
1887 | if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { | ||
1888 | /* | ||
1889 | * Drain the remaining delta count out of an event | ||
1890 | * that we are disabling: | ||
1891 | */ | ||
1892 | uncore_perf_event_update(box, event); | ||
1893 | hwc->state |= PERF_HES_UPTODATE; | ||
1894 | } | ||
1895 | } | ||
1896 | |||
1897 | static int snb_uncore_imc_event_add(struct perf_event *event, int flags) | ||
1898 | { | ||
1899 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
1900 | struct hw_perf_event *hwc = &event->hw; | ||
1901 | |||
1902 | if (!box) | ||
1903 | return -ENODEV; | ||
1904 | |||
1905 | hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
1906 | if (!(flags & PERF_EF_START)) | ||
1907 | hwc->state |= PERF_HES_ARCH; | ||
1908 | |||
1909 | snb_uncore_imc_event_start(event, 0); | ||
1910 | |||
1911 | box->n_events++; | ||
1912 | |||
1913 | return 0; | ||
1914 | } | ||
1915 | |||
1916 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) | ||
1917 | { | ||
1918 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
1919 | int i; | ||
1920 | |||
1921 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); | ||
1922 | |||
1923 | for (i = 0; i < box->n_events; i++) { | ||
1924 | if (event == box->event_list[i]) { | ||
1925 | --box->n_events; | ||
1926 | break; | ||
1927 | } | ||
1928 | } | ||
1929 | } | ||
1930 | |||
1931 | static int snb_pci2phy_map_init(int devid) | ||
1932 | { | ||
1933 | struct pci_dev *dev = NULL; | ||
1934 | int bus; | ||
1935 | |||
1936 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev); | ||
1937 | if (!dev) | ||
1938 | return -ENOTTY; | ||
1939 | |||
1940 | bus = dev->bus->number; | ||
1941 | |||
1942 | pcibus_to_physid[bus] = 0; | ||
1943 | |||
1944 | pci_dev_put(dev); | ||
1945 | |||
1946 | return 0; | ||
1947 | } | ||
1948 | |||
1949 | static struct pmu snb_uncore_imc_pmu = { | ||
1950 | .task_ctx_nr = perf_invalid_context, | ||
1951 | .event_init = snb_uncore_imc_event_init, | ||
1952 | .add = snb_uncore_imc_event_add, | ||
1953 | .del = snb_uncore_imc_event_del, | ||
1954 | .start = snb_uncore_imc_event_start, | ||
1955 | .stop = snb_uncore_imc_event_stop, | ||
1956 | .read = uncore_pmu_event_read, | ||
1957 | }; | ||
1958 | |||
1959 | static struct intel_uncore_ops snb_uncore_imc_ops = { | ||
1960 | .init_box = snb_uncore_imc_init_box, | ||
1961 | .enable_box = snb_uncore_imc_enable_box, | ||
1962 | .disable_box = snb_uncore_imc_disable_box, | ||
1963 | .disable_event = snb_uncore_imc_disable_event, | ||
1964 | .enable_event = snb_uncore_imc_enable_event, | ||
1965 | .hw_config = snb_uncore_imc_hw_config, | ||
1966 | .read_counter = snb_uncore_imc_read_counter, | ||
1967 | }; | ||
1968 | |||
1969 | static struct intel_uncore_type snb_uncore_imc = { | ||
1970 | .name = "imc", | ||
1971 | .num_counters = 2, | ||
1972 | .num_boxes = 1, | ||
1973 | .fixed_ctr_bits = 32, | ||
1974 | .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, | ||
1975 | .event_descs = snb_uncore_imc_events, | ||
1976 | .format_group = &snb_uncore_imc_format_group, | ||
1977 | .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, | ||
1978 | .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, | ||
1979 | .ops = &snb_uncore_imc_ops, | ||
1980 | .pmu = &snb_uncore_imc_pmu, | ||
1981 | }; | ||
1982 | |||
1983 | static struct intel_uncore_type *snb_pci_uncores[] = { | ||
1984 | [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc, | ||
1985 | NULL, | ||
1986 | }; | ||
1987 | |||
1988 | static const struct pci_device_id snb_uncore_pci_ids[] = { | ||
1989 | { /* IMC */ | ||
1990 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC), | ||
1991 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
1992 | }, | ||
1993 | { /* end: all zeroes */ }, | ||
1994 | }; | ||
1995 | |||
1996 | static const struct pci_device_id ivb_uncore_pci_ids[] = { | ||
1997 | { /* IMC */ | ||
1998 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC), | ||
1999 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
2000 | }, | ||
2001 | { /* end: all zeroes */ }, | ||
2002 | }; | ||
2003 | |||
2004 | static const struct pci_device_id hsw_uncore_pci_ids[] = { | ||
2005 | { /* IMC */ | ||
2006 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), | ||
2007 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
2008 | }, | ||
2009 | { /* end: all zeroes */ }, | ||
2010 | }; | ||
2011 | |||
2012 | static struct pci_driver snb_uncore_pci_driver = { | ||
2013 | .name = "snb_uncore", | ||
2014 | .id_table = snb_uncore_pci_ids, | ||
2015 | }; | ||
2016 | |||
2017 | static struct pci_driver ivb_uncore_pci_driver = { | ||
2018 | .name = "ivb_uncore", | ||
2019 | .id_table = ivb_uncore_pci_ids, | ||
2020 | }; | ||
2021 | |||
2022 | static struct pci_driver hsw_uncore_pci_driver = { | ||
2023 | .name = "hsw_uncore", | ||
2024 | .id_table = hsw_uncore_pci_ids, | ||
2025 | }; | ||
2026 | |||
2027 | /* end of Sandy Bridge uncore support */ | ||
2028 | |||
2029 | /* Nehalem uncore support */ | ||
2030 | static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
2031 | { | ||
2032 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); | ||
2033 | } | ||
2034 | |||
2035 | static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
2036 | { | ||
2037 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); | ||
2038 | } | ||
2039 | |||
2040 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2041 | { | ||
2042 | struct hw_perf_event *hwc = &event->hw; | ||
2043 | |||
2044 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | ||
2045 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | ||
2046 | else | ||
2047 | wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); | ||
2048 | } | ||
2049 | |||
2050 | static struct attribute *nhm_uncore_formats_attr[] = { | ||
2051 | &format_attr_event.attr, | ||
2052 | &format_attr_umask.attr, | ||
2053 | &format_attr_edge.attr, | ||
2054 | &format_attr_inv.attr, | ||
2055 | &format_attr_cmask8.attr, | ||
2056 | NULL, | ||
2057 | }; | ||
2058 | |||
2059 | static struct attribute_group nhm_uncore_format_group = { | ||
2060 | .name = "format", | ||
2061 | .attrs = nhm_uncore_formats_attr, | ||
2062 | }; | ||
2063 | |||
2064 | static struct uncore_event_desc nhm_uncore_events[] = { | ||
2065 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
2066 | INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), | ||
2067 | INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), | ||
2068 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), | ||
2069 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), | ||
2070 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), | ||
2071 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), | ||
2072 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), | ||
2073 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), | ||
2074 | { /* end: all zeroes */ }, | ||
2075 | }; | ||
2076 | |||
2077 | static struct intel_uncore_ops nhm_uncore_msr_ops = { | ||
2078 | .disable_box = nhm_uncore_msr_disable_box, | ||
2079 | .enable_box = nhm_uncore_msr_enable_box, | ||
2080 | .disable_event = snb_uncore_msr_disable_event, | ||
2081 | .enable_event = nhm_uncore_msr_enable_event, | ||
2082 | .read_counter = uncore_msr_read_counter, | ||
2083 | }; | ||
2084 | |||
2085 | static struct intel_uncore_type nhm_uncore = { | ||
2086 | .name = "", | ||
2087 | .num_counters = 8, | ||
2088 | .num_boxes = 1, | ||
2089 | .perf_ctr_bits = 48, | ||
2090 | .fixed_ctr_bits = 48, | ||
2091 | .event_ctl = NHM_UNC_PERFEVTSEL0, | ||
2092 | .perf_ctr = NHM_UNC_UNCORE_PMC0, | ||
2093 | .fixed_ctr = NHM_UNC_FIXED_CTR, | ||
2094 | .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, | ||
2095 | .event_mask = NHM_UNC_RAW_EVENT_MASK, | ||
2096 | .event_descs = nhm_uncore_events, | ||
2097 | .ops = &nhm_uncore_msr_ops, | ||
2098 | .format_group = &nhm_uncore_format_group, | ||
2099 | }; | ||
2100 | |||
2101 | static struct intel_uncore_type *nhm_msr_uncores[] = { | ||
2102 | &nhm_uncore, | ||
2103 | NULL, | ||
2104 | }; | ||
2105 | /* end of Nehalem uncore support */ | ||
2106 | |||
2107 | /* Nehalem-EX uncore support */ | ||
2108 | DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); | ||
2109 | DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); | ||
2110 | DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); | ||
2111 | DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); | ||
2112 | |||
2113 | static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) | ||
2114 | { | ||
2115 | wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); | ||
2116 | } | ||
2117 | |||
2118 | static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
2119 | { | ||
2120 | unsigned msr = uncore_msr_box_ctl(box); | ||
2121 | u64 config; | ||
2122 | |||
2123 | if (msr) { | ||
2124 | rdmsrl(msr, config); | ||
2125 | config &= ~((1ULL << uncore_num_counters(box)) - 1); | ||
2126 | /* WBox has a fixed counter */ | ||
2127 | if (uncore_msr_fixed_ctl(box)) | ||
2128 | config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
2129 | wrmsrl(msr, config); | ||
2130 | } | ||
2131 | } | ||
2132 | |||
2133 | static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
2134 | { | ||
2135 | unsigned msr = uncore_msr_box_ctl(box); | ||
2136 | u64 config; | ||
2137 | |||
2138 | if (msr) { | ||
2139 | rdmsrl(msr, config); | ||
2140 | config |= (1ULL << uncore_num_counters(box)) - 1; | ||
2141 | /* WBox has a fixed counter */ | ||
2142 | if (uncore_msr_fixed_ctl(box)) | ||
2143 | config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
2144 | wrmsrl(msr, config); | ||
2145 | } | ||
2146 | } | ||
2147 | |||
2148 | static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2149 | { | ||
2150 | wrmsrl(event->hw.config_base, 0); | ||
2151 | } | ||
2152 | |||
2153 | static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2154 | { | ||
2155 | struct hw_perf_event *hwc = &event->hw; | ||
2156 | |||
2157 | if (hwc->idx >= UNCORE_PMC_IDX_FIXED) | ||
2158 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); | ||
2159 | else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) | ||
2160 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
2161 | else | ||
2162 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
2163 | } | ||
2164 | |||
2165 | #define NHMEX_UNCORE_OPS_COMMON_INIT() \ | ||
2166 | .init_box = nhmex_uncore_msr_init_box, \ | ||
2167 | .disable_box = nhmex_uncore_msr_disable_box, \ | ||
2168 | .enable_box = nhmex_uncore_msr_enable_box, \ | ||
2169 | .disable_event = nhmex_uncore_msr_disable_event, \ | ||
2170 | .read_counter = uncore_msr_read_counter | ||
2171 | |||
2172 | static struct intel_uncore_ops nhmex_uncore_ops = { | ||
2173 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
2174 | .enable_event = nhmex_uncore_msr_enable_event, | ||
2175 | }; | ||
2176 | |||
2177 | static struct attribute *nhmex_uncore_ubox_formats_attr[] = { | ||
2178 | &format_attr_event.attr, | ||
2179 | &format_attr_edge.attr, | ||
2180 | NULL, | ||
2181 | }; | ||
2182 | |||
2183 | static struct attribute_group nhmex_uncore_ubox_format_group = { | ||
2184 | .name = "format", | ||
2185 | .attrs = nhmex_uncore_ubox_formats_attr, | ||
2186 | }; | ||
2187 | |||
2188 | static struct intel_uncore_type nhmex_uncore_ubox = { | ||
2189 | .name = "ubox", | ||
2190 | .num_counters = 1, | ||
2191 | .num_boxes = 1, | ||
2192 | .perf_ctr_bits = 48, | ||
2193 | .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, | ||
2194 | .perf_ctr = NHMEX_U_MSR_PMON_CTR, | ||
2195 | .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, | ||
2196 | .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, | ||
2197 | .ops = &nhmex_uncore_ops, | ||
2198 | .format_group = &nhmex_uncore_ubox_format_group | ||
2199 | }; | ||
2200 | |||
2201 | static struct attribute *nhmex_uncore_cbox_formats_attr[] = { | ||
2202 | &format_attr_event.attr, | ||
2203 | &format_attr_umask.attr, | ||
2204 | &format_attr_edge.attr, | ||
2205 | &format_attr_inv.attr, | ||
2206 | &format_attr_thresh8.attr, | ||
2207 | NULL, | ||
2208 | }; | ||
2209 | |||
2210 | static struct attribute_group nhmex_uncore_cbox_format_group = { | ||
2211 | .name = "format", | ||
2212 | .attrs = nhmex_uncore_cbox_formats_attr, | ||
2213 | }; | ||
2214 | |||
2215 | /* msr offset for each instance of cbox */ | ||
2216 | static unsigned nhmex_cbox_msr_offsets[] = { | ||
2217 | 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, | ||
2218 | }; | ||
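
The table is not monotonic because the last two boxes sit in a separate
MSR range. A sketch of how the generic code is assumed to form a per-box
register address from it (with .pair_ctr_ctl = 1, event-select and
counter MSRs interleave, so control N sits at base + 2 * N):

	/* sketch: address of event-select N in cbox B (assumed helper) */
	static unsigned int cbox_event_ctl(int box, int n)
	{
		return NHMEX_C0_MSR_PMON_EV_SEL0 + 2 * n +
		       nhmex_cbox_msr_offsets[box];
	}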
2219 | |||
2220 | static struct intel_uncore_type nhmex_uncore_cbox = { | ||
2221 | .name = "cbox", | ||
2222 | .num_counters = 6, | ||
2223 | .num_boxes = 10, | ||
2224 | .perf_ctr_bits = 48, | ||
2225 | .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, | ||
2226 | .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, | ||
2227 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
2228 | .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, | ||
2229 | .msr_offsets = nhmex_cbox_msr_offsets, | ||
2230 | .pair_ctr_ctl = 1, | ||
2231 | .ops = &nhmex_uncore_ops, | ||
2232 | .format_group = &nhmex_uncore_cbox_format_group | ||
2233 | }; | ||
2234 | |||
2235 | static struct uncore_event_desc nhmex_uncore_wbox_events[] = { | ||
2236 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), | ||
2237 | { /* end: all zeroes */ }, | ||
2238 | }; | ||
2239 | |||
2240 | static struct intel_uncore_type nhmex_uncore_wbox = { | ||
2241 | .name = "wbox", | ||
2242 | .num_counters = 4, | ||
2243 | .num_boxes = 1, | ||
2244 | .perf_ctr_bits = 48, | ||
2245 | .event_ctl = NHMEX_W_MSR_PMON_CNT0, | ||
2246 | .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, | ||
2247 | .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, | ||
2248 | .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, | ||
2249 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
2250 | .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, | ||
2251 | .pair_ctr_ctl = 1, | ||
2252 | .event_descs = nhmex_uncore_wbox_events, | ||
2253 | .ops = &nhmex_uncore_ops, | ||
2254 | .format_group = &nhmex_uncore_cbox_format_group | ||
2255 | }; | ||
2256 | |||
2257 | static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
2258 | { | ||
2259 | struct hw_perf_event *hwc = &event->hw; | ||
2260 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2261 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2262 | int ctr, ev_sel; | ||
2263 | |||
2264 | ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> | ||
2265 | NHMEX_B_PMON_CTR_SHIFT; | ||
2266 | ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> | ||
2267 | NHMEX_B_PMON_CTL_EV_SEL_SHIFT; | ||
2268 | |||
2269 | /* events that do not use the match/mask registers */ | ||
2270 | if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || | ||
2271 | (ctr == 2 && ev_sel != 0x4) || ctr == 3) | ||
2272 | return 0; | ||
2273 | |||
2274 | if (box->pmu->pmu_idx == 0) | ||
2275 | reg1->reg = NHMEX_B0_MSR_MATCH; | ||
2276 | else | ||
2277 | reg1->reg = NHMEX_B1_MSR_MATCH; | ||
2278 | reg1->idx = 0; | ||
2279 | reg1->config = event->attr.config1; | ||
2280 | reg2->config = event->attr.config2; | ||
2281 | return 0; | ||
2282 | } | ||
2283 | |||
2284 | static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2285 | { | ||
2286 | struct hw_perf_event *hwc = &event->hw; | ||
2287 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2288 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2289 | |||
2290 | if (reg1->idx != EXTRA_REG_NONE) { | ||
2291 | wrmsrl(reg1->reg, reg1->config); | ||
2292 | wrmsrl(reg1->reg + 1, reg2->config); | ||
2293 | } | ||
2294 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
2295 | (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); | ||
2296 | } | ||
2297 | |||
2298 | /* | ||
2299 | * The Bbox has 4 counters, but each counter monitors different events. | ||
2301 | * Use bits 6-7 in the event config to select the counter. | ||
2301 | */ | ||
2302 | static struct event_constraint nhmex_uncore_bbox_constraints[] = { | ||
2303 | EVENT_CONSTRAINT(0 , 1, 0xc0), | ||
2304 | EVENT_CONSTRAINT(0x40, 2, 0xc0), | ||
2305 | EVENT_CONSTRAINT(0x80, 4, 0xc0), | ||
2306 | EVENT_CONSTRAINT(0xc0, 8, 0xc0), | ||
2307 | EVENT_CONSTRAINT_END, | ||
2308 | }; | ||
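
Each entry pairs an event code (bits 6-7 set accordingly) with a one-hot
counter mask, so the table simply encodes the rule from the comment
above. As a sketch:

	/* sketch: bits 6-7 of the config select the Bbox counter */
	static unsigned int bbox_counter_mask(u64 config)
	{
		return 1U << ((config & 0xc0) >> 6);	/* 0x40 -> 0x2, 0xc0 -> 0x8 */
	}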
2309 | |||
2310 | static struct attribute *nhmex_uncore_bbox_formats_attr[] = { | ||
2311 | &format_attr_event5.attr, | ||
2312 | &format_attr_counter.attr, | ||
2313 | &format_attr_match.attr, | ||
2314 | &format_attr_mask.attr, | ||
2315 | NULL, | ||
2316 | }; | ||
2317 | |||
2318 | static struct attribute_group nhmex_uncore_bbox_format_group = { | ||
2319 | .name = "format", | ||
2320 | .attrs = nhmex_uncore_bbox_formats_attr, | ||
2321 | }; | ||
2322 | |||
2323 | static struct intel_uncore_ops nhmex_uncore_bbox_ops = { | ||
2324 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
2325 | .enable_event = nhmex_bbox_msr_enable_event, | ||
2326 | .hw_config = nhmex_bbox_hw_config, | ||
2327 | .get_constraint = uncore_get_constraint, | ||
2328 | .put_constraint = uncore_put_constraint, | ||
2329 | }; | ||
2330 | |||
2331 | static struct intel_uncore_type nhmex_uncore_bbox = { | ||
2332 | .name = "bbox", | ||
2333 | .num_counters = 4, | ||
2334 | .num_boxes = 2, | ||
2335 | .perf_ctr_bits = 48, | ||
2336 | .event_ctl = NHMEX_B0_MSR_PMON_CTL0, | ||
2337 | .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, | ||
2338 | .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, | ||
2339 | .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, | ||
2340 | .msr_offset = NHMEX_B_MSR_OFFSET, | ||
2341 | .pair_ctr_ctl = 1, | ||
2342 | .num_shared_regs = 1, | ||
2343 | .constraints = nhmex_uncore_bbox_constraints, | ||
2344 | .ops = &nhmex_uncore_bbox_ops, | ||
2345 | .format_group = &nhmex_uncore_bbox_format_group | ||
2346 | }; | ||
2347 | |||
2348 | static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
2349 | { | ||
2350 | struct hw_perf_event *hwc = &event->hw; | ||
2351 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2352 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2353 | |||
2354 | /* only the TO_R_PROG_EV event uses the match/mask register */ | ||
2355 | if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != | ||
2356 | NHMEX_S_EVENT_TO_R_PROG_EV) | ||
2357 | return 0; | ||
2358 | |||
2359 | if (box->pmu->pmu_idx == 0) | ||
2360 | reg1->reg = NHMEX_S0_MSR_MM_CFG; | ||
2361 | else | ||
2362 | reg1->reg = NHMEX_S1_MSR_MM_CFG; | ||
2363 | reg1->idx = 0; | ||
2364 | reg1->config = event->attr.config1; | ||
2365 | reg2->config = event->attr.config2; | ||
2366 | return 0; | ||
2367 | } | ||
2368 | |||
2369 | static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2370 | { | ||
2371 | struct hw_perf_event *hwc = &event->hw; | ||
2372 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2373 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2374 | |||
2375 | if (reg1->idx != EXTRA_REG_NONE) { | ||
2376 | wrmsrl(reg1->reg, 0); | ||
2377 | wrmsrl(reg1->reg + 1, reg1->config); | ||
2378 | wrmsrl(reg1->reg + 2, reg2->config); | ||
2379 | wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); | ||
2380 | } | ||
2381 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
2382 | } | ||
2383 | |||
2384 | static struct attribute *nhmex_uncore_sbox_formats_attr[] = { | ||
2385 | &format_attr_event.attr, | ||
2386 | &format_attr_umask.attr, | ||
2387 | &format_attr_edge.attr, | ||
2388 | &format_attr_inv.attr, | ||
2389 | &format_attr_thresh8.attr, | ||
2390 | &format_attr_match.attr, | ||
2391 | &format_attr_mask.attr, | ||
2392 | NULL, | ||
2393 | }; | ||
2394 | |||
2395 | static struct attribute_group nhmex_uncore_sbox_format_group = { | ||
2396 | .name = "format", | ||
2397 | .attrs = nhmex_uncore_sbox_formats_attr, | ||
2398 | }; | ||
2399 | |||
2400 | static struct intel_uncore_ops nhmex_uncore_sbox_ops = { | ||
2401 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
2402 | .enable_event = nhmex_sbox_msr_enable_event, | ||
2403 | .hw_config = nhmex_sbox_hw_config, | ||
2404 | .get_constraint = uncore_get_constraint, | ||
2405 | .put_constraint = uncore_put_constraint, | ||
2406 | }; | ||
2407 | |||
2408 | static struct intel_uncore_type nhmex_uncore_sbox = { | ||
2409 | .name = "sbox", | ||
2410 | .num_counters = 4, | ||
2411 | .num_boxes = 2, | ||
2412 | .perf_ctr_bits = 48, | ||
2413 | .event_ctl = NHMEX_S0_MSR_PMON_CTL0, | ||
2414 | .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, | ||
2415 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
2416 | .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, | ||
2417 | .msr_offset = NHMEX_S_MSR_OFFSET, | ||
2418 | .pair_ctr_ctl = 1, | ||
2419 | .num_shared_regs = 1, | ||
2420 | .ops = &nhmex_uncore_sbox_ops, | ||
2421 | .format_group = &nhmex_uncore_sbox_format_group | ||
2422 | }; | ||
2423 | |||
2424 | enum { | ||
2425 | EXTRA_REG_NHMEX_M_FILTER, | ||
2426 | EXTRA_REG_NHMEX_M_DSP, | ||
2427 | EXTRA_REG_NHMEX_M_ISS, | ||
2428 | EXTRA_REG_NHMEX_M_MAP, | ||
2429 | EXTRA_REG_NHMEX_M_MSC_THR, | ||
2430 | EXTRA_REG_NHMEX_M_PGT, | ||
2431 | EXTRA_REG_NHMEX_M_PLD, | ||
2432 | EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, | ||
2433 | }; | ||
2434 | |||
2435 | static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { | ||
2436 | MBOX_INC_SEL_EXTAR_REG(0x0, DSP), | ||
2437 | MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), | ||
2438 | MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), | ||
2439 | MBOX_INC_SEL_EXTAR_REG(0x9, ISS), | ||
2440 | /* event 0xa uses two extra registers */ | ||
2441 | MBOX_INC_SEL_EXTAR_REG(0xa, ISS), | ||
2442 | MBOX_INC_SEL_EXTAR_REG(0xa, PLD), | ||
2443 | MBOX_INC_SEL_EXTAR_REG(0xb, PLD), | ||
2444 | /* events 0xd ~ 0x10 use the same extra register */ | ||
2445 | MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), | ||
2446 | MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), | ||
2447 | MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), | ||
2448 | MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), | ||
2449 | MBOX_INC_SEL_EXTAR_REG(0x16, PGT), | ||
2450 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), | ||
2451 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), | ||
2452 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), | ||
2453 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), | ||
2454 | EVENT_EXTRA_END | ||
2455 | }; | ||
2456 | |||
2457 | /* Nehalem-EX or Westmere-EX? */ | ||
2458 | static bool uncore_nhmex; | ||
2459 | |||
2460 | static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) | ||
2461 | { | ||
2462 | struct intel_uncore_extra_reg *er; | ||
2463 | unsigned long flags; | ||
2464 | bool ret = false; | ||
2465 | u64 mask; | ||
2466 | |||
2467 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
2468 | er = &box->shared_regs[idx]; | ||
2469 | raw_spin_lock_irqsave(&er->lock, flags); | ||
2470 | if (!atomic_read(&er->ref) || er->config == config) { | ||
2471 | atomic_inc(&er->ref); | ||
2472 | er->config = config; | ||
2473 | ret = true; | ||
2474 | } | ||
2475 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
2476 | |||
2477 | return ret; | ||
2478 | } | ||
2479 | /* | ||
2480 | * The ZDP_CTL_FVC MSR has 4 fields which are used to control | ||
2481 | * events 0xd ~ 0x10. Besides these 4 fields, there are additional | ||
2482 | * fields which are shared. | ||
2483 | */ | ||
2484 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
2485 | if (WARN_ON_ONCE(idx >= 4)) | ||
2486 | return false; | ||
2487 | |||
2488 | /* mask of the shared fields */ | ||
2489 | if (uncore_nhmex) | ||
2490 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
2491 | else | ||
2492 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
2493 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
2494 | |||
2495 | raw_spin_lock_irqsave(&er->lock, flags); | ||
2496 | /* add mask of the non-shared field if it's in use */ | ||
2497 | if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { | ||
2498 | if (uncore_nhmex) | ||
2499 | mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2500 | else | ||
2501 | mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2502 | } | ||
2503 | |||
2504 | if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { | ||
2505 | atomic_add(1 << (idx * 8), &er->ref); | ||
2506 | if (uncore_nhmex) | ||
2507 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
2508 | NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2509 | else | ||
2510 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
2511 | WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2512 | er->config &= ~mask; | ||
2513 | er->config |= (config & mask); | ||
2514 | ret = true; | ||
2515 | } | ||
2516 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
2517 | |||
2518 | return ret; | ||
2519 | } | ||
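
The reference counting above packs four independent 8-bit counts into a
single atomic_t, one per ZDP_CTL_FVC event field. __BITS_VALUE() is
assumed to extract one such field:

	/* assumed semantics of __BITS_VALUE(val, idx, width) */
	#define BITS_VALUE_SKETCH(val, idx, width) \
		(((u64)(val) >> ((idx) * (width))) & ((1ULL << (width)) - 1))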
2520 | |||
2521 | static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) | ||
2522 | { | ||
2523 | struct intel_uncore_extra_reg *er; | ||
2524 | |||
2525 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
2526 | er = &box->shared_regs[idx]; | ||
2527 | atomic_dec(&er->ref); | ||
2528 | return; | ||
2529 | } | ||
2530 | |||
2531 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
2532 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
2533 | atomic_sub(1 << (idx * 8), &er->ref); | ||
2534 | } | ||
2535 | |||
2536 | static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
2537 | { | ||
2538 | struct hw_perf_event *hwc = &event->hw; | ||
2539 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2540 | u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
2541 | u64 config = reg1->config; | ||
2542 | |||
2543 | /* get the non-shared control bits and shift them */ | ||
2544 | idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
2545 | if (uncore_nhmex) | ||
2546 | config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2547 | else | ||
2548 | config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
2549 | if (new_idx > orig_idx) { | ||
2550 | idx = new_idx - orig_idx; | ||
2551 | config <<= 3 * idx; | ||
2552 | } else { | ||
2553 | idx = orig_idx - new_idx; | ||
2554 | config >>= 3 * idx; | ||
2555 | } | ||
2556 | |||
2557 | /* add the shared control bits back */ | ||
2558 | if (uncore_nhmex) | ||
2559 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
2560 | else | ||
2561 | config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
2562 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
2563 | if (modify) { | ||
2564 | /* adjust the main event selector */ | ||
2565 | if (new_idx > orig_idx) | ||
2566 | hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
2567 | else | ||
2568 | hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
2569 | reg1->config = config; | ||
2570 | reg1->idx = ~0xff | new_idx; | ||
2571 | } | ||
2572 | return config; | ||
2573 | } | ||
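
A worked example of the shift logic above: retargeting an event from
ZDP_CTL_FVC field 0 (inc_sel 0xd) to field 1 (inc_sel 0xe) shifts its
non-shared select bits up by 3 * 1 = 3 positions and bumps the main
selector by 1 << NHMEX_M_PMON_CTL_INC_SEL_SHIFT, while the shared
control bits are re-ORed in unchanged.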
2574 | |||
2575 | static struct event_constraint * | ||
2576 | nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
2577 | { | ||
2578 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
2579 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
2580 | int i, idx[2], alloc = 0; | ||
2581 | u64 config1 = reg1->config; | ||
2582 | |||
2583 | idx[0] = __BITS_VALUE(reg1->idx, 0, 8); | ||
2584 | idx[1] = __BITS_VALUE(reg1->idx, 1, 8); | ||
2585 | again: | ||
2586 | for (i = 0; i < 2; i++) { | ||
2587 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
2588 | idx[i] = 0xff; | ||
2589 | |||
2590 | if (idx[i] == 0xff) | ||
2591 | continue; | ||
2592 | |||
2593 | if (!nhmex_mbox_get_shared_reg(box, idx[i], | ||
2594 | __BITS_VALUE(config1, i, 32))) | ||
2595 | goto fail; | ||
2596 | alloc |= (0x1 << i); | ||
2597 | } | ||
2598 | |||
2599 | /* for the match/mask registers */ | ||
2600 | if (reg2->idx != EXTRA_REG_NONE && | ||
2601 | (uncore_box_is_fake(box) || !reg2->alloc) && | ||
2602 | !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) | ||
2603 | goto fail; | ||
2604 | |||
2605 | /* | ||
2606 | * If it's a fake box -- as per validate_{group,event}() -- we | ||
2607 | * shouldn't touch event state, and we can avoid doing so | ||
2608 | * since both will only call get_event_constraints() once | ||
2609 | * per event; this avoids the need for reg->alloc. | ||
2610 | */ | ||
2611 | if (!uncore_box_is_fake(box)) { | ||
2612 | if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) | ||
2613 | nhmex_mbox_alter_er(event, idx[0], true); | ||
2614 | reg1->alloc |= alloc; | ||
2615 | if (reg2->idx != EXTRA_REG_NONE) | ||
2616 | reg2->alloc = 1; | ||
2617 | } | ||
2618 | return NULL; | ||
2619 | fail: | ||
2620 | if (idx[0] != 0xff && !(alloc & 0x1) && | ||
2621 | idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
2622 | /* | ||
2623 | * events 0xd ~ 0x10 are functionally identical, but are | ||
2624 | * controlled by different fields in the ZDP_CTL_FVC | ||
2625 | * register. If we failed to take one field, try the | ||
2626 | * remaining 3 choices. | ||
2627 | */ | ||
2628 | BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); | ||
2629 | idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
2630 | idx[0] = (idx[0] + 1) % 4; | ||
2631 | idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
2632 | if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { | ||
2633 | config1 = nhmex_mbox_alter_er(event, idx[0], false); | ||
2634 | goto again; | ||
2635 | } | ||
2636 | } | ||
2637 | |||
2638 | if (alloc & 0x1) | ||
2639 | nhmex_mbox_put_shared_reg(box, idx[0]); | ||
2640 | if (alloc & 0x2) | ||
2641 | nhmex_mbox_put_shared_reg(box, idx[1]); | ||
2642 | return &constraint_empty; | ||
2643 | } | ||
2644 | |||
2645 | static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
2646 | { | ||
2647 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
2648 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
2649 | |||
2650 | if (uncore_box_is_fake(box)) | ||
2651 | return; | ||
2652 | |||
2653 | if (reg1->alloc & 0x1) | ||
2654 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); | ||
2655 | if (reg1->alloc & 0x2) | ||
2656 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); | ||
2657 | reg1->alloc = 0; | ||
2658 | |||
2659 | if (reg2->alloc) { | ||
2660 | nhmex_mbox_put_shared_reg(box, reg2->idx); | ||
2661 | reg2->alloc = 0; | ||
2662 | } | ||
2663 | } | ||
2664 | |||
2665 | static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) | ||
2666 | { | ||
2667 | if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
2668 | return er->idx; | ||
2669 | return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; | ||
2670 | } | ||
2671 | |||
2672 | static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
2673 | { | ||
2674 | struct intel_uncore_type *type = box->pmu->type; | ||
2675 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
2676 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
2677 | struct extra_reg *er; | ||
2678 | unsigned msr; | ||
2679 | int reg_idx = 0; | ||
2680 | /* | ||
2681 | * The mbox events may require at most 2 extra MSRs. But only | ||
2682 | * the lower 32 bits in these MSRs are significant, so we can use | ||
2683 | * config1 to pass two MSRs' config. | ||
2684 | */ | ||
2685 | for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { | ||
2686 | if (er->event != (event->hw.config & er->config_mask)) | ||
2687 | continue; | ||
2688 | if (event->attr.config1 & ~er->valid_mask) | ||
2689 | return -EINVAL; | ||
2690 | |||
2691 | msr = er->msr + type->msr_offset * box->pmu->pmu_idx; | ||
2692 | if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) | ||
2693 | return -EINVAL; | ||
2694 | |||
2695 | /* always use bits 32~63 to pass the PLD config */ | ||
2696 | if (er->idx == EXTRA_REG_NHMEX_M_PLD) | ||
2697 | reg_idx = 1; | ||
2698 | else if (WARN_ON_ONCE(reg_idx > 0)) | ||
2699 | return -EINVAL; | ||
2700 | |||
2701 | reg1->idx &= ~(0xff << (reg_idx * 8)); | ||
2702 | reg1->reg &= ~(0xffff << (reg_idx * 16)); | ||
2703 | reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); | ||
2704 | reg1->reg |= msr << (reg_idx * 16); | ||
2705 | reg1->config = event->attr.config1; | ||
2706 | reg_idx++; | ||
2707 | } | ||
2708 | /* | ||
2709 | * The mbox only provides the ability to perform address matching | ||
2710 | * for the PLD events. | ||
2711 | */ | ||
2712 | if (reg_idx == 2) { | ||
2713 | reg2->idx = EXTRA_REG_NHMEX_M_FILTER; | ||
2714 | if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) | ||
2715 | reg2->config = event->attr.config2; | ||
2716 | else | ||
2717 | reg2->config = ~0ULL; | ||
2718 | if (box->pmu->pmu_idx == 0) | ||
2719 | reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; | ||
2720 | else | ||
2721 | reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; | ||
2722 | } | ||
2723 | return 0; | ||
2724 | } | ||
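
The packing convention used by the loop above, spelled out with the
field widths taken from the shifts in the code:

	/* reg1->idx : byte 0 = first extra-reg index, byte 1 = PLD index
	 * reg1->reg : bits 0-15 = first MSR address, bits 16-31 = PLD MSR
	 * config1   : bits 0-31 = first MSR value, bits 32-63 = PLD value */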
2725 | |||
2726 | static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
2727 | { | ||
2728 | struct intel_uncore_extra_reg *er; | ||
2729 | unsigned long flags; | ||
2730 | u64 config; | ||
2731 | |||
2732 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
2733 | return box->shared_regs[idx].config; | ||
2734 | |||
2735 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
2736 | raw_spin_lock_irqsave(&er->lock, flags); | ||
2737 | config = er->config; | ||
2738 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
2739 | return config; | ||
2740 | } | ||
2741 | |||
2742 | static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
2743 | { | ||
2744 | struct hw_perf_event *hwc = &event->hw; | ||
2745 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2746 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2747 | int idx; | ||
2748 | |||
2749 | idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
2750 | if (idx != 0xff) | ||
2751 | wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), | ||
2752 | nhmex_mbox_shared_reg_config(box, idx)); | ||
2753 | idx = __BITS_VALUE(reg1->idx, 1, 8); | ||
2754 | if (idx != 0xff) | ||
2755 | wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), | ||
2756 | nhmex_mbox_shared_reg_config(box, idx)); | ||
2757 | |||
2758 | if (reg2->idx != EXTRA_REG_NONE) { | ||
2759 | wrmsrl(reg2->reg, 0); | ||
2760 | if (reg2->config != ~0ULL) { | ||
2761 | wrmsrl(reg2->reg + 1, | ||
2762 | reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); | ||
2763 | wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & | ||
2764 | (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); | ||
2765 | wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); | ||
2766 | } | ||
2767 | } | ||
2768 | |||
2769 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
2770 | } | ||
2771 | |||
2772 | DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); | ||
2773 | DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); | ||
2774 | DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); | ||
2775 | DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); | ||
2776 | DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); | ||
2777 | DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); | ||
2778 | DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); | ||
2779 | DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); | ||
2780 | DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); | ||
2781 | DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); | ||
2782 | DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); | ||
2783 | DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); | ||
2784 | DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); | ||
2785 | DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); | ||
2786 | DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); | ||
2787 | DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); | ||
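
These format strings publish the config bit layout in sysfs, and perf's
event parser places each named field into the given bits. For the event
descriptions below, for example, "inc_sel=0xd" lands in config bits
9-13:

	/* sketch: where "inc_sel=0xd" ends up in the raw config */
	static u64 mbox_inc_sel(u64 config, u64 sel)
	{
		return config | (sel << 9);	/* config:9-13; 0xd -> 0x1a00 */
	}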
2788 | |||
2789 | static struct attribute *nhmex_uncore_mbox_formats_attr[] = { | ||
2790 | &format_attr_count_mode.attr, | ||
2791 | &format_attr_storage_mode.attr, | ||
2792 | &format_attr_wrap_mode.attr, | ||
2793 | &format_attr_flag_mode.attr, | ||
2794 | &format_attr_inc_sel.attr, | ||
2795 | &format_attr_set_flag_sel.attr, | ||
2796 | &format_attr_filter_cfg_en.attr, | ||
2797 | &format_attr_filter_match.attr, | ||
2798 | &format_attr_filter_mask.attr, | ||
2799 | &format_attr_dsp.attr, | ||
2800 | &format_attr_thr.attr, | ||
2801 | &format_attr_fvc.attr, | ||
2802 | &format_attr_pgt.attr, | ||
2803 | &format_attr_map.attr, | ||
2804 | &format_attr_iss.attr, | ||
2805 | &format_attr_pld.attr, | ||
2806 | NULL, | ||
2807 | }; | ||
2808 | |||
2809 | static struct attribute_group nhmex_uncore_mbox_format_group = { | ||
2810 | .name = "format", | ||
2811 | .attrs = nhmex_uncore_mbox_formats_attr, | ||
2812 | }; | ||
2813 | |||
2814 | static struct uncore_event_desc nhmex_uncore_mbox_events[] = { | ||
2815 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), | ||
2816 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), | ||
2817 | { /* end: all zeroes */ }, | ||
2818 | }; | ||
2819 | |||
2820 | static struct uncore_event_desc wsmex_uncore_mbox_events[] = { | ||
2821 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), | ||
2822 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), | ||
2823 | { /* end: all zeroes */ }, | ||
2824 | }; | ||
2825 | |||
2826 | static struct intel_uncore_ops nhmex_uncore_mbox_ops = { | ||
2827 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
2828 | .enable_event = nhmex_mbox_msr_enable_event, | ||
2829 | .hw_config = nhmex_mbox_hw_config, | ||
2830 | .get_constraint = nhmex_mbox_get_constraint, | ||
2831 | .put_constraint = nhmex_mbox_put_constraint, | ||
2832 | }; | ||
2833 | |||
2834 | static struct intel_uncore_type nhmex_uncore_mbox = { | ||
2835 | .name = "mbox", | ||
2836 | .num_counters = 6, | ||
2837 | .num_boxes = 2, | ||
2838 | .perf_ctr_bits = 48, | ||
2839 | .event_ctl = NHMEX_M0_MSR_PMU_CTL0, | ||
2840 | .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, | ||
2841 | .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, | ||
2842 | .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, | ||
2843 | .msr_offset = NHMEX_M_MSR_OFFSET, | ||
2844 | .pair_ctr_ctl = 1, | ||
2845 | .num_shared_regs = 8, | ||
2846 | .event_descs = nhmex_uncore_mbox_events, | ||
2847 | .ops = &nhmex_uncore_mbox_ops, | ||
2848 | .format_group = &nhmex_uncore_mbox_format_group, | ||
2849 | }; | ||
2850 | |||
2851 | static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) | ||
2852 | { | ||
2853 | struct hw_perf_event *hwc = &event->hw; | ||
2854 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2855 | |||
2856 | /* adjust the main event selector and extra register index */ | ||
2857 | if (reg1->idx % 2) { | ||
2858 | reg1->idx--; | ||
2859 | hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
2860 | } else { | ||
2861 | reg1->idx++; | ||
2862 | hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
2863 | } | ||
2864 | |||
2865 | /* adjust extra register config */ | ||
2866 | switch (reg1->idx % 6) { | ||
2867 | case 2: | ||
2868 | /* shift bits 8~15 down to bits 0~7 */ | ||
2869 | reg1->config >>= 8; | ||
2870 | break; | ||
2871 | case 3: | ||
2872 | /* shift bits 0~7 up to bits 8~15 */ | ||
2873 | reg1->config <<= 8; | ||
2874 | break; | ||
2875 | } | ||
2876 | } | ||
2877 | |||
2878 | /* | ||
2879 | * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7. | ||
2880 | * An event set consists of 6 events; the 3rd and 4th events in | ||
2881 | * an event set use the same extra register, so an event set uses | ||
2882 | * 5 extra registers. | ||
2883 | */ | ||
2884 | static struct event_constraint * | ||
2885 | nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
2886 | { | ||
2887 | struct hw_perf_event *hwc = &event->hw; | ||
2888 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
2889 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
2890 | struct intel_uncore_extra_reg *er; | ||
2891 | unsigned long flags; | ||
2892 | int idx, er_idx; | ||
2893 | u64 config1; | ||
2894 | bool ok = false; | ||
2895 | |||
2896 | if (!uncore_box_is_fake(box) && reg1->alloc) | ||
2897 | return NULL; | ||
2898 | |||
2899 | idx = reg1->idx % 6; | ||
2900 | config1 = reg1->config; | ||
2901 | again: | ||
2902 | er_idx = idx; | ||
2903 | /* the 3rd and 4th events use the same extra register */ | ||
2904 | if (er_idx > 2) | ||
2905 | er_idx--; | ||
2906 | er_idx += (reg1->idx / 6) * 5; | ||
2907 | |||
2908 | er = &box->shared_regs[er_idx]; | ||
2909 | raw_spin_lock_irqsave(&er->lock, flags); | ||
2910 | if (idx < 2) { | ||
2911 | if (!atomic_read(&er->ref) || er->config == reg1->config) { | ||
2912 | atomic_inc(&er->ref); | ||
2913 | er->config = reg1->config; | ||
2914 | ok = true; | ||
2915 | } | ||
2916 | } else if (idx == 2 || idx == 3) { | ||
2917 | /* | ||
2918 | * these two events use different fields in an extra register, | ||
2919 | * the 0~7 bits and the 8~15 bits respectively. | ||
2920 | */ | ||
2921 | u64 mask = 0xff << ((idx - 2) * 8); | ||
2922 | if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || | ||
2923 | !((er->config ^ config1) & mask)) { | ||
2924 | atomic_add(1 << ((idx - 2) * 8), &er->ref); | ||
2925 | er->config &= ~mask; | ||
2926 | er->config |= config1 & mask; | ||
2927 | ok = true; | ||
2928 | } | ||
2929 | } else { | ||
2930 | if (!atomic_read(&er->ref) || | ||
2931 | (er->config == (hwc->config >> 32) && | ||
2932 | er->config1 == reg1->config && | ||
2933 | er->config2 == reg2->config)) { | ||
2934 | atomic_inc(&er->ref); | ||
2935 | er->config = (hwc->config >> 32); | ||
2936 | er->config1 = reg1->config; | ||
2937 | er->config2 = reg2->config; | ||
2938 | ok = true; | ||
2939 | } | ||
2940 | } | ||
2941 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
2942 | |||
2943 | if (!ok) { | ||
2944 | /* | ||
2945 | * The Rbox events are always in pairs. The paired | ||
2946 | * events are functionally identical, but use different | ||
2947 | * extra registers. If we failed to take an extra | ||
2948 | * register, try the alternative. | ||
2949 | */ | ||
2950 | idx ^= 1; | ||
2951 | if (idx != reg1->idx % 6) { | ||
2952 | if (idx == 2) | ||
2953 | config1 >>= 8; | ||
2954 | else if (idx == 3) | ||
2955 | config1 <<= 8; | ||
2956 | goto again; | ||
2957 | } | ||
2958 | } else { | ||
2959 | if (!uncore_box_is_fake(box)) { | ||
2960 | if (idx != reg1->idx % 6) | ||
2961 | nhmex_rbox_alter_er(box, event); | ||
2962 | reg1->alloc = 1; | ||
2963 | } | ||
2964 | return NULL; | ||
2965 | } | ||
2966 | return &constraint_empty; | ||
2967 | } | ||
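
A worked example of the shared-register indexing above, under the layout
from the comment before this function: reg1->idx == 9 (2nd set, 4th
event) gives idx = 9 % 6 = 3, which shares a register with idx 2, so
er_idx = 2 + (9 / 6) * 5 = 7.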
2968 | |||
2969 | static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
2970 | { | ||
2971 | struct intel_uncore_extra_reg *er; | ||
2972 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
2973 | int idx, er_idx; | ||
2974 | |||
2975 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
2976 | return; | ||
2977 | |||
2978 | idx = reg1->idx % 6; | ||
2979 | er_idx = idx; | ||
2980 | if (er_idx > 2) | ||
2981 | er_idx--; | ||
2982 | er_idx += (reg1->idx / 6) * 5; | ||
2983 | |||
2984 | er = &box->shared_regs[er_idx]; | ||
2985 | if (idx == 2 || idx == 3) | ||
2986 | atomic_sub(1 << ((idx - 2) * 8), &er->ref); | ||
2987 | else | ||
2988 | atomic_dec(&er->ref); | ||
2989 | |||
2990 | reg1->alloc = 0; | ||
2991 | } | ||
2992 | |||
2993 | static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
2994 | { | ||
2995 | struct hw_perf_event *hwc = &event->hw; | ||
2996 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
2997 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
2998 | int idx; | ||
2999 | |||
3000 | idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> | ||
3001 | NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
3002 | if (idx >= 0x18) | ||
3003 | return -EINVAL; | ||
3004 | |||
3005 | reg1->idx = idx; | ||
3006 | reg1->config = event->attr.config1; | ||
3007 | |||
3008 | switch (idx % 6) { | ||
3009 | case 4: | ||
3010 | case 5: | ||
3011 | hwc->config |= event->attr.config & (~0ULL << 32); | ||
3012 | reg2->config = event->attr.config2; | ||
3013 | break; | ||
3014 | } | ||
3015 | return 0; | ||
3016 | } | ||
3017 | |||
3018 | static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
3019 | { | ||
3020 | struct hw_perf_event *hwc = &event->hw; | ||
3021 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
3022 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
3023 | int idx, port; | ||
3024 | |||
3025 | idx = reg1->idx; | ||
3026 | port = idx / 6 + box->pmu->pmu_idx * 4; | ||
3027 | |||
3028 | switch (idx % 6) { | ||
3029 | case 0: | ||
3030 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); | ||
3031 | break; | ||
3032 | case 1: | ||
3033 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); | ||
3034 | break; | ||
3035 | case 2: | ||
3036 | case 3: | ||
3037 | wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), | ||
3038 | uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); | ||
3039 | break; | ||
3040 | case 4: | ||
3041 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), | ||
3042 | hwc->config >> 32); | ||
3043 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); | ||
3044 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); | ||
3045 | break; | ||
3046 | case 5: | ||
3047 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), | ||
3048 | hwc->config >> 32); | ||
3049 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); | ||
3050 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); | ||
3051 | break; | ||
3052 | } | ||
3053 | |||
3054 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
3055 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); | ||
3056 | } | ||
3057 | |||
3058 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); | ||
3059 | DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); | ||
3060 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); | ||
3061 | DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); | ||
3062 | DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); | ||
3063 | |||
3064 | static struct attribute *nhmex_uncore_rbox_formats_attr[] = { | ||
3065 | &format_attr_event5.attr, | ||
3066 | &format_attr_xbr_mm_cfg.attr, | ||
3067 | &format_attr_xbr_match.attr, | ||
3068 | &format_attr_xbr_mask.attr, | ||
3069 | &format_attr_qlx_cfg.attr, | ||
3070 | &format_attr_iperf_cfg.attr, | ||
3071 | NULL, | ||
3072 | }; | ||
3073 | |||
3074 | static struct attribute_group nhmex_uncore_rbox_format_group = { | ||
3075 | .name = "format", | ||
3076 | .attrs = nhmex_uncore_rbox_formats_attr, | ||
3077 | }; | ||
3078 | |||
3079 | static struct uncore_event_desc nhmex_uncore_rbox_events[] = { | ||
3080 | INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), | ||
3081 | INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"), | ||
3082 | INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), | ||
3083 | INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), | ||
3084 | INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"), | ||
3085 | INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"), | ||
3086 | { /* end: all zeroes */ }, | ||
3087 | }; | ||
3088 | |||
3089 | static struct intel_uncore_ops nhmex_uncore_rbox_ops = { | ||
3090 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
3091 | .enable_event = nhmex_rbox_msr_enable_event, | ||
3092 | .hw_config = nhmex_rbox_hw_config, | ||
3093 | .get_constraint = nhmex_rbox_get_constraint, | ||
3094 | .put_constraint = nhmex_rbox_put_constraint, | ||
3095 | }; | ||
3096 | |||
3097 | static struct intel_uncore_type nhmex_uncore_rbox = { | ||
3098 | .name = "rbox", | ||
3099 | .num_counters = 8, | ||
3100 | .num_boxes = 2, | ||
3101 | .perf_ctr_bits = 48, | ||
3102 | .event_ctl = NHMEX_R_MSR_PMON_CTL0, | ||
3103 | .perf_ctr = NHMEX_R_MSR_PMON_CNT0, | ||
3104 | .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, | ||
3105 | .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, | ||
3106 | .msr_offset = NHMEX_R_MSR_OFFSET, | ||
3107 | .pair_ctr_ctl = 1, | ||
3108 | .num_shared_regs = 20, | ||
3109 | .event_descs = nhmex_uncore_rbox_events, | ||
3110 | .ops = &nhmex_uncore_rbox_ops, | ||
3111 | .format_group = &nhmex_uncore_rbox_format_group | ||
3112 | }; | ||
3113 | |||
3114 | static struct intel_uncore_type *nhmex_msr_uncores[] = { | ||
3115 | &nhmex_uncore_ubox, | ||
3116 | &nhmex_uncore_cbox, | ||
3117 | &nhmex_uncore_bbox, | ||
3118 | &nhmex_uncore_sbox, | ||
3119 | &nhmex_uncore_mbox, | ||
3120 | &nhmex_uncore_rbox, | ||
3121 | &nhmex_uncore_wbox, | ||
3122 | NULL, | ||
3123 | }; | ||
3124 | /* end of Nehalem-EX uncore support */ | ||
3125 | |||
3126 | static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) | 156 | static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) |
3127 | { | 157 | { |
3128 | struct hw_perf_event *hwc = &event->hw; | 158 | struct hw_perf_event *hwc = &event->hw; |
@@ -3140,7 +170,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_eve | |||
3140 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); | 170 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); |
3141 | } | 171 | } |
3142 | 172 | ||
3143 | static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) | 173 | void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) |
3144 | { | 174 | { |
3145 | u64 prev_count, new_count, delta; | 175 | u64 prev_count, new_count, delta; |
3146 | int shift; | 176 | int shift; |
@@ -3201,14 +231,14 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) | |||
3201 | return HRTIMER_RESTART; | 231 | return HRTIMER_RESTART; |
3202 | } | 232 | } |
3203 | 233 | ||
3204 | static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) | 234 | void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) |
3205 | { | 235 | { |
3206 | __hrtimer_start_range_ns(&box->hrtimer, | 236 | __hrtimer_start_range_ns(&box->hrtimer, |
3207 | ns_to_ktime(box->hrtimer_duration), 0, | 237 | ns_to_ktime(box->hrtimer_duration), 0, |
3208 | HRTIMER_MODE_REL_PINNED, 0); | 238 | HRTIMER_MODE_REL_PINNED, 0); |
3209 | } | 239 | } |
3210 | 240 | ||
3211 | static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) | 241 | void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) |
3212 | { | 242 | { |
3213 | hrtimer_cancel(&box->hrtimer); | 243 | hrtimer_cancel(&box->hrtimer); |
3214 | } | 244 | } |
@@ -3291,7 +321,7 @@ uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *eve | |||
3291 | } | 321 | } |
3292 | 322 | ||
3293 | if (event->attr.config == UNCORE_FIXED_EVENT) | 323 | if (event->attr.config == UNCORE_FIXED_EVENT) |
3294 | return &constraint_fixed; | 324 | return &uncore_constraint_fixed; |
3295 | 325 | ||
3296 | if (type->constraints) { | 326 | if (type->constraints) { |
3297 | for_each_event_constraint(c, type->constraints) { | 327 | for_each_event_constraint(c, type->constraints) { |
@@ -3496,7 +526,7 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags) | |||
3496 | event->hw.last_tag = ~0ULL; | 526 | event->hw.last_tag = ~0ULL; |
3497 | } | 527 | } |
3498 | 528 | ||
3499 | static void uncore_pmu_event_read(struct perf_event *event) | 529 | void uncore_pmu_event_read(struct perf_event *event) |
3500 | { | 530 | { |
3501 | struct intel_uncore_box *box = uncore_event_to_box(event); | 531 | struct intel_uncore_box *box = uncore_event_to_box(event); |
3502 | uncore_perf_event_update(box, event); | 532 | uncore_perf_event_update(box, event); |
@@ -3635,7 +665,7 @@ static struct attribute_group uncore_pmu_attr_group = { | |||
3635 | .attrs = uncore_pmu_attrs, | 665 | .attrs = uncore_pmu_attrs, |
3636 | }; | 666 | }; |
3637 | 667 | ||
3638 | static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu) | 668 | static int uncore_pmu_register(struct intel_uncore_pmu *pmu) |
3639 | { | 669 | { |
3640 | int ret; | 670 | int ret; |
3641 | 671 | ||
@@ -3758,9 +788,6 @@ fail: | |||
3758 | return ret; | 788 | return ret; |
3759 | } | 789 | } |
3760 | 790 | ||
3761 | static struct pci_driver *uncore_pci_driver; | ||
3762 | static bool pcidrv_registered; | ||
3763 | |||
3764 | /* | 791 | /* |
3765 | * add a pci uncore device | 792 | * add a pci uncore device |
3766 | */ | 793 | */ |
@@ -3770,18 +797,20 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id | |||
3770 | struct intel_uncore_box *box; | 797 | struct intel_uncore_box *box; |
3771 | struct intel_uncore_type *type; | 798 | struct intel_uncore_type *type; |
3772 | int phys_id; | 799 | int phys_id; |
800 | bool first_box = false; | ||
3773 | 801 | ||
3774 | phys_id = pcibus_to_physid[pdev->bus->number]; | 802 | phys_id = uncore_pcibus_to_physid[pdev->bus->number]; |
3775 | if (phys_id < 0) | 803 | if (phys_id < 0) |
3776 | return -ENODEV; | 804 | return -ENODEV; |
3777 | 805 | ||
3778 | if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { | 806 | if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { |
3779 | extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev; | 807 | int idx = UNCORE_PCI_DEV_IDX(id->driver_data); |
808 | uncore_extra_pci_dev[phys_id][idx] = pdev; | ||
3780 | pci_set_drvdata(pdev, NULL); | 809 | pci_set_drvdata(pdev, NULL); |
3781 | return 0; | 810 | return 0; |
3782 | } | 811 | } |
3783 | 812 | ||
3784 | type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; | 813 | type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; |
3785 | box = uncore_alloc_box(type, NUMA_NO_NODE); | 814 | box = uncore_alloc_box(type, NUMA_NO_NODE); |
3786 | if (!box) | 815 | if (!box) |
3787 | return -ENOMEM; | 816 | return -ENOMEM; |
@@ -3803,9 +832,13 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id | |||
3803 | pci_set_drvdata(pdev, box); | 832 | pci_set_drvdata(pdev, box); |
3804 | 833 | ||
3805 | raw_spin_lock(&uncore_box_lock); | 834 | raw_spin_lock(&uncore_box_lock); |
835 | if (list_empty(&pmu->box_list)) | ||
836 | first_box = true; | ||
3806 | list_add_tail(&box->list, &pmu->box_list); | 837 | list_add_tail(&box->list, &pmu->box_list); |
3807 | raw_spin_unlock(&uncore_box_lock); | 838 | raw_spin_unlock(&uncore_box_lock); |
3808 | 839 | ||
840 | if (first_box) | ||
841 | uncore_pmu_register(pmu); | ||
3809 | return 0; | 842 | return 0; |
3810 | } | 843 | } |
3811 | 844 | ||
@@ -3813,13 +846,14 @@ static void uncore_pci_remove(struct pci_dev *pdev) | |||
3813 | { | 846 | { |
3814 | struct intel_uncore_box *box = pci_get_drvdata(pdev); | 847 | struct intel_uncore_box *box = pci_get_drvdata(pdev); |
3815 | struct intel_uncore_pmu *pmu; | 848 | struct intel_uncore_pmu *pmu; |
3816 | int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number]; | 849 | int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number]; |
850 | bool last_box = false; | ||
3817 | 851 | ||
3818 | box = pci_get_drvdata(pdev); | 852 | box = pci_get_drvdata(pdev); |
3819 | if (!box) { | 853 | if (!box) { |
3820 | for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { | 854 | for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { |
3821 | if (extra_pci_dev[phys_id][i] == pdev) { | 855 | if (uncore_extra_pci_dev[phys_id][i] == pdev) { |
3822 | extra_pci_dev[phys_id][i] = NULL; | 856 | uncore_extra_pci_dev[phys_id][i] = NULL; |
3823 | break; | 857 | break; |
3824 | } | 858 | } |
3825 | } | 859 | } |
@@ -3835,6 +869,8 @@ static void uncore_pci_remove(struct pci_dev *pdev) | |||
3835 | 869 | ||
3836 | raw_spin_lock(&uncore_box_lock); | 870 | raw_spin_lock(&uncore_box_lock); |
3837 | list_del(&box->list); | 871 | list_del(&box->list); |
872 | if (list_empty(&pmu->box_list)) | ||
873 | last_box = true; | ||
3838 | raw_spin_unlock(&uncore_box_lock); | 874 | raw_spin_unlock(&uncore_box_lock); |
3839 | 875 | ||
3840 | for_each_possible_cpu(cpu) { | 876 | for_each_possible_cpu(cpu) { |
@@ -3846,6 +882,9 @@ static void uncore_pci_remove(struct pci_dev *pdev) | |||
3846 | 882 | ||
3847 | WARN_ON_ONCE(atomic_read(&box->refcnt) != 1); | 883 | WARN_ON_ONCE(atomic_read(&box->refcnt) != 1); |
3848 | kfree(box); | 884 | kfree(box); |
885 | |||
886 | if (last_box) | ||
887 | perf_pmu_unregister(&pmu->pmu); | ||
3849 | } | 888 | } |
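The probe/remove hunks above switch the PCI uncore PMUs to lazy registration: the first box probed for a pmu registers it with perf, and removing the last box unregisters it. A minimal sketch of the pattern, with names taken from the patch and the surrounding error handling omitted:

	raw_spin_lock(&uncore_box_lock);
	first_box = list_empty(&pmu->box_list);	/* no box yet => PMU not registered */
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	if (first_box)
		uncore_pmu_register(pmu);	/* outside the spinlock: may sleep */

The first/last decision is made under uncore_box_lock so that concurrent probe and remove calls agree on who saw the first (or last) box, while the perf_pmu_register()/perf_pmu_unregister() calls stay outside the raw spinlock because they can sleep.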
3850 | 889 | ||
3851 | static int __init uncore_pci_init(void) | 890 | static int __init uncore_pci_init(void) |
@@ -3854,46 +893,32 @@ static int __init uncore_pci_init(void) | |||
3854 | 893 | ||
3855 | switch (boot_cpu_data.x86_model) { | 894 | switch (boot_cpu_data.x86_model) { |
3856 | case 45: /* Sandy Bridge-EP */ | 895 | case 45: /* Sandy Bridge-EP */ |
3857 | ret = snbep_pci2phy_map_init(0x3ce0); | 896 | ret = snbep_uncore_pci_init(); |
3858 | if (ret) | ||
3859 | return ret; | ||
3860 | pci_uncores = snbep_pci_uncores; | ||
3861 | uncore_pci_driver = &snbep_uncore_pci_driver; | ||
3862 | break; | 897 | break; |
3863 | case 62: /* IvyTown */ | 898 | case 62: /* Ivy Bridge-EP */ |
3864 | ret = snbep_pci2phy_map_init(0x0e1e); | 899 | ret = ivbep_uncore_pci_init(); |
3865 | if (ret) | 900 | break; |
3866 | return ret; | 901 | case 63: /* Haswell-EP */ |
3867 | pci_uncores = ivt_pci_uncores; | 902 | ret = hswep_uncore_pci_init(); |
3868 | uncore_pci_driver = &ivt_uncore_pci_driver; | ||
3869 | break; | 903 | break; |
3870 | case 42: /* Sandy Bridge */ | 904 | case 42: /* Sandy Bridge */ |
3871 | ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC); | 905 | ret = snb_uncore_pci_init(); |
3872 | if (ret) | ||
3873 | return ret; | ||
3874 | pci_uncores = snb_pci_uncores; | ||
3875 | uncore_pci_driver = &snb_uncore_pci_driver; | ||
3876 | break; | 906 | break; |
3877 | case 58: /* Ivy Bridge */ | 907 | case 58: /* Ivy Bridge */ |
3878 | ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC); | 908 | ret = ivb_uncore_pci_init(); |
3879 | if (ret) | ||
3880 | return ret; | ||
3881 | pci_uncores = snb_pci_uncores; | ||
3882 | uncore_pci_driver = &ivb_uncore_pci_driver; | ||
3883 | break; | 909 | break; |
3884 | case 60: /* Haswell */ | 910 | case 60: /* Haswell */ |
3885 | case 69: /* Haswell Celeron */ | 911 | case 69: /* Haswell Celeron */ |
3886 | ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC); | 912 | ret = hsw_uncore_pci_init(); |
3887 | if (ret) | ||
3888 | return ret; | ||
3889 | pci_uncores = snb_pci_uncores; | ||
3890 | uncore_pci_driver = &hsw_uncore_pci_driver; | ||
3891 | break; | 913 | break; |
3892 | default: | 914 | default: |
3893 | return 0; | 915 | return 0; |
3894 | } | 916 | } |
3895 | 917 | ||
3896 | ret = uncore_types_init(pci_uncores); | 918 | if (ret) |
919 | return ret; | ||
920 | |||
921 | ret = uncore_types_init(uncore_pci_uncores); | ||
3897 | if (ret) | 922 | if (ret) |
3898 | return ret; | 923 | return ret; |
3899 | 924 | ||
@@ -3904,7 +929,7 @@ static int __init uncore_pci_init(void) | |||
3904 | if (ret == 0) | 929 | if (ret == 0) |
3905 | pcidrv_registered = true; | 930 | pcidrv_registered = true; |
3906 | else | 931 | else |
3907 | uncore_types_exit(pci_uncores); | 932 | uncore_types_exit(uncore_pci_uncores); |
3908 | 933 | ||
3909 | return ret; | 934 | return ret; |
3910 | } | 935 | } |
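Each model-specific *_uncore_pci_init() now bundles what the old switch body did inline: probe the PCI-bus-to-socket mapping and point uncore_pci_uncores / uncore_pci_driver at the model's tables. A sketch of the Sandy Bridge-EP case, reconstructed from the lines removed above (the real body lives in perf_event_intel_uncore_snbep.c):

	int snbep_uncore_pci_init(void)
	{
		/* same steps the old switch performed for model 45 */
		int ret = snbep_pci2phy_map_init(0x3ce0);
		if (ret)
			return ret;
		uncore_pci_uncores = snbep_pci_uncores;
		uncore_pci_driver = &snbep_uncore_pci_driver;
		return 0;
	}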
@@ -3914,7 +939,7 @@ static void __init uncore_pci_exit(void) | |||
3914 | if (pcidrv_registered) { | 939 | if (pcidrv_registered) { |
3915 | pcidrv_registered = false; | 940 | pcidrv_registered = false; |
3916 | pci_unregister_driver(uncore_pci_driver); | 941 | pci_unregister_driver(uncore_pci_driver); |
3917 | uncore_types_exit(pci_uncores); | 942 | uncore_types_exit(uncore_pci_uncores); |
3918 | } | 943 | } |
3919 | } | 944 | } |
3920 | 945 | ||
@@ -3940,8 +965,8 @@ static void uncore_cpu_dying(int cpu) | |||
3940 | struct intel_uncore_box *box; | 965 | struct intel_uncore_box *box; |
3941 | int i, j; | 966 | int i, j; |
3942 | 967 | ||
3943 | for (i = 0; msr_uncores[i]; i++) { | 968 | for (i = 0; uncore_msr_uncores[i]; i++) { |
3944 | type = msr_uncores[i]; | 969 | type = uncore_msr_uncores[i]; |
3945 | for (j = 0; j < type->num_boxes; j++) { | 970 | for (j = 0; j < type->num_boxes; j++) { |
3946 | pmu = &type->pmus[j]; | 971 | pmu = &type->pmus[j]; |
3947 | box = *per_cpu_ptr(pmu->box, cpu); | 972 | box = *per_cpu_ptr(pmu->box, cpu); |
@@ -3961,8 +986,8 @@ static int uncore_cpu_starting(int cpu) | |||
3961 | 986 | ||
3962 | phys_id = topology_physical_package_id(cpu); | 987 | phys_id = topology_physical_package_id(cpu); |
3963 | 988 | ||
3964 | for (i = 0; msr_uncores[i]; i++) { | 989 | for (i = 0; uncore_msr_uncores[i]; i++) { |
3965 | type = msr_uncores[i]; | 990 | type = uncore_msr_uncores[i]; |
3966 | for (j = 0; j < type->num_boxes; j++) { | 991 | for (j = 0; j < type->num_boxes; j++) { |
3967 | pmu = &type->pmus[j]; | 992 | pmu = &type->pmus[j]; |
3968 | box = *per_cpu_ptr(pmu->box, cpu); | 993 | box = *per_cpu_ptr(pmu->box, cpu); |
@@ -4002,8 +1027,8 @@ static int uncore_cpu_prepare(int cpu, int phys_id) | |||
4002 | struct intel_uncore_box *box; | 1027 | struct intel_uncore_box *box; |
4003 | int i, j; | 1028 | int i, j; |
4004 | 1029 | ||
4005 | for (i = 0; msr_uncores[i]; i++) { | 1030 | for (i = 0; uncore_msr_uncores[i]; i++) { |
4006 | type = msr_uncores[i]; | 1031 | type = uncore_msr_uncores[i]; |
4007 | for (j = 0; j < type->num_boxes; j++) { | 1032 | for (j = 0; j < type->num_boxes; j++) { |
4008 | pmu = &type->pmus[j]; | 1033 | pmu = &type->pmus[j]; |
4009 | if (pmu->func_id < 0) | 1034 | if (pmu->func_id < 0) |
@@ -4083,8 +1108,8 @@ static void uncore_event_exit_cpu(int cpu) | |||
4083 | if (target >= 0) | 1108 | if (target >= 0) |
4084 | cpumask_set_cpu(target, &uncore_cpu_mask); | 1109 | cpumask_set_cpu(target, &uncore_cpu_mask); |
4085 | 1110 | ||
4086 | uncore_change_context(msr_uncores, cpu, target); | 1111 | uncore_change_context(uncore_msr_uncores, cpu, target); |
4087 | uncore_change_context(pci_uncores, cpu, target); | 1112 | uncore_change_context(uncore_pci_uncores, cpu, target); |
4088 | } | 1113 | } |
4089 | 1114 | ||
4090 | static void uncore_event_init_cpu(int cpu) | 1115 | static void uncore_event_init_cpu(int cpu) |
@@ -4099,8 +1124,8 @@ static void uncore_event_init_cpu(int cpu) | |||
4099 | 1124 | ||
4100 | cpumask_set_cpu(cpu, &uncore_cpu_mask); | 1125 | cpumask_set_cpu(cpu, &uncore_cpu_mask); |
4101 | 1126 | ||
4102 | uncore_change_context(msr_uncores, -1, cpu); | 1127 | uncore_change_context(uncore_msr_uncores, -1, cpu); |
4103 | uncore_change_context(pci_uncores, -1, cpu); | 1128 | uncore_change_context(uncore_pci_uncores, -1, cpu); |
4104 | } | 1129 | } |
4105 | 1130 | ||
4106 | static int uncore_cpu_notifier(struct notifier_block *self, | 1131 | static int uncore_cpu_notifier(struct notifier_block *self, |
@@ -4160,47 +1185,37 @@ static void __init uncore_cpu_setup(void *dummy) | |||
4160 | 1185 | ||
4161 | static int __init uncore_cpu_init(void) | 1186 | static int __init uncore_cpu_init(void) |
4162 | { | 1187 | { |
4163 | int ret, max_cores; | 1188 | int ret; |
4164 | 1189 | ||
4165 | max_cores = boot_cpu_data.x86_max_cores; | ||
4166 | switch (boot_cpu_data.x86_model) { | 1190 | switch (boot_cpu_data.x86_model) { |
4167 | case 26: /* Nehalem */ | 1191 | case 26: /* Nehalem */ |
4168 | case 30: | 1192 | case 30: |
4169 | case 37: /* Westmere */ | 1193 | case 37: /* Westmere */ |
4170 | case 44: | 1194 | case 44: |
4171 | msr_uncores = nhm_msr_uncores; | 1195 | nhm_uncore_cpu_init(); |
4172 | break; | 1196 | break; |
4173 | case 42: /* Sandy Bridge */ | 1197 | case 42: /* Sandy Bridge */ |
4174 | case 58: /* Ivy Bridge */ | 1198 | case 58: /* Ivy Bridge */ |
4175 | if (snb_uncore_cbox.num_boxes > max_cores) | 1199 | snb_uncore_cpu_init(); |
4176 | snb_uncore_cbox.num_boxes = max_cores; | ||
4177 | msr_uncores = snb_msr_uncores; | ||
4178 | break; | 1200 | break; |
4179 | case 45: /* Sandy Bridge-EP */ | 1201 | case 45: /* Sandy Bridge-EP */ |
4180 | if (snbep_uncore_cbox.num_boxes > max_cores) | 1202 | snbep_uncore_cpu_init(); |
4181 | snbep_uncore_cbox.num_boxes = max_cores; | ||
4182 | msr_uncores = snbep_msr_uncores; | ||
4183 | break; | 1203 | break; |
4184 | case 46: /* Nehalem-EX */ | 1204 | case 46: /* Nehalem-EX */ |
4185 | uncore_nhmex = true; | ||
4186 | case 47: /* Westmere-EX aka. Xeon E7 */ | 1205 | case 47: /* Westmere-EX aka. Xeon E7 */ |
4187 | if (!uncore_nhmex) | 1206 | nhmex_uncore_cpu_init(); |
4188 | nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; | ||
4189 | if (nhmex_uncore_cbox.num_boxes > max_cores) | ||
4190 | nhmex_uncore_cbox.num_boxes = max_cores; | ||
4191 | msr_uncores = nhmex_msr_uncores; | ||
4192 | break; | 1207 | break; |
4193 | case 62: /* IvyTown */ | 1208 | case 62: /* Ivy Bridge-EP */ |
4194 | if (ivt_uncore_cbox.num_boxes > max_cores) | 1209 | ivbep_uncore_cpu_init(); |
4195 | ivt_uncore_cbox.num_boxes = max_cores; | 1210 | break; |
4196 | msr_uncores = ivt_msr_uncores; | 1211 | case 63: /* Haswell-EP */ |
1212 | hswep_uncore_cpu_init(); | ||
4197 | break; | 1213 | break; |
4198 | |||
4199 | default: | 1214 | default: |
4200 | return 0; | 1215 | return 0; |
4201 | } | 1216 | } |
4202 | 1217 | ||
4203 | ret = uncore_types_init(msr_uncores); | 1218 | ret = uncore_types_init(uncore_msr_uncores); |
4204 | if (ret) | 1219 | if (ret) |
4205 | return ret; | 1220 | return ret; |
4206 | 1221 | ||
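The CPU-side helpers follow the same split; in particular, clamping num_boxes to the package core count moves into each *_uncore_cpu_init(). Reconstructed from the Sandy Bridge lines removed above, the helper in perf_event_intel_uncore_snb.c is roughly:

	void snb_uncore_cpu_init(void)
	{
		uncore_msr_uncores = snb_msr_uncores;
		/* never advertise more C-boxes than cores in the package */
		if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
			snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	}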
@@ -4213,16 +1228,8 @@ static int __init uncore_pmus_register(void) | |||
4213 | struct intel_uncore_type *type; | 1228 | struct intel_uncore_type *type; |
4214 | int i, j; | 1229 | int i, j; |
4215 | 1230 | ||
4216 | for (i = 0; msr_uncores[i]; i++) { | 1231 | for (i = 0; uncore_msr_uncores[i]; i++) { |
4217 | type = msr_uncores[i]; | 1232 | type = uncore_msr_uncores[i]; |
4218 | for (j = 0; j < type->num_boxes; j++) { | ||
4219 | pmu = &type->pmus[j]; | ||
4220 | uncore_pmu_register(pmu); | ||
4221 | } | ||
4222 | } | ||
4223 | |||
4224 | for (i = 0; pci_uncores[i]; i++) { | ||
4225 | type = pci_uncores[i]; | ||
4226 | for (j = 0; j < type->num_boxes; j++) { | 1233 | for (j = 0; j < type->num_boxes; j++) { |
4227 | pmu = &type->pmus[j]; | 1234 | pmu = &type->pmus[j]; |
4228 | uncore_pmu_register(pmu); | 1235 | uncore_pmu_register(pmu); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 90236f0c94a9..18eb78bbdd10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -24,395 +24,6 @@ | |||
24 | 24 | ||
25 | #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) | 25 | #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) |
26 | 26 | ||
27 | /* SNB event control */ | ||
28 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | ||
29 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 | ||
30 | #define SNB_UNC_CTL_EDGE_DET (1 << 18) | ||
31 | #define SNB_UNC_CTL_EN (1 << 22) | ||
32 | #define SNB_UNC_CTL_INVERT (1 << 23) | ||
33 | #define SNB_UNC_CTL_CMASK_MASK 0x1f000000 | ||
34 | #define NHM_UNC_CTL_CMASK_MASK 0xff000000 | ||
35 | #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) | ||
36 | |||
37 | #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | ||
38 | SNB_UNC_CTL_UMASK_MASK | \ | ||
39 | SNB_UNC_CTL_EDGE_DET | \ | ||
40 | SNB_UNC_CTL_INVERT | \ | ||
41 | SNB_UNC_CTL_CMASK_MASK) | ||
42 | |||
43 | #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | ||
44 | SNB_UNC_CTL_UMASK_MASK | \ | ||
45 | SNB_UNC_CTL_EDGE_DET | \ | ||
46 | SNB_UNC_CTL_INVERT | \ | ||
47 | NHM_UNC_CTL_CMASK_MASK) | ||
48 | |||
49 | /* SNB global control register */ | ||
50 | #define SNB_UNC_PERF_GLOBAL_CTL 0x391 | ||
51 | #define SNB_UNC_FIXED_CTR_CTRL 0x394 | ||
52 | #define SNB_UNC_FIXED_CTR 0x395 | ||
53 | |||
54 | /* SNB uncore global control */ | ||
55 | #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) | ||
56 | #define SNB_UNC_GLOBAL_CTL_EN (1 << 29) | ||
57 | |||
58 | /* SNB Cbo register */ | ||
59 | #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 | ||
60 | #define SNB_UNC_CBO_0_PER_CTR0 0x706 | ||
61 | #define SNB_UNC_CBO_MSR_OFFSET 0x10 | ||
62 | |||
63 | /* NHM global control register */ | ||
64 | #define NHM_UNC_PERF_GLOBAL_CTL 0x391 | ||
65 | #define NHM_UNC_FIXED_CTR 0x394 | ||
66 | #define NHM_UNC_FIXED_CTR_CTRL 0x395 | ||
67 | |||
68 | /* NHM uncore global control */ | ||
69 | #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) | ||
70 | #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) | ||
71 | |||
72 | /* NHM uncore register */ | ||
73 | #define NHM_UNC_PERFEVTSEL0 0x3c0 | ||
74 | #define NHM_UNC_UNCORE_PMC0 0x3b0 | ||
75 | |||
76 | /* SNB-EP Box level control */ | ||
77 | #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) | ||
78 | #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) | ||
79 | #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) | ||
80 | #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) | ||
81 | #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ | ||
82 | SNBEP_PMON_BOX_CTL_RST_CTRS | \ | ||
83 | SNBEP_PMON_BOX_CTL_FRZ_EN) | ||
84 | /* SNB-EP event control */ | ||
85 | #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
86 | #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
87 | #define SNBEP_PMON_CTL_RST (1 << 17) | ||
88 | #define SNBEP_PMON_CTL_EDGE_DET (1 << 18) | ||
89 | #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) | ||
90 | #define SNBEP_PMON_CTL_EN (1 << 22) | ||
91 | #define SNBEP_PMON_CTL_INVERT (1 << 23) | ||
92 | #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 | ||
93 | #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
94 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
95 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
96 | SNBEP_PMON_CTL_INVERT | \ | ||
97 | SNBEP_PMON_CTL_TRESH_MASK) | ||
98 | |||
99 | /* SNB-EP Ubox event control */ | ||
100 | #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 | ||
101 | #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ | ||
102 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
103 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
104 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
105 | SNBEP_PMON_CTL_INVERT | \ | ||
106 | SNBEP_U_MSR_PMON_CTL_TRESH_MASK) | ||
107 | |||
108 | #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) | ||
109 | #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
110 | SNBEP_CBO_PMON_CTL_TID_EN) | ||
111 | |||
112 | /* SNB-EP PCU event control */ | ||
113 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 | ||
114 | #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 | ||
115 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) | ||
116 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) | ||
117 | #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ | ||
118 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
119 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ | ||
120 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
121 | SNBEP_PMON_CTL_EV_SEL_EXT | \ | ||
122 | SNBEP_PMON_CTL_INVERT | \ | ||
123 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ | ||
124 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | ||
125 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | ||
126 | |||
127 | #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
128 | (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
129 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
130 | |||
131 | /* SNB-EP pci control register */ | ||
132 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 | ||
133 | #define SNBEP_PCI_PMON_CTL0 0xd8 | ||
134 | /* SNB-EP pci counter register */ | ||
135 | #define SNBEP_PCI_PMON_CTR0 0xa0 | ||
136 | |||
137 | /* SNB-EP home agent register */ | ||
138 | #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 | ||
139 | #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 | ||
140 | #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 | ||
141 | /* SNB-EP memory controller register */ | ||
142 | #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 | ||
143 | #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 | ||
144 | /* SNB-EP QPI register */ | ||
145 | #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 | ||
146 | #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c | ||
147 | #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 | ||
148 | #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c | ||
149 | |||
150 | /* SNB-EP Ubox register */ | ||
151 | #define SNBEP_U_MSR_PMON_CTR0 0xc16 | ||
152 | #define SNBEP_U_MSR_PMON_CTL0 0xc10 | ||
153 | |||
154 | #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 | ||
155 | #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 | ||
156 | |||
157 | /* SNB-EP Cbo register */ | ||
158 | #define SNBEP_C0_MSR_PMON_CTR0 0xd16 | ||
159 | #define SNBEP_C0_MSR_PMON_CTL0 0xd10 | ||
160 | #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 | ||
161 | #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 | ||
162 | #define SNBEP_CBO_MSR_OFFSET 0x20 | ||
163 | |||
164 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f | ||
165 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 | ||
166 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 | ||
167 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 | ||
168 | |||
169 | #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ | ||
170 | .event = (e), \ | ||
171 | .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ | ||
172 | .config_mask = (m), \ | ||
173 | .idx = (i) \ | ||
174 | } | ||
175 | |||
176 | /* SNB-EP PCU register */ | ||
177 | #define SNBEP_PCU_MSR_PMON_CTR0 0xc36 | ||
178 | #define SNBEP_PCU_MSR_PMON_CTL0 0xc30 | ||
179 | #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 | ||
180 | #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 | ||
181 | #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff | ||
182 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc | ||
183 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd | ||
184 | |||
185 | /* IVT event control */ | ||
186 | #define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ | ||
187 | SNBEP_PMON_BOX_CTL_RST_CTRS) | ||
188 | #define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
189 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
190 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
191 | SNBEP_PMON_CTL_TRESH_MASK) | ||
192 | /* IVT Ubox */ | ||
193 | #define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
194 | #define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
195 | #define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) | ||
196 | |||
197 | #define IVT_U_MSR_PMON_RAW_EVENT_MASK \ | ||
198 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
199 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
200 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
201 | SNBEP_U_MSR_PMON_CTL_TRESH_MASK) | ||
202 | /* IVT Cbo */ | ||
203 | #define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \ | ||
204 | SNBEP_CBO_PMON_CTL_TID_EN) | ||
205 | |||
206 | #define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) | ||
207 | #define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) | ||
208 | #define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) | ||
209 | #define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) | ||
210 | #define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) | ||
211 | #define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) | ||
212 | #define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) | ||
213 | #define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63) | ||
214 | |||
215 | /* IVT home agent */ | ||
216 | #define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) | ||
217 | #define IVT_HA_PCI_PMON_RAW_EVENT_MASK \ | ||
218 | (IVT_PMON_RAW_EVENT_MASK | \ | ||
219 | IVT_HA_PCI_PMON_CTL_Q_OCC_RST) | ||
220 | /* IVT PCU */ | ||
221 | #define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \ | ||
222 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
223 | SNBEP_PMON_CTL_EV_SEL_EXT | \ | ||
224 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ | ||
225 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
226 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ | ||
227 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | ||
228 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | ||
229 | /* IVT QPI */ | ||
230 | #define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
231 | (IVT_PMON_RAW_EVENT_MASK | \ | ||
232 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
233 | |||
234 | /* NHM-EX event control */ | ||
235 | #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
236 | #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
237 | #define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) | ||
238 | #define NHMEX_PMON_CTL_EDGE_DET (1 << 18) | ||
239 | #define NHMEX_PMON_CTL_PMI_EN (1 << 20) | ||
240 | #define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) | ||
241 | #define NHMEX_PMON_CTL_INVERT (1 << 23) | ||
242 | #define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 | ||
243 | #define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
244 | NHMEX_PMON_CTL_UMASK_MASK | \ | ||
245 | NHMEX_PMON_CTL_EDGE_DET | \ | ||
246 | NHMEX_PMON_CTL_INVERT | \ | ||
247 | NHMEX_PMON_CTL_TRESH_MASK) | ||
248 | |||
249 | /* NHM-EX Ubox */ | ||
250 | #define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
251 | #define NHMEX_U_MSR_PMON_CTR 0xc11 | ||
252 | #define NHMEX_U_MSR_PMON_EV_SEL 0xc10 | ||
253 | |||
254 | #define NHMEX_U_PMON_GLOBAL_EN (1 << 0) | ||
255 | #define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e | ||
256 | #define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) | ||
257 | #define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) | ||
258 | #define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
259 | |||
260 | #define NHMEX_U_PMON_RAW_EVENT_MASK \ | ||
261 | (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
262 | NHMEX_PMON_CTL_EDGE_DET) | ||
263 | |||
264 | /* NHM-EX Cbox */ | ||
265 | #define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 | ||
266 | #define NHMEX_C0_MSR_PMON_CTR0 0xd11 | ||
267 | #define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 | ||
268 | #define NHMEX_C_MSR_OFFSET 0x20 | ||
269 | |||
270 | /* NHM-EX Bbox */ | ||
271 | #define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 | ||
272 | #define NHMEX_B0_MSR_PMON_CTR0 0xc31 | ||
273 | #define NHMEX_B0_MSR_PMON_CTL0 0xc30 | ||
274 | #define NHMEX_B_MSR_OFFSET 0x40 | ||
275 | #define NHMEX_B0_MSR_MATCH 0xe45 | ||
276 | #define NHMEX_B0_MSR_MASK 0xe46 | ||
277 | #define NHMEX_B1_MSR_MATCH 0xe4d | ||
278 | #define NHMEX_B1_MSR_MASK 0xe4e | ||
279 | |||
280 | #define NHMEX_B_PMON_CTL_EN (1 << 0) | ||
281 | #define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 | ||
282 | #define NHMEX_B_PMON_CTL_EV_SEL_MASK \ | ||
283 | (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) | ||
284 | #define NHMEX_B_PMON_CTR_SHIFT 6 | ||
285 | #define NHMEX_B_PMON_CTR_MASK \ | ||
286 | (0x3 << NHMEX_B_PMON_CTR_SHIFT) | ||
287 | #define NHMEX_B_PMON_RAW_EVENT_MASK \ | ||
288 | (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ | ||
289 | NHMEX_B_PMON_CTR_MASK) | ||
290 | |||
291 | /* NHM-EX Sbox */ | ||
292 | #define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 | ||
293 | #define NHMEX_S0_MSR_PMON_CTR0 0xc51 | ||
294 | #define NHMEX_S0_MSR_PMON_CTL0 0xc50 | ||
295 | #define NHMEX_S_MSR_OFFSET 0x80 | ||
296 | #define NHMEX_S0_MSR_MM_CFG 0xe48 | ||
297 | #define NHMEX_S0_MSR_MATCH 0xe49 | ||
298 | #define NHMEX_S0_MSR_MASK 0xe4a | ||
299 | #define NHMEX_S1_MSR_MM_CFG 0xe58 | ||
300 | #define NHMEX_S1_MSR_MATCH 0xe59 | ||
301 | #define NHMEX_S1_MSR_MASK 0xe5a | ||
302 | |||
303 | #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) | ||
304 | #define NHMEX_S_EVENT_TO_R_PROG_EV 0 | ||
305 | |||
306 | /* NHM-EX Mbox */ | ||
307 | #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 | ||
308 | #define NHMEX_M0_MSR_PMU_DSP 0xca5 | ||
309 | #define NHMEX_M0_MSR_PMU_ISS 0xca6 | ||
310 | #define NHMEX_M0_MSR_PMU_MAP 0xca7 | ||
311 | #define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 | ||
312 | #define NHMEX_M0_MSR_PMU_PGT 0xca9 | ||
313 | #define NHMEX_M0_MSR_PMU_PLD 0xcaa | ||
314 | #define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab | ||
315 | #define NHMEX_M0_MSR_PMU_CTL0 0xcb0 | ||
316 | #define NHMEX_M0_MSR_PMU_CNT0 0xcb1 | ||
317 | #define NHMEX_M_MSR_OFFSET 0x40 | ||
318 | #define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 | ||
319 | #define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c | ||
320 | |||
321 | #define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) | ||
322 | #define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL | ||
323 | #define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL | ||
324 | #define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 | ||
325 | |||
326 | #define NHMEX_M_PMON_CTL_EN (1 << 0) | ||
327 | #define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) | ||
328 | #define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 | ||
329 | #define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ | ||
330 | (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) | ||
331 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 | ||
332 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ | ||
333 | (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) | ||
334 | #define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) | ||
335 | #define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) | ||
336 | #define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 | ||
337 | #define NHMEX_M_PMON_CTL_INC_SEL_MASK \ | ||
338 | (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
339 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 | ||
340 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ | ||
341 | (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | ||
342 | #define NHMEX_M_PMON_RAW_EVENT_MASK \ | ||
343 | (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ | ||
344 | NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ | ||
345 | NHMEX_M_PMON_CTL_WRAP_MODE | \ | ||
346 | NHMEX_M_PMON_CTL_FLAG_MODE | \ | ||
347 | NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
348 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | ||
349 | |||
350 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) | ||
351 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) | ||
352 | |||
353 | #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) | ||
354 | #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) | ||
355 | |||
356 | /* | ||
357 | * Use bits 9-13 to select the event if bit 7 is not set; | ||
358 | * otherwise use bits 19-21 to select the event. | ||
359 | */ | ||
360 | #define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
361 | #define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ | ||
362 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
363 | #define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
364 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
365 | #define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ | ||
366 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
367 | #define MBOX_INC_SEL_EXTAR_REG(c, r) \ | ||
368 | EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
369 | MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) | ||
370 | #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ | ||
371 | EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
372 | MBOX_SET_FLAG_SEL_MASK, \ | ||
373 | (u64)-1, NHMEX_M_##r) | ||
374 | |||
375 | /* NHM-EX Rbox */ | ||
376 | #define NHMEX_R_MSR_GLOBAL_CTL 0xe00 | ||
377 | #define NHMEX_R_MSR_PMON_CTL0 0xe10 | ||
378 | #define NHMEX_R_MSR_PMON_CNT0 0xe11 | ||
379 | #define NHMEX_R_MSR_OFFSET 0x20 | ||
380 | |||
381 | #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ | ||
382 | ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) | ||
383 | #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) | ||
384 | #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) | ||
385 | #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ | ||
386 | (((n) < 4 ? 0 : 0x10) + (n) * 4) | ||
387 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ | ||
388 | (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
389 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ | ||
390 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) | ||
391 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ | ||
392 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) | ||
393 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ | ||
394 | (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
395 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ | ||
396 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) | ||
397 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ | ||
398 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) | ||
399 | |||
400 | #define NHMEX_R_PMON_CTL_EN (1 << 0) | ||
401 | #define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 | ||
402 | #define NHMEX_R_PMON_CTL_EV_SEL_MASK \ | ||
403 | (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) | ||
404 | #define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) | ||
405 | #define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK | ||
406 | |||
407 | /* NHM-EX Wbox */ | ||
408 | #define NHMEX_W_MSR_GLOBAL_CTL 0xc80 | ||
409 | #define NHMEX_W_MSR_PMON_CNT0 0xc90 | ||
410 | #define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 | ||
411 | #define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 | ||
412 | #define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 | ||
413 | |||
414 | #define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) | ||
415 | |||
416 | struct intel_uncore_ops; | 27 | struct intel_uncore_ops; |
417 | struct intel_uncore_pmu; | 28 | struct intel_uncore_pmu; |
418 | struct intel_uncore_box; | 29 | struct intel_uncore_box; |
@@ -505,6 +116,9 @@ struct uncore_event_desc { | |||
505 | const char *config; | 116 | const char *config; |
506 | }; | 117 | }; |
507 | 118 | ||
119 | ssize_t uncore_event_show(struct kobject *kobj, | ||
120 | struct kobj_attribute *attr, char *buf); | ||
121 | |||
508 | #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ | 122 | #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ |
509 | { \ | 123 | { \ |
510 | .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \ | 124 | .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \ |
@@ -522,15 +136,6 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ | |||
522 | static struct kobj_attribute format_attr_##_var = \ | 136 | static struct kobj_attribute format_attr_##_var = \ |
523 | __ATTR(_name, 0444, __uncore_##_var##_show, NULL) | 137 | __ATTR(_name, 0444, __uncore_##_var##_show, NULL) |
524 | 138 | ||
525 | |||
526 | static ssize_t uncore_event_show(struct kobject *kobj, | ||
527 | struct kobj_attribute *attr, char *buf) | ||
528 | { | ||
529 | struct uncore_event_desc *event = | ||
530 | container_of(attr, struct uncore_event_desc, attr); | ||
531 | return sprintf(buf, "%s", event->config); | ||
532 | } | ||
533 | |||
534 | static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) | 139 | static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) |
535 | { | 140 | { |
536 | return box->pmu->type->box_ctl; | 141 | return box->pmu->type->box_ctl; |
@@ -694,3 +299,41 @@ static inline bool uncore_box_is_fake(struct intel_uncore_box *box) | |||
694 | { | 299 | { |
695 | return (box->phys_id < 0); | 300 | return (box->phys_id < 0); |
696 | } | 301 | } |
302 | |||
303 | struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event); | ||
304 | struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu); | ||
305 | struct intel_uncore_box *uncore_event_to_box(struct perf_event *event); | ||
306 | u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); | ||
307 | void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); | ||
308 | void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); | ||
309 | void uncore_pmu_event_read(struct perf_event *event); | ||
310 | void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); | ||
311 | struct event_constraint * | ||
312 | uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); | ||
313 | void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); | ||
314 | u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); | ||
315 | |||
316 | extern struct intel_uncore_type **uncore_msr_uncores; | ||
317 | extern struct intel_uncore_type **uncore_pci_uncores; | ||
318 | extern struct pci_driver *uncore_pci_driver; | ||
319 | extern int uncore_pcibus_to_physid[256]; | ||
320 | extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; | ||
321 | extern struct event_constraint uncore_constraint_empty; | ||
322 | |||
323 | /* perf_event_intel_uncore_snb.c */ | ||
324 | int snb_uncore_pci_init(void); | ||
325 | int ivb_uncore_pci_init(void); | ||
326 | int hsw_uncore_pci_init(void); | ||
327 | void snb_uncore_cpu_init(void); | ||
328 | void nhm_uncore_cpu_init(void); | ||
329 | |||
330 | /* perf_event_intel_uncore_snbep.c */ | ||
331 | int snbep_uncore_pci_init(void); | ||
332 | void snbep_uncore_cpu_init(void); | ||
333 | int ivbep_uncore_pci_init(void); | ||
334 | void ivbep_uncore_cpu_init(void); | ||
335 | int hswep_uncore_pci_init(void); | ||
336 | void hswep_uncore_cpu_init(void); | ||
337 | |||
338 | /* perf_event_intel_uncore_nhmex.c */ | ||
339 | void nhmex_uncore_cpu_init(void); | ||
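These declarations define the contract of the split: a *_uncore_pci_init() returns int because the bus-to-socket probe can fail, while a *_uncore_cpu_init() only installs uncore_msr_uncores and cannot fail. Supporting a new model then needs one new file plus a case in each switch, roughly as below (the model number and the "foo" names are hypothetical):

	/* perf_event_intel_uncore_foo.c */
	int foo_uncore_pci_init(void);
	void foo_uncore_cpu_init(void);

	/* in uncore_pci_init() */
	case 99: /* hypothetical model */
		ret = foo_uncore_pci_init();
		break;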
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c new file mode 100644 index 000000000000..2749965afed0 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c | |||
@@ -0,0 +1,1221 @@ | |||
1 | /* Nehalem-EX/Westmere-EX uncore support */ | ||
2 | #include "perf_event_intel_uncore.h" | ||
3 | |||
4 | /* NHM-EX event control */ | ||
5 | #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
6 | #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
7 | #define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) | ||
8 | #define NHMEX_PMON_CTL_EDGE_DET (1 << 18) | ||
9 | #define NHMEX_PMON_CTL_PMI_EN (1 << 20) | ||
10 | #define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) | ||
11 | #define NHMEX_PMON_CTL_INVERT (1 << 23) | ||
12 | #define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 | ||
13 | #define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
14 | NHMEX_PMON_CTL_UMASK_MASK | \ | ||
15 | NHMEX_PMON_CTL_EDGE_DET | \ | ||
16 | NHMEX_PMON_CTL_INVERT | \ | ||
17 | NHMEX_PMON_CTL_TRESH_MASK) | ||
18 | |||
19 | /* NHM-EX Ubox */ | ||
20 | #define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
21 | #define NHMEX_U_MSR_PMON_CTR 0xc11 | ||
22 | #define NHMEX_U_MSR_PMON_EV_SEL 0xc10 | ||
23 | |||
24 | #define NHMEX_U_PMON_GLOBAL_EN (1 << 0) | ||
25 | #define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e | ||
26 | #define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) | ||
27 | #define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) | ||
28 | #define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
29 | |||
30 | #define NHMEX_U_PMON_RAW_EVENT_MASK \ | ||
31 | (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
32 | NHMEX_PMON_CTL_EDGE_DET) | ||
33 | |||
34 | /* NHM-EX Cbox */ | ||
35 | #define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 | ||
36 | #define NHMEX_C0_MSR_PMON_CTR0 0xd11 | ||
37 | #define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 | ||
38 | #define NHMEX_C_MSR_OFFSET 0x20 | ||
39 | |||
40 | /* NHM-EX Bbox */ | ||
41 | #define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 | ||
42 | #define NHMEX_B0_MSR_PMON_CTR0 0xc31 | ||
43 | #define NHMEX_B0_MSR_PMON_CTL0 0xc30 | ||
44 | #define NHMEX_B_MSR_OFFSET 0x40 | ||
45 | #define NHMEX_B0_MSR_MATCH 0xe45 | ||
46 | #define NHMEX_B0_MSR_MASK 0xe46 | ||
47 | #define NHMEX_B1_MSR_MATCH 0xe4d | ||
48 | #define NHMEX_B1_MSR_MASK 0xe4e | ||
49 | |||
50 | #define NHMEX_B_PMON_CTL_EN (1 << 0) | ||
51 | #define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 | ||
52 | #define NHMEX_B_PMON_CTL_EV_SEL_MASK \ | ||
53 | (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) | ||
54 | #define NHMEX_B_PMON_CTR_SHIFT 6 | ||
55 | #define NHMEX_B_PMON_CTR_MASK \ | ||
56 | (0x3 << NHMEX_B_PMON_CTR_SHIFT) | ||
57 | #define NHMEX_B_PMON_RAW_EVENT_MASK \ | ||
58 | (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ | ||
59 | NHMEX_B_PMON_CTR_MASK) | ||
60 | |||
61 | /* NHM-EX Sbox */ | ||
62 | #define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 | ||
63 | #define NHMEX_S0_MSR_PMON_CTR0 0xc51 | ||
64 | #define NHMEX_S0_MSR_PMON_CTL0 0xc50 | ||
65 | #define NHMEX_S_MSR_OFFSET 0x80 | ||
66 | #define NHMEX_S0_MSR_MM_CFG 0xe48 | ||
67 | #define NHMEX_S0_MSR_MATCH 0xe49 | ||
68 | #define NHMEX_S0_MSR_MASK 0xe4a | ||
69 | #define NHMEX_S1_MSR_MM_CFG 0xe58 | ||
70 | #define NHMEX_S1_MSR_MATCH 0xe59 | ||
71 | #define NHMEX_S1_MSR_MASK 0xe5a | ||
72 | |||
73 | #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) | ||
74 | #define NHMEX_S_EVENT_TO_R_PROG_EV 0 | ||
75 | |||
76 | /* NHM-EX Mbox */ | ||
77 | #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 | ||
78 | #define NHMEX_M0_MSR_PMU_DSP 0xca5 | ||
79 | #define NHMEX_M0_MSR_PMU_ISS 0xca6 | ||
80 | #define NHMEX_M0_MSR_PMU_MAP 0xca7 | ||
81 | #define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 | ||
82 | #define NHMEX_M0_MSR_PMU_PGT 0xca9 | ||
83 | #define NHMEX_M0_MSR_PMU_PLD 0xcaa | ||
84 | #define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab | ||
85 | #define NHMEX_M0_MSR_PMU_CTL0 0xcb0 | ||
86 | #define NHMEX_M0_MSR_PMU_CNT0 0xcb1 | ||
87 | #define NHMEX_M_MSR_OFFSET 0x40 | ||
88 | #define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 | ||
89 | #define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c | ||
90 | |||
91 | #define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) | ||
92 | #define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL | ||
93 | #define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL | ||
94 | #define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 | ||
95 | |||
96 | #define NHMEX_M_PMON_CTL_EN (1 << 0) | ||
97 | #define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) | ||
98 | #define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 | ||
99 | #define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ | ||
100 | (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) | ||
101 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 | ||
102 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ | ||
103 | (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) | ||
104 | #define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) | ||
105 | #define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) | ||
106 | #define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 | ||
107 | #define NHMEX_M_PMON_CTL_INC_SEL_MASK \ | ||
108 | (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
109 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 | ||
110 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ | ||
111 | (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | ||
112 | #define NHMEX_M_PMON_RAW_EVENT_MASK \ | ||
113 | (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ | ||
114 | NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ | ||
115 | NHMEX_M_PMON_CTL_WRAP_MODE | \ | ||
116 | NHMEX_M_PMON_CTL_FLAG_MODE | \ | ||
117 | NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
118 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | ||
119 | |||
120 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) | ||
121 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) | ||
122 | |||
123 | #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) | ||
124 | #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) | ||
125 | |||
126 | /* | ||
127 | * Use bits 9-13 to select the event if bit 7 is not set; | ||
128 | * otherwise use bits 19-21 to select the event. | ||
129 | */ | ||
130 | #define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
131 | #define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ | ||
132 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
133 | #define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
134 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
135 | #define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ | ||
136 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
137 | #define MBOX_INC_SEL_EXTAR_REG(c, r) \ | ||
138 | EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
139 | MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) | ||
140 | #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ | ||
141 | EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
142 | MBOX_SET_FLAG_SEL_MASK, \ | ||
143 | (u64)-1, NHMEX_M_##r) | ||
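To make the comment above concrete: MBOX_INC_SEL() places its argument in the inc_sel field (bits 9-13), while MBOX_SET_FLAG_SEL() places it in the set_flag_sel field (bits 19-21) and also turns on the flag-mode bit. Two worked values:

	/* MBOX_INC_SEL(0xd)      == 0xd << 9              == 0x1a00  */
	/* MBOX_SET_FLAG_SEL(0x1) == (0x1 << 19) | (1 << 7) == 0x80080 */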
144 | |||
145 | /* NHM-EX Rbox */ | ||
146 | #define NHMEX_R_MSR_GLOBAL_CTL 0xe00 | ||
147 | #define NHMEX_R_MSR_PMON_CTL0 0xe10 | ||
148 | #define NHMEX_R_MSR_PMON_CNT0 0xe11 | ||
149 | #define NHMEX_R_MSR_OFFSET 0x20 | ||
150 | |||
151 | #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ | ||
152 | ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) | ||
153 | #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) | ||
154 | #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) | ||
155 | #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ | ||
156 | (((n) < 4 ? 0 : 0x10) + (n) * 4) | ||
157 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ | ||
158 | (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
159 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ | ||
160 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) | ||
161 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ | ||
162 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) | ||
163 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ | ||
164 | (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
165 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ | ||
166 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) | ||
167 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ | ||
168 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) | ||
169 | |||
170 | #define NHMEX_R_PMON_CTL_EN (1 << 0) | ||
171 | #define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 | ||
172 | #define NHMEX_R_PMON_CTL_EV_SEL_MASK \ | ||
173 | (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) | ||
174 | #define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) | ||
175 | #define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK | ||
176 | |||
177 | /* NHM-EX Wbox */ | ||
178 | #define NHMEX_W_MSR_GLOBAL_CTL 0xc80 | ||
179 | #define NHMEX_W_MSR_PMON_CNT0 0xc90 | ||
180 | #define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 | ||
181 | #define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 | ||
182 | #define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 | ||
183 | |||
184 | #define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) | ||
185 | |||
186 | #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ | ||
187 | ((1ULL << (n)) - 1))) | ||
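__BITS_VALUE(x, i, n) extracts the i-th n-bit-wide field of x, keeping x's type. A quick worked example:

	/* __BITS_VALUE(0xabcd, 1, 8) == (0xabcd >> 8) & 0xff == 0xab */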
188 | |||
189 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | ||
190 | DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); | ||
191 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | ||
192 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | ||
193 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | ||
194 | DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); | ||
195 | DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); | ||
196 | DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); | ||
197 | DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); | ||
198 | |||
199 | static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) | ||
200 | { | ||
201 | wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); | ||
202 | } | ||
203 | |||
204 | static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
205 | { | ||
206 | unsigned msr = uncore_msr_box_ctl(box); | ||
207 | u64 config; | ||
208 | |||
209 | if (msr) { | ||
210 | rdmsrl(msr, config); | ||
211 | config &= ~((1ULL << uncore_num_counters(box)) - 1); | ||
212 | /* WBox has a fixed counter */ | ||
213 | if (uncore_msr_fixed_ctl(box)) | ||
214 | config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
215 | wrmsrl(msr, config); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
220 | { | ||
221 | unsigned msr = uncore_msr_box_ctl(box); | ||
222 | u64 config; | ||
223 | |||
224 | if (msr) { | ||
225 | rdmsrl(msr, config); | ||
226 | config |= (1ULL << uncore_num_counters(box)) - 1; | ||
227 | /* WBox has a fixed counter */ | ||
228 | if (uncore_msr_fixed_ctl(box)) | ||
229 | config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
230 | wrmsrl(msr, config); | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
235 | { | ||
236 | wrmsrl(event->hw.config_base, 0); | ||
237 | } | ||
238 | |||
239 | static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
240 | { | ||
241 | struct hw_perf_event *hwc = &event->hw; | ||
242 | |||
243 | if (hwc->idx >= UNCORE_PMC_IDX_FIXED) | ||
244 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); | ||
245 | else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) | ||
246 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
247 | else | ||
248 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
249 | } | ||
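A note on the enable-bit selection above: on NHM-EX, boxes whose raw event mask already claims bit 0 for event selection cannot reuse it as an enable, so those boxes are enabled via bit 22 instead; the remaining boxes (e.g. the Bbox, whose event select sits at bits 1-5) enable via bit 0, and the fixed counter is enabled with bit 0 alone.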
250 | |||
251 | #define NHMEX_UNCORE_OPS_COMMON_INIT() \ | ||
252 | .init_box = nhmex_uncore_msr_init_box, \ | ||
253 | .disable_box = nhmex_uncore_msr_disable_box, \ | ||
254 | .enable_box = nhmex_uncore_msr_enable_box, \ | ||
255 | .disable_event = nhmex_uncore_msr_disable_event, \ | ||
256 | .read_counter = uncore_msr_read_counter | ||
257 | |||
258 | static struct intel_uncore_ops nhmex_uncore_ops = { | ||
259 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
260 | .enable_event = nhmex_uncore_msr_enable_event, | ||
261 | }; | ||
262 | |||
263 | static struct attribute *nhmex_uncore_ubox_formats_attr[] = { | ||
264 | &format_attr_event.attr, | ||
265 | &format_attr_edge.attr, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | static struct attribute_group nhmex_uncore_ubox_format_group = { | ||
270 | .name = "format", | ||
271 | .attrs = nhmex_uncore_ubox_formats_attr, | ||
272 | }; | ||
273 | |||
274 | static struct intel_uncore_type nhmex_uncore_ubox = { | ||
275 | .name = "ubox", | ||
276 | .num_counters = 1, | ||
277 | .num_boxes = 1, | ||
278 | .perf_ctr_bits = 48, | ||
279 | .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, | ||
280 | .perf_ctr = NHMEX_U_MSR_PMON_CTR, | ||
281 | .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, | ||
282 | .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, | ||
283 | .ops = &nhmex_uncore_ops, | ||
284 | .format_group = &nhmex_uncore_ubox_format_group | ||
285 | }; | ||
286 | |||
287 | static struct attribute *nhmex_uncore_cbox_formats_attr[] = { | ||
288 | &format_attr_event.attr, | ||
289 | &format_attr_umask.attr, | ||
290 | &format_attr_edge.attr, | ||
291 | &format_attr_inv.attr, | ||
292 | &format_attr_thresh8.attr, | ||
293 | NULL, | ||
294 | }; | ||
295 | |||
296 | static struct attribute_group nhmex_uncore_cbox_format_group = { | ||
297 | .name = "format", | ||
298 | .attrs = nhmex_uncore_cbox_formats_attr, | ||
299 | }; | ||
300 | |||
301 | /* MSR offset for each instance of cbox */ | ||
302 | static unsigned nhmex_cbox_msr_offsets[] = { | ||
303 | 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, | ||
304 | }; | ||
305 | |||
306 | static struct intel_uncore_type nhmex_uncore_cbox = { | ||
307 | .name = "cbox", | ||
308 | .num_counters = 6, | ||
309 | .num_boxes = 10, | ||
310 | .perf_ctr_bits = 48, | ||
311 | .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, | ||
312 | .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, | ||
313 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
314 | .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, | ||
315 | .msr_offsets = nhmex_cbox_msr_offsets, | ||
316 | .pair_ctr_ctl = 1, | ||
317 | .ops = &nhmex_uncore_ops, | ||
318 | .format_group = &nhmex_uncore_cbox_format_group | ||
319 | }; | ||
320 | |||
321 | static struct uncore_event_desc nhmex_uncore_wbox_events[] = { | ||
322 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), | ||
323 | { /* end: all zeroes */ }, | ||
324 | }; | ||
325 | |||
326 | static struct intel_uncore_type nhmex_uncore_wbox = { | ||
327 | .name = "wbox", | ||
328 | .num_counters = 4, | ||
329 | .num_boxes = 1, | ||
330 | .perf_ctr_bits = 48, | ||
331 | .event_ctl = NHMEX_W_MSR_PMON_CNT0, | ||
332 | .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, | ||
333 | .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, | ||
334 | .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, | ||
335 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
336 | .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, | ||
337 | .pair_ctr_ctl = 1, | ||
338 | .event_descs = nhmex_uncore_wbox_events, | ||
339 | .ops = &nhmex_uncore_ops, | ||
340 | .format_group = &nhmex_uncore_cbox_format_group | ||
341 | }; | ||
342 | |||
343 | static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
344 | { | ||
345 | struct hw_perf_event *hwc = &event->hw; | ||
346 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
347 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
348 | int ctr, ev_sel; | ||
349 | |||
350 | ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> | ||
351 | NHMEX_B_PMON_CTR_SHIFT; | ||
352 | ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> | ||
353 | NHMEX_B_PMON_CTL_EV_SEL_SHIFT; | ||
354 | |||
355 | /* events that do not use the match/mask registers */ | ||
356 | if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || | ||
357 | (ctr == 2 && ev_sel != 0x4) || ctr == 3) | ||
358 | return 0; | ||
359 | |||
360 | if (box->pmu->pmu_idx == 0) | ||
361 | reg1->reg = NHMEX_B0_MSR_MATCH; | ||
362 | else | ||
363 | reg1->reg = NHMEX_B1_MSR_MATCH; | ||
364 | reg1->idx = 0; | ||
365 | reg1->config = event->attr.config1; | ||
366 | reg2->config = event->attr.config2; | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
371 | { | ||
372 | struct hw_perf_event *hwc = &event->hw; | ||
373 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
374 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
375 | |||
376 | if (reg1->idx != EXTRA_REG_NONE) { | ||
377 | wrmsrl(reg1->reg, reg1->config); | ||
378 | wrmsrl(reg1->reg + 1, reg2->config); | ||
379 | } | ||
380 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
381 | (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * The Bbox has 4 counters, but each counter monitors different events. | ||
386 | * Use bits 6-7 in the event config to select the counter. | ||
387 | */ | ||
388 | static struct event_constraint nhmex_uncore_bbox_constraints[] = { | ||
389 | EVENT_CONSTRAINT(0 , 1, 0xc0), | ||
390 | EVENT_CONSTRAINT(0x40, 2, 0xc0), | ||
391 | EVENT_CONSTRAINT(0x80, 4, 0xc0), | ||
392 | EVENT_CONSTRAINT(0xc0, 8, 0xc0), | ||
393 | EVENT_CONSTRAINT_END, | ||
394 | }; | ||
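Assuming the usual EVENT_CONSTRAINT(code, counter bitmap, config mask) layout, the 0xc0 mask means only the counter-select bits 6-7 are compared, so each value of that field pins the event to exactly one counter:

	/* bits 6-7 == 0 (0x00) -> counter 0 (bitmap 0x1) */
	/* bits 6-7 == 1 (0x40) -> counter 1 (bitmap 0x2) */
	/* bits 6-7 == 2 (0x80) -> counter 2 (bitmap 0x4) */
	/* bits 6-7 == 3 (0xc0) -> counter 3 (bitmap 0x8) */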
395 | |||
396 | static struct attribute *nhmex_uncore_bbox_formats_attr[] = { | ||
397 | &format_attr_event5.attr, | ||
398 | &format_attr_counter.attr, | ||
399 | &format_attr_match.attr, | ||
400 | &format_attr_mask.attr, | ||
401 | NULL, | ||
402 | }; | ||
403 | |||
404 | static struct attribute_group nhmex_uncore_bbox_format_group = { | ||
405 | .name = "format", | ||
406 | .attrs = nhmex_uncore_bbox_formats_attr, | ||
407 | }; | ||
408 | |||
409 | static struct intel_uncore_ops nhmex_uncore_bbox_ops = { | ||
410 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
411 | .enable_event = nhmex_bbox_msr_enable_event, | ||
412 | .hw_config = nhmex_bbox_hw_config, | ||
413 | .get_constraint = uncore_get_constraint, | ||
414 | .put_constraint = uncore_put_constraint, | ||
415 | }; | ||
416 | |||
417 | static struct intel_uncore_type nhmex_uncore_bbox = { | ||
418 | .name = "bbox", | ||
419 | .num_counters = 4, | ||
420 | .num_boxes = 2, | ||
421 | .perf_ctr_bits = 48, | ||
422 | .event_ctl = NHMEX_B0_MSR_PMON_CTL0, | ||
423 | .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, | ||
424 | .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, | ||
425 | .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, | ||
426 | .msr_offset = NHMEX_B_MSR_OFFSET, | ||
427 | .pair_ctr_ctl = 1, | ||
428 | .num_shared_regs = 1, | ||
429 | .constraints = nhmex_uncore_bbox_constraints, | ||
430 | .ops = &nhmex_uncore_bbox_ops, | ||
431 | .format_group = &nhmex_uncore_bbox_format_group | ||
432 | }; | ||
433 | |||
434 | static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
435 | { | ||
436 | struct hw_perf_event *hwc = &event->hw; | ||
437 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
438 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
439 | |||
440 | /* only TO_R_PROG_EV event uses the match/mask register */ | ||
441 | if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != | ||
442 | NHMEX_S_EVENT_TO_R_PROG_EV) | ||
443 | return 0; | ||
444 | |||
445 | if (box->pmu->pmu_idx == 0) | ||
446 | reg1->reg = NHMEX_S0_MSR_MM_CFG; | ||
447 | else | ||
448 | reg1->reg = NHMEX_S1_MSR_MM_CFG; | ||
449 | reg1->idx = 0; | ||
450 | reg1->config = event->attr.config1; | ||
451 | reg2->config = event->attr.config2; | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
456 | { | ||
457 | struct hw_perf_event *hwc = &event->hw; | ||
458 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
459 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
460 | |||
461 | if (reg1->idx != EXTRA_REG_NONE) { | ||
462 | wrmsrl(reg1->reg, 0); | ||
463 | wrmsrl(reg1->reg + 1, reg1->config); | ||
464 | wrmsrl(reg1->reg + 2, reg2->config); | ||
465 | wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); | ||
466 | } | ||
467 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
468 | } | ||
469 | |||
470 | static struct attribute *nhmex_uncore_sbox_formats_attr[] = { | ||
471 | &format_attr_event.attr, | ||
472 | &format_attr_umask.attr, | ||
473 | &format_attr_edge.attr, | ||
474 | &format_attr_inv.attr, | ||
475 | &format_attr_thresh8.attr, | ||
476 | &format_attr_match.attr, | ||
477 | &format_attr_mask.attr, | ||
478 | NULL, | ||
479 | }; | ||
480 | |||
481 | static struct attribute_group nhmex_uncore_sbox_format_group = { | ||
482 | .name = "format", | ||
483 | .attrs = nhmex_uncore_sbox_formats_attr, | ||
484 | }; | ||
485 | |||
486 | static struct intel_uncore_ops nhmex_uncore_sbox_ops = { | ||
487 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
488 | .enable_event = nhmex_sbox_msr_enable_event, | ||
489 | .hw_config = nhmex_sbox_hw_config, | ||
490 | .get_constraint = uncore_get_constraint, | ||
491 | .put_constraint = uncore_put_constraint, | ||
492 | }; | ||
493 | |||
494 | static struct intel_uncore_type nhmex_uncore_sbox = { | ||
495 | .name = "sbox", | ||
496 | .num_counters = 4, | ||
497 | .num_boxes = 2, | ||
498 | .perf_ctr_bits = 48, | ||
499 | .event_ctl = NHMEX_S0_MSR_PMON_CTL0, | ||
500 | .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, | ||
501 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
502 | .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, | ||
503 | .msr_offset = NHMEX_S_MSR_OFFSET, | ||
504 | .pair_ctr_ctl = 1, | ||
505 | .num_shared_regs = 1, | ||
506 | .ops = &nhmex_uncore_sbox_ops, | ||
507 | .format_group = &nhmex_uncore_sbox_format_group | ||
508 | }; | ||
509 | |||
510 | enum { | ||
511 | EXTRA_REG_NHMEX_M_FILTER, | ||
512 | EXTRA_REG_NHMEX_M_DSP, | ||
513 | EXTRA_REG_NHMEX_M_ISS, | ||
514 | EXTRA_REG_NHMEX_M_MAP, | ||
515 | EXTRA_REG_NHMEX_M_MSC_THR, | ||
516 | EXTRA_REG_NHMEX_M_PGT, | ||
517 | EXTRA_REG_NHMEX_M_PLD, | ||
518 | EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, | ||
519 | }; | ||
520 | |||
521 | static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { | ||
522 | MBOX_INC_SEL_EXTAR_REG(0x0, DSP), | ||
523 | MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), | ||
524 | MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), | ||
525 | MBOX_INC_SEL_EXTAR_REG(0x9, ISS), | ||
526 | /* event 0xa uses two extra registers */ | ||
527 | MBOX_INC_SEL_EXTAR_REG(0xa, ISS), | ||
528 | MBOX_INC_SEL_EXTAR_REG(0xa, PLD), | ||
529 | MBOX_INC_SEL_EXTAR_REG(0xb, PLD), | ||
530 | /* events 0xd ~ 0x10 use the same extra register */ | ||
531 | MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), | ||
532 | MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), | ||
533 | MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), | ||
534 | MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), | ||
535 | MBOX_INC_SEL_EXTAR_REG(0x16, PGT), | ||
536 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), | ||
537 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), | ||
538 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), | ||
539 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), | ||
540 | EVENT_EXTRA_END | ||
541 | }; | ||
542 | |||
543 | /* Nehalem-EX or Westmere-EX? */ | ||
544 | static bool uncore_nhmex; | ||
545 | |||
546 | static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) | ||
547 | { | ||
548 | struct intel_uncore_extra_reg *er; | ||
549 | unsigned long flags; | ||
550 | bool ret = false; | ||
551 | u64 mask; | ||
552 | |||
553 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
554 | er = &box->shared_regs[idx]; | ||
555 | raw_spin_lock_irqsave(&er->lock, flags); | ||
556 | if (!atomic_read(&er->ref) || er->config == config) { | ||
557 | atomic_inc(&er->ref); | ||
558 | er->config = config; | ||
559 | ret = true; | ||
560 | } | ||
561 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | /* | ||
566 | * The ZDP_CTL_FVC MSR has 4 fields which are used to control | ||
567 | * events 0xd ~ 0x10. Besides these 4 fields, there are additional | ||
568 | * fields which are shared. | ||
569 | */ | ||
570 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
571 | if (WARN_ON_ONCE(idx >= 4)) | ||
572 | return false; | ||
573 | |||
574 | /* mask of the shared fields */ | ||
575 | if (uncore_nhmex) | ||
576 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
577 | else | ||
578 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
579 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
580 | |||
581 | raw_spin_lock_irqsave(&er->lock, flags); | ||
582 | /* add mask of the non-shared field if it's in use */ | ||
583 | if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { | ||
584 | if (uncore_nhmex) | ||
585 | mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
586 | else | ||
587 | mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
588 | } | ||
589 | |||
590 | if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { | ||
591 | atomic_add(1 << (idx * 8), &er->ref); | ||
592 | if (uncore_nhmex) | ||
593 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
594 | NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
595 | else | ||
596 | mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
597 | WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
598 | er->config &= ~mask; | ||
599 | er->config |= (config & mask); | ||
600 | ret = true; | ||
601 | } | ||
602 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
603 | |||
604 | return ret; | ||
605 | } | ||
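
The get/put pair above packs four 8-bit reference counts into one atomic_t, one per ZDP_CTL_FVC field, so events 0xd ~ 0x10 can take and release their field independently. A minimal userspace sketch of that packing arithmetic, assuming only standard C (the extraction mirrors the __BITS_VALUE() helper defined at the end of this series; the field choices are invented):

	#include <stdint.h>
	#include <stdio.h>

	/* same extraction as the kernel's __BITS_VALUE(x, i, n) */
	#define BITS_VALUE(x, i, n)	(((x) >> ((i) * (n))) & ((1u << (n)) - 1))

	int main(void)
	{
		uint32_t ref = 0;

		ref += 1u << (2 * 8);	/* take field 2: atomic_add(1 << (idx * 8), &er->ref) */
		ref += 1u << (2 * 8);	/* a second event shares field 2 */
		ref += 1u << (0 * 8);	/* take field 0 */
		ref -= 1u << (2 * 8);	/* release one field-2 user, as put_shared_reg() does */

		printf("field0=%u field2=%u\n",
		       BITS_VALUE(ref, 0, 8), BITS_VALUE(ref, 2, 8));	/* field0=1 field2=1 */
		return 0;
	}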
606 | |||
607 | static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) | ||
608 | { | ||
609 | struct intel_uncore_extra_reg *er; | ||
610 | |||
611 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
612 | er = &box->shared_regs[idx]; | ||
613 | atomic_dec(&er->ref); | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
618 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
619 | atomic_sub(1 << (idx * 8), &er->ref); | ||
620 | } | ||
621 | |||
622 | static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
623 | { | ||
624 | struct hw_perf_event *hwc = &event->hw; | ||
625 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
626 | u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
627 | u64 config = reg1->config; | ||
628 | |||
629 | /* get the non-shared control bits and shift them */ | ||
630 | idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
631 | if (uncore_nhmex) | ||
632 | config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
633 | else | ||
634 | config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
635 | if (new_idx > orig_idx) { | ||
636 | idx = new_idx - orig_idx; | ||
637 | config <<= 3 * idx; | ||
638 | } else { | ||
639 | idx = orig_idx - new_idx; | ||
640 | config >>= 3 * idx; | ||
641 | } | ||
642 | |||
643 | /* add the shared control bits back */ | ||
644 | if (uncore_nhmex) | ||
645 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
646 | else | ||
647 | config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
649 | if (modify) { | ||
650 | /* adjust the main event selector */ | ||
651 | if (new_idx > orig_idx) | ||
652 | hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
653 | else | ||
654 | hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
655 | reg1->config = config; | ||
656 | reg1->idx = ~0xff | new_idx; | ||
657 | } | ||
658 | return config; | ||
659 | } | ||
660 | |||
661 | static struct event_constraint * | ||
662 | nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
663 | { | ||
664 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
665 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
666 | int i, idx[2], alloc = 0; | ||
667 | u64 config1 = reg1->config; | ||
668 | |||
669 | idx[0] = __BITS_VALUE(reg1->idx, 0, 8); | ||
670 | idx[1] = __BITS_VALUE(reg1->idx, 1, 8); | ||
671 | again: | ||
672 | for (i = 0; i < 2; i++) { | ||
673 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
674 | idx[i] = 0xff; | ||
675 | |||
676 | if (idx[i] == 0xff) | ||
677 | continue; | ||
678 | |||
679 | if (!nhmex_mbox_get_shared_reg(box, idx[i], | ||
680 | __BITS_VALUE(config1, i, 32))) | ||
681 | goto fail; | ||
682 | alloc |= (0x1 << i); | ||
683 | } | ||
684 | |||
685 | /* for the match/mask registers */ | ||
686 | if (reg2->idx != EXTRA_REG_NONE && | ||
687 | (uncore_box_is_fake(box) || !reg2->alloc) && | ||
688 | !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) | ||
689 | goto fail; | ||
690 | |||
691 | /* | ||
692 | * If it's a fake box -- as per validate_{group,event}() -- we | ||
693 | * shouldn't touch event state, and we can avoid doing so | ||
694 | * since both will only call get_event_constraints() once | ||
695 | * on each event. This avoids the need for reg->alloc. | ||
696 | */ | ||
697 | if (!uncore_box_is_fake(box)) { | ||
698 | if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) | ||
699 | nhmex_mbox_alter_er(event, idx[0], true); | ||
700 | reg1->alloc |= alloc; | ||
701 | if (reg2->idx != EXTRA_REG_NONE) | ||
702 | reg2->alloc = 1; | ||
703 | } | ||
704 | return NULL; | ||
705 | fail: | ||
706 | if (idx[0] != 0xff && !(alloc & 0x1) && | ||
707 | idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
708 | /* | ||
709 | * events 0xd ~ 0x10 are functionally identical, but are | ||
710 | * controlled by different fields in the ZDP_CTL_FVC | ||
711 | * register. If we failed to take one field, try the | ||
712 | * remaining 3 choices. | ||
713 | */ | ||
714 | BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); | ||
715 | idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
716 | idx[0] = (idx[0] + 1) % 4; | ||
717 | idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
718 | if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { | ||
719 | config1 = nhmex_mbox_alter_er(event, idx[0], false); | ||
720 | goto again; | ||
721 | } | ||
722 | } | ||
723 | |||
724 | if (alloc & 0x1) | ||
725 | nhmex_mbox_put_shared_reg(box, idx[0]); | ||
726 | if (alloc & 0x2) | ||
727 | nhmex_mbox_put_shared_reg(box, idx[1]); | ||
728 | return &uncore_constraint_empty; | ||
729 | } | ||
730 | |||
731 | static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
732 | { | ||
733 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
734 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
735 | |||
736 | if (uncore_box_is_fake(box)) | ||
737 | return; | ||
738 | |||
739 | if (reg1->alloc & 0x1) | ||
740 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); | ||
741 | if (reg1->alloc & 0x2) | ||
742 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); | ||
743 | reg1->alloc = 0; | ||
744 | |||
745 | if (reg2->alloc) { | ||
746 | nhmex_mbox_put_shared_reg(box, reg2->idx); | ||
747 | reg2->alloc = 0; | ||
748 | } | ||
749 | } | ||
750 | |||
751 | static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) | ||
752 | { | ||
753 | if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
754 | return er->idx; | ||
755 | return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; | ||
756 | } | ||
757 | |||
758 | static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
759 | { | ||
760 | struct intel_uncore_type *type = box->pmu->type; | ||
761 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
762 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
763 | struct extra_reg *er; | ||
764 | unsigned msr; | ||
765 | int reg_idx = 0; | ||
766 | /* | ||
767 | * The mbox events may require at most 2 extra MSRs. But only | ||
768 | * the lower 32 bits in these MSRs are significant, so we can use | ||
769 | * config1 to pass two MSRs' config. | ||
770 | */ | ||
771 | for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { | ||
772 | if (er->event != (event->hw.config & er->config_mask)) | ||
773 | continue; | ||
774 | if (event->attr.config1 & ~er->valid_mask) | ||
775 | return -EINVAL; | ||
776 | |||
777 | msr = er->msr + type->msr_offset * box->pmu->pmu_idx; | ||
778 | if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) | ||
779 | return -EINVAL; | ||
780 | |||
781 | /* always use bits 32~63 to pass the PLD config */ | ||
782 | if (er->idx == EXTRA_REG_NHMEX_M_PLD) | ||
783 | reg_idx = 1; | ||
784 | else if (WARN_ON_ONCE(reg_idx > 0)) | ||
785 | return -EINVAL; | ||
786 | |||
787 | reg1->idx &= ~(0xff << (reg_idx * 8)); | ||
788 | reg1->reg &= ~(0xffff << (reg_idx * 16)); | ||
789 | reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); | ||
790 | reg1->reg |= msr << (reg_idx * 16); | ||
791 | reg1->config = event->attr.config1; | ||
792 | reg_idx++; | ||
793 | } | ||
794 | /* | ||
795 | * The mbox only provides the ability to perform address matching | ||
796 | * for the PLD events. | ||
797 | */ | ||
798 | if (reg_idx == 2) { | ||
799 | reg2->idx = EXTRA_REG_NHMEX_M_FILTER; | ||
800 | if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) | ||
801 | reg2->config = event->attr.config2; | ||
802 | else | ||
803 | reg2->config = ~0ULL; | ||
804 | if (box->pmu->pmu_idx == 0) | ||
805 | reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; | ||
806 | else | ||
807 | reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; | ||
808 | } | ||
809 | return 0; | ||
810 | } | ||
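
hw_config() above multiplexes up to two extra-register descriptors through the single reg1: two 8-bit indices in reg1->idx, two 16-bit MSR addresses in reg1->reg, and two 32-bit configs in config1, with the PLD config always in the upper half. A sketch of the packing and the matching extraction (the MSR addresses here are invented; the enum indices 1 and 6 correspond to DSP and PLD in the enum above):

	#include <stdint.h>
	#include <stdio.h>

	#define BITS_VALUE(x, i, n)	(((x) >> ((i) * (n))) & ((1ull << (n)) - 1))

	int main(void)
	{
		uint64_t idx = 0, reg = 0;
		unsigned int dsp_msr = 0xca5, pld_msr = 0xcae;	/* invented addresses */

		/* slot 0: the DSP extra reg; slot 1: the PLD extra reg */
		idx |= 1ull << (0 * 8);			/* EXTRA_REG_NHMEX_M_DSP */
		reg |= (uint64_t)dsp_msr << (0 * 16);
		idx |= 6ull << (1 * 8);			/* EXTRA_REG_NHMEX_M_PLD */
		reg |= (uint64_t)pld_msr << (1 * 16);

		/* enable_event() later unpacks each slot the same way */
		printf("slot1: idx=%llu msr=0x%llx\n",
		       (unsigned long long)BITS_VALUE(idx, 1, 8),
		       (unsigned long long)BITS_VALUE(reg, 1, 16));	/* idx=6 msr=0xcae */
		return 0;
	}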
811 | |||
812 | static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
813 | { | ||
814 | struct intel_uncore_extra_reg *er; | ||
815 | unsigned long flags; | ||
816 | u64 config; | ||
817 | |||
818 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
819 | return box->shared_regs[idx].config; | ||
820 | |||
821 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
822 | raw_spin_lock_irqsave(&er->lock, flags); | ||
823 | config = er->config; | ||
824 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
825 | return config; | ||
826 | } | ||
827 | |||
828 | static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
829 | { | ||
830 | struct hw_perf_event *hwc = &event->hw; | ||
831 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
832 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
833 | int idx; | ||
834 | |||
835 | idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
836 | if (idx != 0xff) | ||
837 | wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), | ||
838 | nhmex_mbox_shared_reg_config(box, idx)); | ||
839 | idx = __BITS_VALUE(reg1->idx, 1, 8); | ||
840 | if (idx != 0xff) | ||
841 | wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), | ||
842 | nhmex_mbox_shared_reg_config(box, idx)); | ||
843 | |||
844 | if (reg2->idx != EXTRA_REG_NONE) { | ||
845 | wrmsrl(reg2->reg, 0); | ||
846 | if (reg2->config != ~0ULL) { | ||
847 | wrmsrl(reg2->reg + 1, | ||
848 | reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); | ||
849 | wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & | ||
850 | (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); | ||
851 | wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); | ||
852 | } | ||
853 | } | ||
854 | |||
855 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
856 | } | ||
857 | |||
858 | DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); | ||
859 | DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); | ||
860 | DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); | ||
861 | DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); | ||
862 | DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); | ||
863 | DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); | ||
864 | DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); | ||
865 | DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); | ||
866 | DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); | ||
867 | DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); | ||
868 | DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); | ||
869 | DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); | ||
870 | DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); | ||
871 | DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); | ||
872 | DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); | ||
873 | DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); | ||
874 | |||
875 | static struct attribute *nhmex_uncore_mbox_formats_attr[] = { | ||
876 | &format_attr_count_mode.attr, | ||
877 | &format_attr_storage_mode.attr, | ||
878 | &format_attr_wrap_mode.attr, | ||
879 | &format_attr_flag_mode.attr, | ||
880 | &format_attr_inc_sel.attr, | ||
881 | &format_attr_set_flag_sel.attr, | ||
882 | &format_attr_filter_cfg_en.attr, | ||
883 | &format_attr_filter_match.attr, | ||
884 | &format_attr_filter_mask.attr, | ||
885 | &format_attr_dsp.attr, | ||
886 | &format_attr_thr.attr, | ||
887 | &format_attr_fvc.attr, | ||
888 | &format_attr_pgt.attr, | ||
889 | &format_attr_map.attr, | ||
890 | &format_attr_iss.attr, | ||
891 | &format_attr_pld.attr, | ||
892 | NULL, | ||
893 | }; | ||
894 | |||
895 | static struct attribute_group nhmex_uncore_mbox_format_group = { | ||
896 | .name = "format", | ||
897 | .attrs = nhmex_uncore_mbox_formats_attr, | ||
898 | }; | ||
899 | |||
900 | static struct uncore_event_desc nhmex_uncore_mbox_events[] = { | ||
901 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), | ||
902 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), | ||
903 | { /* end: all zeroes */ }, | ||
904 | }; | ||
905 | |||
906 | static struct uncore_event_desc wsmex_uncore_mbox_events[] = { | ||
907 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), | ||
908 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), | ||
909 | { /* end: all zeroes */ }, | ||
910 | }; | ||
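
With these aliases the raw mbox encodings become usable by name from user space. Assuming the framework registers one PMU per box as uncore_mbox_0 and uncore_mbox_1, a command such as perf stat -a -e uncore_mbox_0/bbox_cmds_read/ sleep 1 expands to inc_sel=0xd plus whichever fvc value above matches the detected model.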
911 | |||
912 | static struct intel_uncore_ops nhmex_uncore_mbox_ops = { | ||
913 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
914 | .enable_event = nhmex_mbox_msr_enable_event, | ||
915 | .hw_config = nhmex_mbox_hw_config, | ||
916 | .get_constraint = nhmex_mbox_get_constraint, | ||
917 | .put_constraint = nhmex_mbox_put_constraint, | ||
918 | }; | ||
919 | |||
920 | static struct intel_uncore_type nhmex_uncore_mbox = { | ||
921 | .name = "mbox", | ||
922 | .num_counters = 6, | ||
923 | .num_boxes = 2, | ||
924 | .perf_ctr_bits = 48, | ||
925 | .event_ctl = NHMEX_M0_MSR_PMU_CTL0, | ||
926 | .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, | ||
927 | .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, | ||
928 | .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, | ||
929 | .msr_offset = NHMEX_M_MSR_OFFSET, | ||
930 | .pair_ctr_ctl = 1, | ||
931 | .num_shared_regs = 8, | ||
932 | .event_descs = nhmex_uncore_mbox_events, | ||
933 | .ops = &nhmex_uncore_mbox_ops, | ||
934 | .format_group = &nhmex_uncore_mbox_format_group, | ||
935 | }; | ||
936 | |||
937 | static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) | ||
938 | { | ||
939 | struct hw_perf_event *hwc = &event->hw; | ||
940 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
941 | |||
942 | /* adjust the main event selector and extra register index */ | ||
943 | if (reg1->idx % 2) { | ||
944 | reg1->idx--; | ||
945 | hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
946 | } else { | ||
947 | reg1->idx++; | ||
948 | hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
949 | } | ||
950 | |||
951 | /* adjust extra register config */ | ||
952 | switch (reg1->idx % 6) { | ||
953 | case 2: | ||
954 | /* shift bits 8~15 down to bits 0~7 */ | ||
955 | reg1->config >>= 8; | ||
956 | break; | ||
957 | case 3: | ||
958 | /* shift bits 0~7 up to bits 8~15 */ | ||
959 | reg1->config <<= 8; | ||
960 | break; | ||
961 | } | ||
962 | } | ||
963 | |||
964 | /* | ||
965 | * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7. | ||
966 | * An event set consists of 6 events; the 3rd and 4th events in | ||
967 | * an event set use the same extra register, so an event set uses | ||
968 | * 5 extra registers. | ||
969 | */ | ||
970 | static struct event_constraint * | ||
971 | nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
972 | { | ||
973 | struct hw_perf_event *hwc = &event->hw; | ||
974 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
975 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
976 | struct intel_uncore_extra_reg *er; | ||
977 | unsigned long flags; | ||
978 | int idx, er_idx; | ||
979 | u64 config1; | ||
980 | bool ok = false; | ||
981 | |||
982 | if (!uncore_box_is_fake(box) && reg1->alloc) | ||
983 | return NULL; | ||
984 | |||
985 | idx = reg1->idx % 6; | ||
986 | config1 = reg1->config; | ||
987 | again: | ||
988 | er_idx = idx; | ||
989 | /* the 3rd and 4th events use the same extra register */ | ||
990 | if (er_idx > 2) | ||
991 | er_idx--; | ||
992 | er_idx += (reg1->idx / 6) * 5; | ||
993 | |||
994 | er = &box->shared_regs[er_idx]; | ||
995 | raw_spin_lock_irqsave(&er->lock, flags); | ||
996 | if (idx < 2) { | ||
997 | if (!atomic_read(&er->ref) || er->config == reg1->config) { | ||
998 | atomic_inc(&er->ref); | ||
999 | er->config = reg1->config; | ||
1000 | ok = true; | ||
1001 | } | ||
1002 | } else if (idx == 2 || idx == 3) { | ||
1003 | /* | ||
1004 | * these two events use different fields in an extra register: | ||
1005 | * bits 0~7 and bits 8~15 respectively. | ||
1006 | */ | ||
1007 | u64 mask = 0xff << ((idx - 2) * 8); | ||
1008 | if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || | ||
1009 | !((er->config ^ config1) & mask)) { | ||
1010 | atomic_add(1 << ((idx - 2) * 8), &er->ref); | ||
1011 | er->config &= ~mask; | ||
1012 | er->config |= config1 & mask; | ||
1013 | ok = true; | ||
1014 | } | ||
1015 | } else { | ||
1016 | if (!atomic_read(&er->ref) || | ||
1017 | (er->config == (hwc->config >> 32) && | ||
1018 | er->config1 == reg1->config && | ||
1019 | er->config2 == reg2->config)) { | ||
1020 | atomic_inc(&er->ref); | ||
1021 | er->config = (hwc->config >> 32); | ||
1022 | er->config1 = reg1->config; | ||
1023 | er->config2 = reg2->config; | ||
1024 | ok = true; | ||
1025 | } | ||
1026 | } | ||
1027 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1028 | |||
1029 | if (!ok) { | ||
1030 | /* | ||
1031 | * The Rbox events are always in pairs. The paired | ||
1032 | * events are functionally identical, but use different | ||
1033 | * extra registers. If we failed to take an extra | ||
1034 | * register, try the alternative. | ||
1035 | */ | ||
1036 | idx ^= 1; | ||
1037 | if (idx != reg1->idx % 6) { | ||
1038 | if (idx == 2) | ||
1039 | config1 >>= 8; | ||
1040 | else if (idx == 3) | ||
1041 | config1 <<= 8; | ||
1042 | goto again; | ||
1043 | } | ||
1044 | } else { | ||
1045 | if (!uncore_box_is_fake(box)) { | ||
1046 | if (idx != reg1->idx % 6) | ||
1047 | nhmex_rbox_alter_er(box, event); | ||
1048 | reg1->alloc = 1; | ||
1049 | } | ||
1050 | return NULL; | ||
1051 | } | ||
1052 | return &uncore_constraint_empty; | ||
1053 | } | ||
1054 | |||
1055 | static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1056 | { | ||
1057 | struct intel_uncore_extra_reg *er; | ||
1058 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1059 | int idx, er_idx; | ||
1060 | |||
1061 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
1062 | return; | ||
1063 | |||
1064 | idx = reg1->idx % 6; | ||
1065 | er_idx = idx; | ||
1066 | if (er_idx > 2) | ||
1067 | er_idx--; | ||
1068 | er_idx += (reg1->idx / 6) * 5; | ||
1069 | |||
1070 | er = &box->shared_regs[er_idx]; | ||
1071 | if (idx == 2 || idx == 3) | ||
1072 | atomic_sub(1 << ((idx - 2) * 8), &er->ref); | ||
1073 | else | ||
1074 | atomic_dec(&er->ref); | ||
1075 | |||
1076 | reg1->alloc = 0; | ||
1077 | } | ||
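
Both rbox constraint paths derive the shared-register slot the same way: six events per set, with events 2 and 3 of a set folded onto one register, giving five slots per set. A standalone sketch of that mapping:

	/* reg_idx is reg1->idx (0..23); returns the shared_regs[] slot */
	static int rbox_er_index(int reg_idx)
	{
		int idx = reg_idx % 6;
		int er_idx = (idx > 2) ? idx - 1 : idx;

		return er_idx + (reg_idx / 6) * 5;	/* e.g. 3 -> 2, 9 -> 7, 17 -> 14 */
	}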
1078 | |||
1079 | static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1080 | { | ||
1081 | struct hw_perf_event *hwc = &event->hw; | ||
1082 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1083 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1084 | int idx; | ||
1085 | |||
1086 | idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> | ||
1087 | NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1088 | if (idx >= 0x18) | ||
1089 | return -EINVAL; | ||
1090 | |||
1091 | reg1->idx = idx; | ||
1092 | reg1->config = event->attr.config1; | ||
1093 | |||
1094 | switch (idx % 6) { | ||
1095 | case 4: | ||
1096 | case 5: | ||
1097 | hwc->config |= event->attr.config & (~0ULL << 32); | ||
1098 | reg2->config = event->attr.config2; | ||
1099 | break; | ||
1100 | } | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1105 | { | ||
1106 | struct hw_perf_event *hwc = &event->hw; | ||
1107 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1108 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1109 | int idx, port; | ||
1110 | |||
1111 | idx = reg1->idx; | ||
1112 | port = idx / 6 + box->pmu->pmu_idx * 4; | ||
1113 | |||
1114 | switch (idx % 6) { | ||
1115 | case 0: | ||
1116 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); | ||
1117 | break; | ||
1118 | case 1: | ||
1119 | wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); | ||
1120 | break; | ||
1121 | case 2: | ||
1122 | case 3: | ||
1123 | wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), | ||
1124 | uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); | ||
1125 | break; | ||
1126 | case 4: | ||
1127 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), | ||
1128 | hwc->config >> 32); | ||
1129 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); | ||
1130 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); | ||
1131 | break; | ||
1132 | case 5: | ||
1133 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), | ||
1134 | hwc->config >> 32); | ||
1135 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); | ||
1136 | wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); | ||
1137 | break; | ||
1138 | } | ||
1139 | |||
1140 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
1141 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); | ||
1142 | } | ||
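
The register written by enable_event() above is picked by decoding the event index: idx % 6 selects the config register within a port's event set, while idx / 6 plus pmu_idx * 4 selects the PQI port. A small worked decode, with an invented event select:

	#include <stdio.h>

	int main(void)
	{
		int idx = 0x13, pmu_idx = 1;		/* event select 0x13 on the second rbox */
		int port = idx / 6 + pmu_idx * 4;	/* 3 + 4 = port 7 */
		int cfg  = idx % 6;			/* 1 -> IPERF_CFG1 for that port */

		printf("port=%d cfg=%d\n", port, cfg);
		return 0;
	}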
1143 | |||
1144 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); | ||
1145 | DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); | ||
1146 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); | ||
1147 | DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); | ||
1148 | DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); | ||
1149 | |||
1150 | static struct attribute *nhmex_uncore_rbox_formats_attr[] = { | ||
1151 | &format_attr_event5.attr, | ||
1152 | &format_attr_xbr_mm_cfg.attr, | ||
1153 | &format_attr_xbr_match.attr, | ||
1154 | &format_attr_xbr_mask.attr, | ||
1155 | &format_attr_qlx_cfg.attr, | ||
1156 | &format_attr_iperf_cfg.attr, | ||
1157 | NULL, | ||
1158 | }; | ||
1159 | |||
1160 | static struct attribute_group nhmex_uncore_rbox_format_group = { | ||
1161 | .name = "format", | ||
1162 | .attrs = nhmex_uncore_rbox_formats_attr, | ||
1163 | }; | ||
1164 | |||
1165 | static struct uncore_event_desc nhmex_uncore_rbox_events[] = { | ||
1166 | INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), | ||
1167 | INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"), | ||
1168 | INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), | ||
1169 | INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), | ||
1170 | INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"), | ||
1171 | INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"), | ||
1172 | { /* end: all zeroes */ }, | ||
1173 | }; | ||
1174 | |||
1175 | static struct intel_uncore_ops nhmex_uncore_rbox_ops = { | ||
1176 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1177 | .enable_event = nhmex_rbox_msr_enable_event, | ||
1178 | .hw_config = nhmex_rbox_hw_config, | ||
1179 | .get_constraint = nhmex_rbox_get_constraint, | ||
1180 | .put_constraint = nhmex_rbox_put_constraint, | ||
1181 | }; | ||
1182 | |||
1183 | static struct intel_uncore_type nhmex_uncore_rbox = { | ||
1184 | .name = "rbox", | ||
1185 | .num_counters = 8, | ||
1186 | .num_boxes = 2, | ||
1187 | .perf_ctr_bits = 48, | ||
1188 | .event_ctl = NHMEX_R_MSR_PMON_CTL0, | ||
1189 | .perf_ctr = NHMEX_R_MSR_PMON_CNT0, | ||
1190 | .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, | ||
1191 | .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, | ||
1192 | .msr_offset = NHMEX_R_MSR_OFFSET, | ||
1193 | .pair_ctr_ctl = 1, | ||
1194 | .num_shared_regs = 20, | ||
1195 | .event_descs = nhmex_uncore_rbox_events, | ||
1196 | .ops = &nhmex_uncore_rbox_ops, | ||
1197 | .format_group = &nhmex_uncore_rbox_format_group | ||
1198 | }; | ||
1199 | |||
1200 | static struct intel_uncore_type *nhmex_msr_uncores[] = { | ||
1201 | &nhmex_uncore_ubox, | ||
1202 | &nhmex_uncore_cbox, | ||
1203 | &nhmex_uncore_bbox, | ||
1204 | &nhmex_uncore_sbox, | ||
1205 | &nhmex_uncore_mbox, | ||
1206 | &nhmex_uncore_rbox, | ||
1207 | &nhmex_uncore_wbox, | ||
1208 | NULL, | ||
1209 | }; | ||
1210 | |||
1211 | void nhmex_uncore_cpu_init(void) | ||
1212 | { | ||
1213 | if (boot_cpu_data.x86_model == 46) | ||
1214 | uncore_nhmex = true; | ||
1215 | else | ||
1216 | nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; | ||
1217 | if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
1218 | nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
1219 | uncore_msr_uncores = nhmex_msr_uncores; | ||
1220 | } | ||
1221 | /* end of Nehalem-EX uncore support */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c new file mode 100644 index 000000000000..3001015b755c --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c | |||
@@ -0,0 +1,636 @@ | |||
1 | /* Nehalem/Sandy Bridge/Haswell uncore support */ | ||
2 | #include "perf_event_intel_uncore.h" | ||
3 | |||
4 | /* SNB event control */ | ||
5 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | ||
6 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 | ||
7 | #define SNB_UNC_CTL_EDGE_DET (1 << 18) | ||
8 | #define SNB_UNC_CTL_EN (1 << 22) | ||
9 | #define SNB_UNC_CTL_INVERT (1 << 23) | ||
10 | #define SNB_UNC_CTL_CMASK_MASK 0x1f000000 | ||
11 | #define NHM_UNC_CTL_CMASK_MASK 0xff000000 | ||
12 | #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) | ||
13 | |||
14 | #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | ||
15 | SNB_UNC_CTL_UMASK_MASK | \ | ||
16 | SNB_UNC_CTL_EDGE_DET | \ | ||
17 | SNB_UNC_CTL_INVERT | \ | ||
18 | SNB_UNC_CTL_CMASK_MASK) | ||
19 | |||
20 | #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | ||
21 | SNB_UNC_CTL_UMASK_MASK | \ | ||
22 | SNB_UNC_CTL_EDGE_DET | \ | ||
23 | SNB_UNC_CTL_INVERT | \ | ||
24 | NHM_UNC_CTL_CMASK_MASK) | ||
25 | |||
26 | /* SNB global control register */ | ||
27 | #define SNB_UNC_PERF_GLOBAL_CTL 0x391 | ||
28 | #define SNB_UNC_FIXED_CTR_CTRL 0x394 | ||
29 | #define SNB_UNC_FIXED_CTR 0x395 | ||
30 | |||
31 | /* SNB uncore global control */ | ||
32 | #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) | ||
33 | #define SNB_UNC_GLOBAL_CTL_EN (1 << 29) | ||
34 | |||
35 | /* SNB Cbo register */ | ||
36 | #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 | ||
37 | #define SNB_UNC_CBO_0_PER_CTR0 0x706 | ||
38 | #define SNB_UNC_CBO_MSR_OFFSET 0x10 | ||
39 | |||
40 | /* NHM global control register */ | ||
41 | #define NHM_UNC_PERF_GLOBAL_CTL 0x391 | ||
42 | #define NHM_UNC_FIXED_CTR 0x394 | ||
43 | #define NHM_UNC_FIXED_CTR_CTRL 0x395 | ||
44 | |||
45 | /* NHM uncore global control */ | ||
46 | #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) | ||
47 | #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) | ||
48 | |||
49 | /* NHM uncore register */ | ||
50 | #define NHM_UNC_PERFEVTSEL0 0x3c0 | ||
51 | #define NHM_UNC_UNCORE_PMC0 0x3b0 | ||
52 | |||
53 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | ||
54 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | ||
55 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | ||
56 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | ||
57 | DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); | ||
58 | DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); | ||
59 | |||
60 | /* Sandy Bridge uncore support */ | ||
61 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
62 | { | ||
63 | struct hw_perf_event *hwc = &event->hw; | ||
64 | |||
65 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | ||
66 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | ||
67 | else | ||
68 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); | ||
69 | } | ||
70 | |||
71 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
72 | { | ||
73 | wrmsrl(event->hw.config_base, 0); | ||
74 | } | ||
75 | |||
76 | static void snb_uncore_msr_init_box(struct intel_uncore_box *box) | ||
77 | { | ||
78 | if (box->pmu->pmu_idx == 0) { | ||
79 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, | ||
80 | SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static struct uncore_event_desc snb_uncore_events[] = { | ||
85 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
86 | { /* end: all zeroes */ }, | ||
87 | }; | ||
88 | |||
89 | static struct attribute *snb_uncore_formats_attr[] = { | ||
90 | &format_attr_event.attr, | ||
91 | &format_attr_umask.attr, | ||
92 | &format_attr_edge.attr, | ||
93 | &format_attr_inv.attr, | ||
94 | &format_attr_cmask5.attr, | ||
95 | NULL, | ||
96 | }; | ||
97 | |||
98 | static struct attribute_group snb_uncore_format_group = { | ||
99 | .name = "format", | ||
100 | .attrs = snb_uncore_formats_attr, | ||
101 | }; | ||
102 | |||
103 | static struct intel_uncore_ops snb_uncore_msr_ops = { | ||
104 | .init_box = snb_uncore_msr_init_box, | ||
105 | .disable_event = snb_uncore_msr_disable_event, | ||
106 | .enable_event = snb_uncore_msr_enable_event, | ||
107 | .read_counter = uncore_msr_read_counter, | ||
108 | }; | ||
109 | |||
110 | static struct event_constraint snb_uncore_cbox_constraints[] = { | ||
111 | UNCORE_EVENT_CONSTRAINT(0x80, 0x1), | ||
112 | UNCORE_EVENT_CONSTRAINT(0x83, 0x1), | ||
113 | EVENT_CONSTRAINT_END | ||
114 | }; | ||
115 | |||
116 | static struct intel_uncore_type snb_uncore_cbox = { | ||
117 | .name = "cbox", | ||
118 | .num_counters = 2, | ||
119 | .num_boxes = 4, | ||
120 | .perf_ctr_bits = 44, | ||
121 | .fixed_ctr_bits = 48, | ||
122 | .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, | ||
123 | .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, | ||
124 | .fixed_ctr = SNB_UNC_FIXED_CTR, | ||
125 | .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, | ||
126 | .single_fixed = 1, | ||
127 | .event_mask = SNB_UNC_RAW_EVENT_MASK, | ||
128 | .msr_offset = SNB_UNC_CBO_MSR_OFFSET, | ||
129 | .constraints = snb_uncore_cbox_constraints, | ||
130 | .ops = &snb_uncore_msr_ops, | ||
131 | .format_group = &snb_uncore_format_group, | ||
132 | .event_descs = snb_uncore_events, | ||
133 | }; | ||
134 | |||
135 | static struct intel_uncore_type *snb_msr_uncores[] = { | ||
136 | &snb_uncore_cbox, | ||
137 | NULL, | ||
138 | }; | ||
139 | |||
140 | void snb_uncore_cpu_init(void) | ||
141 | { | ||
142 | uncore_msr_uncores = snb_msr_uncores; | ||
143 | if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
144 | snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
145 | } | ||
146 | |||
147 | enum { | ||
148 | SNB_PCI_UNCORE_IMC, | ||
149 | }; | ||
150 | |||
151 | static struct uncore_event_desc snb_uncore_imc_events[] = { | ||
152 | INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"), | ||
153 | INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"), | ||
154 | INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"), | ||
155 | |||
156 | INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"), | ||
157 | INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"), | ||
158 | INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"), | ||
159 | |||
160 | { /* end: all zeroes */ }, | ||
161 | }; | ||
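
The scale strings above are not arbitrary: assuming each IMC free-running count corresponds to one 64-byte cacheline transferred, 64 / 2^20 = 6.103515625e-5, so multiplying raw counts by the scale yields MiB directly. A one-line check:

	#include <stdio.h>

	int main(void)
	{
		printf("%.9e\n", 64.0 / (1024 * 1024));	/* prints 6.103515625e-05 */
		return 0;
	}

From user space this shows up as, e.g., perf stat -a -e uncore_imc/data_reads/ sleep 1 reporting MiB (assuming the single box registers as uncore_imc).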
162 | |||
163 | #define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff | ||
164 | #define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 | ||
165 | |||
166 | /* page size multiple covering all config regs */ | ||
167 | #define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 | ||
168 | |||
169 | #define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 | ||
170 | #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050 | ||
171 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 | ||
172 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 | ||
173 | #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE | ||
174 | |||
175 | static struct attribute *snb_uncore_imc_formats_attr[] = { | ||
176 | &format_attr_event.attr, | ||
177 | NULL, | ||
178 | }; | ||
179 | |||
180 | static struct attribute_group snb_uncore_imc_format_group = { | ||
181 | .name = "format", | ||
182 | .attrs = snb_uncore_imc_formats_attr, | ||
183 | }; | ||
184 | |||
185 | static void snb_uncore_imc_init_box(struct intel_uncore_box *box) | ||
186 | { | ||
187 | struct pci_dev *pdev = box->pci_dev; | ||
188 | int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET; | ||
189 | resource_size_t addr; | ||
190 | u32 pci_dword; | ||
191 | |||
192 | pci_read_config_dword(pdev, where, &pci_dword); | ||
193 | addr = pci_dword; | ||
194 | |||
195 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
196 | pci_read_config_dword(pdev, where + 4, &pci_dword); | ||
197 | addr |= ((resource_size_t)pci_dword << 32); | ||
198 | #endif | ||
199 | |||
200 | addr &= ~(PAGE_SIZE - 1); | ||
201 | |||
202 | box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE); | ||
203 | box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; | ||
204 | } | ||
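
init_box() above assembles the IMC BAR from one or two PCI config dwords at offset 0x48 and masks off the low page bits before ioremap(). A sketch of just that address arithmetic, with invented register values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t lo = 0xfed10001, hi = 0;	/* invented dwords at 0x48 / 0x4c */
		uint64_t addr = ((uint64_t)hi << 32) | lo;

		addr &= ~((uint64_t)4096 - 1);		/* ~(PAGE_SIZE - 1): drop flag bits */
		printf("0x%llx\n", (unsigned long long)addr);	/* 0xfed10000 */
		return 0;
	}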
205 | |||
206 | static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) | ||
207 | {} | ||
208 | |||
209 | static void snb_uncore_imc_disable_box(struct intel_uncore_box *box) | ||
210 | {} | ||
211 | |||
212 | static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
213 | {} | ||
214 | |||
215 | static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
216 | {} | ||
217 | |||
218 | static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
219 | { | ||
220 | struct hw_perf_event *hwc = &event->hw; | ||
221 | |||
222 | return (u64)*(unsigned int *)(box->io_addr + hwc->event_base); | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * Custom event_init() function because we define our own fixed, free- | ||
227 | * running counters, so we do not want to conflict with the generic | ||
228 | * uncore logic. This also simplifies processing. | ||
229 | */ | ||
230 | static int snb_uncore_imc_event_init(struct perf_event *event) | ||
231 | { | ||
232 | struct intel_uncore_pmu *pmu; | ||
233 | struct intel_uncore_box *box; | ||
234 | struct hw_perf_event *hwc = &event->hw; | ||
235 | u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; | ||
236 | int idx, base; | ||
237 | |||
238 | if (event->attr.type != event->pmu->type) | ||
239 | return -ENOENT; | ||
240 | |||
241 | pmu = uncore_event_to_pmu(event); | ||
242 | /* no device found for this pmu */ | ||
243 | if (pmu->func_id < 0) | ||
244 | return -ENOENT; | ||
245 | |||
246 | /* Sampling not supported yet */ | ||
247 | if (hwc->sample_period) | ||
248 | return -EINVAL; | ||
249 | |||
250 | /* unsupported modes and filters */ | ||
251 | if (event->attr.exclude_user || | ||
252 | event->attr.exclude_kernel || | ||
253 | event->attr.exclude_hv || | ||
254 | event->attr.exclude_idle || | ||
255 | event->attr.exclude_host || | ||
256 | event->attr.exclude_guest || | ||
257 | event->attr.sample_period) /* no sampling */ | ||
258 | return -EINVAL; | ||
259 | |||
260 | /* | ||
261 | * Place all uncore events for a particular physical package | ||
262 | * onto a single cpu | ||
263 | */ | ||
264 | if (event->cpu < 0) | ||
265 | return -EINVAL; | ||
266 | |||
267 | /* check only supported bits are set */ | ||
268 | if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) | ||
269 | return -EINVAL; | ||
270 | |||
271 | box = uncore_pmu_to_box(pmu, event->cpu); | ||
272 | if (!box || box->cpu < 0) | ||
273 | return -EINVAL; | ||
274 | |||
275 | event->cpu = box->cpu; | ||
276 | |||
277 | event->hw.idx = -1; | ||
278 | event->hw.last_tag = ~0ULL; | ||
279 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | ||
280 | event->hw.branch_reg.idx = EXTRA_REG_NONE; | ||
281 | /* | ||
282 | * check event is known (whitelist, determines counter) | ||
283 | */ | ||
284 | switch (cfg) { | ||
285 | case SNB_UNCORE_PCI_IMC_DATA_READS: | ||
286 | base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; | ||
287 | idx = UNCORE_PMC_IDX_FIXED; | ||
288 | break; | ||
289 | case SNB_UNCORE_PCI_IMC_DATA_WRITES: | ||
290 | base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; | ||
291 | idx = UNCORE_PMC_IDX_FIXED + 1; | ||
292 | break; | ||
293 | default: | ||
294 | return -EINVAL; | ||
295 | } | ||
296 | |||
297 | /* must be done before validate_group */ | ||
298 | event->hw.event_base = base; | ||
299 | event->hw.config = cfg; | ||
300 | event->hw.idx = idx; | ||
301 | |||
302 | /* no group validation needed, we have free running counters */ | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
308 | { | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static void snb_uncore_imc_event_start(struct perf_event *event, int flags) | ||
313 | { | ||
314 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
315 | u64 count; | ||
316 | |||
317 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
318 | return; | ||
319 | |||
320 | event->hw.state = 0; | ||
321 | box->n_active++; | ||
322 | |||
323 | list_add_tail(&event->active_entry, &box->active_list); | ||
324 | |||
325 | count = snb_uncore_imc_read_counter(box, event); | ||
326 | local64_set(&event->hw.prev_count, count); | ||
327 | |||
328 | if (box->n_active == 1) | ||
329 | uncore_pmu_start_hrtimer(box); | ||
330 | } | ||
331 | |||
332 | static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) | ||
333 | { | ||
334 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
335 | struct hw_perf_event *hwc = &event->hw; | ||
336 | |||
337 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
338 | box->n_active--; | ||
339 | |||
340 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | ||
341 | hwc->state |= PERF_HES_STOPPED; | ||
342 | |||
343 | list_del(&event->active_entry); | ||
344 | |||
345 | if (box->n_active == 0) | ||
346 | uncore_pmu_cancel_hrtimer(box); | ||
347 | } | ||
348 | |||
349 | if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { | ||
350 | /* | ||
351 | * Drain the remaining delta count out of an event | ||
352 | * that we are disabling: | ||
353 | */ | ||
354 | uncore_perf_event_update(box, event); | ||
355 | hwc->state |= PERF_HES_UPTODATE; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | static int snb_uncore_imc_event_add(struct perf_event *event, int flags) | ||
360 | { | ||
361 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
362 | struct hw_perf_event *hwc = &event->hw; | ||
363 | |||
364 | if (!box) | ||
365 | return -ENODEV; | ||
366 | |||
367 | hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
368 | if (!(flags & PERF_EF_START)) | ||
369 | hwc->state |= PERF_HES_ARCH; | ||
370 | |||
371 | snb_uncore_imc_event_start(event, 0); | ||
372 | |||
373 | box->n_events++; | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) | ||
379 | { | ||
380 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
381 | int i; | ||
382 | |||
383 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); | ||
384 | |||
385 | for (i = 0; i < box->n_events; i++) { | ||
386 | if (event == box->event_list[i]) { | ||
387 | --box->n_events; | ||
388 | break; | ||
389 | } | ||
390 | } | ||
391 | } | ||
392 | |||
393 | static int snb_pci2phy_map_init(int devid) | ||
394 | { | ||
395 | struct pci_dev *dev = NULL; | ||
396 | int bus; | ||
397 | |||
398 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev); | ||
399 | if (!dev) | ||
400 | return -ENOTTY; | ||
401 | |||
402 | bus = dev->bus->number; | ||
403 | |||
404 | uncore_pcibus_to_physid[bus] = 0; | ||
405 | |||
406 | pci_dev_put(dev); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static struct pmu snb_uncore_imc_pmu = { | ||
412 | .task_ctx_nr = perf_invalid_context, | ||
413 | .event_init = snb_uncore_imc_event_init, | ||
414 | .add = snb_uncore_imc_event_add, | ||
415 | .del = snb_uncore_imc_event_del, | ||
416 | .start = snb_uncore_imc_event_start, | ||
417 | .stop = snb_uncore_imc_event_stop, | ||
418 | .read = uncore_pmu_event_read, | ||
419 | }; | ||
420 | |||
421 | static struct intel_uncore_ops snb_uncore_imc_ops = { | ||
422 | .init_box = snb_uncore_imc_init_box, | ||
423 | .enable_box = snb_uncore_imc_enable_box, | ||
424 | .disable_box = snb_uncore_imc_disable_box, | ||
425 | .disable_event = snb_uncore_imc_disable_event, | ||
426 | .enable_event = snb_uncore_imc_enable_event, | ||
427 | .hw_config = snb_uncore_imc_hw_config, | ||
428 | .read_counter = snb_uncore_imc_read_counter, | ||
429 | }; | ||
430 | |||
431 | static struct intel_uncore_type snb_uncore_imc = { | ||
432 | .name = "imc", | ||
433 | .num_counters = 2, | ||
434 | .num_boxes = 1, | ||
435 | .fixed_ctr_bits = 32, | ||
436 | .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, | ||
437 | .event_descs = snb_uncore_imc_events, | ||
438 | .format_group = &snb_uncore_imc_format_group, | ||
439 | .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, | ||
440 | .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, | ||
441 | .ops = &snb_uncore_imc_ops, | ||
442 | .pmu = &snb_uncore_imc_pmu, | ||
443 | }; | ||
444 | |||
445 | static struct intel_uncore_type *snb_pci_uncores[] = { | ||
446 | [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc, | ||
447 | NULL, | ||
448 | }; | ||
449 | |||
450 | static const struct pci_device_id snb_uncore_pci_ids[] = { | ||
451 | { /* IMC */ | ||
452 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC), | ||
453 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
454 | }, | ||
455 | { /* end: all zeroes */ }, | ||
456 | }; | ||
457 | |||
458 | static const struct pci_device_id ivb_uncore_pci_ids[] = { | ||
459 | { /* IMC */ | ||
460 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC), | ||
461 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
462 | }, | ||
463 | { /* IMC */ | ||
464 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC), | ||
465 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
466 | }, | ||
467 | { /* end: all zeroes */ }, | ||
468 | }; | ||
469 | |||
470 | static const struct pci_device_id hsw_uncore_pci_ids[] = { | ||
471 | { /* IMC */ | ||
472 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), | ||
473 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
474 | }, | ||
475 | { /* end: all zeroes */ }, | ||
476 | }; | ||
477 | |||
478 | static struct pci_driver snb_uncore_pci_driver = { | ||
479 | .name = "snb_uncore", | ||
480 | .id_table = snb_uncore_pci_ids, | ||
481 | }; | ||
482 | |||
483 | static struct pci_driver ivb_uncore_pci_driver = { | ||
484 | .name = "ivb_uncore", | ||
485 | .id_table = ivb_uncore_pci_ids, | ||
486 | }; | ||
487 | |||
488 | static struct pci_driver hsw_uncore_pci_driver = { | ||
489 | .name = "hsw_uncore", | ||
490 | .id_table = hsw_uncore_pci_ids, | ||
491 | }; | ||
492 | |||
493 | struct imc_uncore_pci_dev { | ||
494 | __u32 pci_id; | ||
495 | struct pci_driver *driver; | ||
496 | }; | ||
497 | #define IMC_DEV(a, d) \ | ||
498 | { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) } | ||
499 | |||
500 | static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { | ||
501 | IMC_DEV(SNB_IMC, &snb_uncore_pci_driver), | ||
502 | IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */ | ||
503 | IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */ | ||
504 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ | ||
505 | { /* end marker */ } | ||
506 | }; | ||
507 | |||
508 | |||
509 | #define for_each_imc_pci_id(x, t) \ | ||
510 | for (x = (t); (x)->pci_id; x++) | ||
511 | |||
512 | static struct pci_driver *imc_uncore_find_dev(void) | ||
513 | { | ||
514 | const struct imc_uncore_pci_dev *p; | ||
515 | int ret; | ||
516 | |||
517 | for_each_imc_pci_id(p, desktop_imc_pci_ids) { | ||
518 | ret = snb_pci2phy_map_init(p->pci_id); | ||
519 | if (ret == 0) | ||
520 | return p->driver; | ||
521 | } | ||
522 | return NULL; | ||
523 | } | ||
524 | |||
525 | static int imc_uncore_pci_init(void) | ||
526 | { | ||
527 | struct pci_driver *imc_drv = imc_uncore_find_dev(); | ||
528 | |||
529 | if (!imc_drv) | ||
530 | return -ENODEV; | ||
531 | |||
532 | uncore_pci_uncores = snb_pci_uncores; | ||
533 | uncore_pci_driver = imc_drv; | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | int snb_uncore_pci_init(void) | ||
539 | { | ||
540 | return imc_uncore_pci_init(); | ||
541 | } | ||
542 | |||
543 | int ivb_uncore_pci_init(void) | ||
544 | { | ||
545 | return imc_uncore_pci_init(); | ||
546 | } | ||
547 | int hsw_uncore_pci_init(void) | ||
548 | { | ||
549 | return imc_uncore_pci_init(); | ||
550 | } | ||
551 | |||
552 | /* end of Sandy Bridge uncore support */ | ||
553 | |||
554 | /* Nehalem uncore support */ | ||
555 | static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
556 | { | ||
557 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); | ||
558 | } | ||
559 | |||
560 | static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
561 | { | ||
562 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); | ||
563 | } | ||
564 | |||
565 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
566 | { | ||
567 | struct hw_perf_event *hwc = &event->hw; | ||
568 | |||
569 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | ||
570 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | ||
571 | else | ||
572 | wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); | ||
573 | } | ||
574 | |||
575 | static struct attribute *nhm_uncore_formats_attr[] = { | ||
576 | &format_attr_event.attr, | ||
577 | &format_attr_umask.attr, | ||
578 | &format_attr_edge.attr, | ||
579 | &format_attr_inv.attr, | ||
580 | &format_attr_cmask8.attr, | ||
581 | NULL, | ||
582 | }; | ||
583 | |||
584 | static struct attribute_group nhm_uncore_format_group = { | ||
585 | .name = "format", | ||
586 | .attrs = nhm_uncore_formats_attr, | ||
587 | }; | ||
588 | |||
589 | static struct uncore_event_desc nhm_uncore_events[] = { | ||
590 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
591 | INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), | ||
592 | INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), | ||
593 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), | ||
594 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), | ||
595 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), | ||
596 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), | ||
597 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), | ||
598 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), | ||
599 | { /* end: all zeroes */ }, | ||
600 | }; | ||
601 | |||
602 | static struct intel_uncore_ops nhm_uncore_msr_ops = { | ||
603 | .disable_box = nhm_uncore_msr_disable_box, | ||
604 | .enable_box = nhm_uncore_msr_enable_box, | ||
605 | .disable_event = snb_uncore_msr_disable_event, | ||
606 | .enable_event = nhm_uncore_msr_enable_event, | ||
607 | .read_counter = uncore_msr_read_counter, | ||
608 | }; | ||
609 | |||
610 | static struct intel_uncore_type nhm_uncore = { | ||
611 | .name = "", | ||
612 | .num_counters = 8, | ||
613 | .num_boxes = 1, | ||
614 | .perf_ctr_bits = 48, | ||
615 | .fixed_ctr_bits = 48, | ||
616 | .event_ctl = NHM_UNC_PERFEVTSEL0, | ||
617 | .perf_ctr = NHM_UNC_UNCORE_PMC0, | ||
618 | .fixed_ctr = NHM_UNC_FIXED_CTR, | ||
619 | .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, | ||
620 | .event_mask = NHM_UNC_RAW_EVENT_MASK, | ||
621 | .event_descs = nhm_uncore_events, | ||
622 | .ops = &nhm_uncore_msr_ops, | ||
623 | .format_group = &nhm_uncore_format_group, | ||
624 | }; | ||
625 | |||
626 | static struct intel_uncore_type *nhm_msr_uncores[] = { | ||
627 | &nhm_uncore, | ||
628 | NULL, | ||
629 | }; | ||
630 | |||
631 | void nhm_uncore_cpu_init(void) | ||
632 | { | ||
633 | uncore_msr_uncores = nhm_msr_uncores; | ||
634 | } | ||
635 | |||
636 | /* end of Nehalem uncore support */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c new file mode 100644 index 000000000000..adf138eac85c --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | |||
@@ -0,0 +1,2258 @@ | |||
1 | /* SandyBridge-EP/IvyTown uncore support */ | ||
2 | #include "perf_event_intel_uncore.h" | ||
3 | |||
4 | |||
5 | /* SNB-EP Box level control */ | ||
6 | #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) | ||
7 | #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) | ||
8 | #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) | ||
9 | #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) | ||
10 | #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ | ||
11 | SNBEP_PMON_BOX_CTL_RST_CTRS | \ | ||
12 | SNBEP_PMON_BOX_CTL_FRZ_EN) | ||
13 | /* SNB-EP event control */ | ||
14 | #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
15 | #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
16 | #define SNBEP_PMON_CTL_RST (1 << 17) | ||
17 | #define SNBEP_PMON_CTL_EDGE_DET (1 << 18) | ||
18 | #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) | ||
19 | #define SNBEP_PMON_CTL_EN (1 << 22) | ||
20 | #define SNBEP_PMON_CTL_INVERT (1 << 23) | ||
21 | #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 | ||
22 | #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
23 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
24 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
25 | SNBEP_PMON_CTL_INVERT | \ | ||
26 | SNBEP_PMON_CTL_TRESH_MASK) | ||
27 | |||
28 | /* SNB-EP Ubox event control */ | ||
29 | #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 | ||
30 | #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ | ||
31 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
32 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
33 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
34 | SNBEP_PMON_CTL_INVERT | \ | ||
35 | SNBEP_U_MSR_PMON_CTL_TRESH_MASK) | ||
36 | |||
37 | #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) | ||
38 | #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
39 | SNBEP_CBO_PMON_CTL_TID_EN) | ||
40 | |||
41 | /* SNB-EP PCU event control */ | ||
42 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 | ||
43 | #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 | ||
44 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) | ||
45 | #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) | ||
46 | #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ | ||
47 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
48 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ | ||
49 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
50 | SNBEP_PMON_CTL_EV_SEL_EXT | \ | ||
51 | SNBEP_PMON_CTL_INVERT | \ | ||
52 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ | ||
53 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | ||
54 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | ||
55 | |||
56 | #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
57 | (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
58 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
59 | |||
60 | /* SNB-EP pci control register */ | ||
61 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 | ||
62 | #define SNBEP_PCI_PMON_CTL0 0xd8 | ||
63 | /* SNB-EP pci counter register */ | ||
64 | #define SNBEP_PCI_PMON_CTR0 0xa0 | ||
65 | |||
66 | /* SNB-EP home agent register */ | ||
67 | #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 | ||
68 | #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 | ||
69 | #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 | ||
70 | /* SNB-EP memory controller register */ | ||
71 | #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 | ||
72 | #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 | ||
73 | /* SNB-EP QPI register */ | ||
74 | #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 | ||
75 | #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c | ||
76 | #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 | ||
77 | #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c | ||
78 | |||
79 | /* SNB-EP Ubox register */ | ||
80 | #define SNBEP_U_MSR_PMON_CTR0 0xc16 | ||
81 | #define SNBEP_U_MSR_PMON_CTL0 0xc10 | ||
82 | |||
83 | #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 | ||
84 | #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 | ||
85 | |||
86 | /* SNB-EP Cbo register */ | ||
87 | #define SNBEP_C0_MSR_PMON_CTR0 0xd16 | ||
88 | #define SNBEP_C0_MSR_PMON_CTL0 0xd10 | ||
89 | #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 | ||
90 | #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 | ||
91 | #define SNBEP_CBO_MSR_OFFSET 0x20 | ||
92 | |||
93 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f | ||
94 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 | ||
95 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 | ||
96 | #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 | ||
97 | |||
98 | #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ | ||
99 | .event = (e), \ | ||
100 | .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ | ||
101 | .config_mask = (m), \ | ||
102 | .idx = (i) \ | ||
103 | } | ||
104 | |||
105 | /* SNB-EP PCU register */ | ||
106 | #define SNBEP_PCU_MSR_PMON_CTR0 0xc36 | ||
107 | #define SNBEP_PCU_MSR_PMON_CTL0 0xc30 | ||
108 | #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 | ||
109 | #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 | ||
110 | #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff | ||
111 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc | ||
112 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd | ||
113 | |||
114 | /* IVBEP event control */ | ||
115 | #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ | ||
116 | SNBEP_PMON_BOX_CTL_RST_CTRS) | ||
117 | #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
118 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
119 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
120 | SNBEP_PMON_CTL_TRESH_MASK) | ||
121 | /* IVBEP Ubox */ | ||
122 | #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
123 | #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
124 | #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) | ||
125 | |||
126 | #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \ | ||
127 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
128 | SNBEP_PMON_CTL_UMASK_MASK | \ | ||
129 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
130 | SNBEP_U_MSR_PMON_CTL_TRESH_MASK) | ||
131 | /* IVBEP Cbo */ | ||
132 | #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \ | ||
133 | SNBEP_CBO_PMON_CTL_TID_EN) | ||
134 | |||
135 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) | ||
136 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) | ||
137 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) | ||
138 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) | ||
139 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) | ||
140 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) | ||
141 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) | ||
142 | #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63) | ||
143 | |||
144 | /* IVBEP home agent */ | ||
145 | #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) | ||
146 | #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \ | ||
147 | (IVBEP_PMON_RAW_EVENT_MASK | \ | ||
148 | IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST) | ||
149 | /* IVBEP PCU */ | ||
150 | #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ | ||
151 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | ||
152 | SNBEP_PMON_CTL_EV_SEL_EXT | \ | ||
153 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ | ||
154 | SNBEP_PMON_CTL_EDGE_DET | \ | ||
155 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ | ||
156 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | ||
157 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | ||
158 | /* IVBEP QPI */ | ||
159 | #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
160 | (IVBEP_PMON_RAW_EVENT_MASK | \ | ||
161 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
162 | |||
163 | #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ | ||
164 | ((1ULL << (n)) - 1))) | ||
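__BITS_VALUE() extracts the i-th n-bit field of x; the constraint code below uses it with n = 6 (Cbox) and n = 8 (PCU) to pull one reference count out of a packed atomic_t. A self-contained user-space sketch of the arithmetic (relies on the GCC typeof extension; the packed value is made up):

	#include <stdio.h>

	#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
					((1ULL << (n)) - 1)))

	int main(void)
	{
		/* three 6-bit refcounts packed into one word: 2, 5 and 1 */
		unsigned int ref = (1 << 12) | (5 << 6) | 2;

		printf("%u %u %u\n",
		       __BITS_VALUE(ref, 0, 6),		/* 2 */
		       __BITS_VALUE(ref, 1, 6),		/* 5 */
		       __BITS_VALUE(ref, 2, 6));	/* 1 */
		return 0;
	}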
165 | |||
166 | /* Haswell-EP Ubox */ | ||
167 | #define HSWEP_U_MSR_PMON_CTR0 0x705 | ||
168 | #define HSWEP_U_MSR_PMON_CTL0 0x709 | ||
169 | #define HSWEP_U_MSR_PMON_FILTER 0x707 | ||
170 | |||
171 | #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703 | ||
172 | #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704 | ||
173 | |||
174 | #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0) | ||
175 | #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1) | ||
176 | #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \ | ||
177 | (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \ | ||
178 | HSWEP_U_MSR_PMON_BOX_FILTER_CID) | ||
179 | |||
180 | /* Haswell-EP CBo */ | ||
181 | #define HSWEP_C0_MSR_PMON_CTR0 0xe08 | ||
182 | #define HSWEP_C0_MSR_PMON_CTL0 0xe01 | ||
183 | #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00 | ||
184 | #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05 | ||
185 | #define HSWEP_CBO_MSR_OFFSET 0x10 | ||
186 | |||
187 | |||
188 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0) | ||
189 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6) | ||
190 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17) | ||
191 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) | ||
192 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) | ||
193 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) | ||
194 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) | ||
195 | #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63) | ||
196 | |||
197 | |||
198 | /* Haswell-EP Sbox */ | ||
199 | #define HSWEP_S0_MSR_PMON_CTR0 0x726 | ||
200 | #define HSWEP_S0_MSR_PMON_CTL0 0x721 | ||
201 | #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720 | ||
202 | #define HSWEP_SBOX_MSR_OFFSET 0xa | ||
203 | #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
204 | SNBEP_CBO_PMON_CTL_TID_EN) | ||
205 | |||
206 | /* Haswell-EP PCU */ | ||
207 | #define HSWEP_PCU_MSR_PMON_CTR0 0x717 | ||
208 | #define HSWEP_PCU_MSR_PMON_CTL0 0x711 | ||
209 | #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710 | ||
210 | #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715 | ||
211 | |||
212 | |||
213 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | ||
214 | DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); | ||
215 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | ||
216 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | ||
217 | DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); | ||
218 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | ||
219 | DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); | ||
220 | DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); | ||
221 | DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); | ||
222 | DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); | ||
223 | DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:31"); | ||
224 | DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); | ||
225 | DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0"); | ||
226 | DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5"); | ||
227 | DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5"); | ||
228 | DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); | ||
229 | DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); | ||
230 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); | ||
231 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); | ||
232 | DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); | ||
233 | DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); | ||
234 | DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23"); | ||
235 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); | ||
236 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); | ||
237 | DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62"); | ||
238 | DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61"); | ||
239 | DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63"); | ||
240 | DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); | ||
241 | DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); | ||
242 | DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); | ||
243 | DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); | ||
244 | DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); | ||
245 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); | ||
246 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); | ||
247 | DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); | ||
248 | DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); | ||
249 | DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); | ||
250 | DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); | ||
251 | DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); | ||
252 | DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); | ||
253 | DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); | ||
254 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); | ||
255 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); | ||
256 | DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); | ||
257 | DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); | ||
258 | DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); | ||
259 | DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); | ||
260 | DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); | ||
261 | DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); | ||
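Each DEFINE_UNCORE_FORMAT_ATTR() becomes a file under the PMU's sysfs format/ directory, and its "configN:lo-hi" string tells userspace perf where the named field sits in attr.config/config1/config2. A command such as perf stat -e 'uncore_cbox_0/event=0x34,umask=0x3,filter_state=0x1f/' (illustrative; the PMU name depends on the running kernel) would therefore be packed roughly as:

	/* sketch of the packing perf derives from the format strings above */
	u64 config  = 0x34 | (0x3ULL << 8);	/* event config:0-7, umask config:8-15 */
	u64 config1 = 0x1fULL << 18;		/* filter_state config1:18-22 */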
262 | |||
263 | static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) | ||
264 | { | ||
265 | struct pci_dev *pdev = box->pci_dev; | ||
266 | int box_ctl = uncore_pci_box_ctl(box); | ||
267 | u32 config = 0; | ||
268 | |||
269 | if (!pci_read_config_dword(pdev, box_ctl, &config)) { | ||
270 | config |= SNBEP_PMON_BOX_CTL_FRZ; | ||
271 | pci_write_config_dword(pdev, box_ctl, config); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) | ||
276 | { | ||
277 | struct pci_dev *pdev = box->pci_dev; | ||
278 | int box_ctl = uncore_pci_box_ctl(box); | ||
279 | u32 config = 0; | ||
280 | |||
281 | if (!pci_read_config_dword(pdev, box_ctl, &config)) { | ||
282 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; | ||
283 | pci_write_config_dword(pdev, box_ctl, config); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
288 | { | ||
289 | struct pci_dev *pdev = box->pci_dev; | ||
290 | struct hw_perf_event *hwc = &event->hw; | ||
291 | |||
292 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
293 | } | ||
294 | |||
295 | static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
296 | { | ||
297 | struct pci_dev *pdev = box->pci_dev; | ||
298 | struct hw_perf_event *hwc = &event->hw; | ||
299 | |||
300 | pci_write_config_dword(pdev, hwc->config_base, hwc->config); | ||
301 | } | ||
302 | |||
303 | static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
304 | { | ||
305 | struct pci_dev *pdev = box->pci_dev; | ||
306 | struct hw_perf_event *hwc = &event->hw; | ||
307 | u64 count = 0; | ||
308 | |||
309 | pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); | ||
310 | pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); | ||
311 | |||
312 | return count; | ||
313 | } | ||
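The two dword reads fill the low and high halves of count through a pointer cast, which is correct only because x86 is little-endian. An equivalent, endianness-independent formulation, shown purely for comparison:

	u32 lo, hi;

	pci_read_config_dword(pdev, hwc->event_base, &lo);
	pci_read_config_dword(pdev, hwc->event_base + 4, &hi);
	return ((u64)hi << 32) | lo;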
314 | |||
315 | static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) | ||
316 | { | ||
317 | struct pci_dev *pdev = box->pci_dev; | ||
318 | |||
319 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); | ||
320 | } | ||
321 | |||
322 | static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
323 | { | ||
324 | u64 config; | ||
325 | unsigned msr; | ||
326 | |||
327 | msr = uncore_msr_box_ctl(box); | ||
328 | if (msr) { | ||
329 | rdmsrl(msr, config); | ||
330 | config |= SNBEP_PMON_BOX_CTL_FRZ; | ||
331 | wrmsrl(msr, config); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
336 | { | ||
337 | u64 config; | ||
338 | unsigned msr; | ||
339 | |||
340 | msr = uncore_msr_box_ctl(box); | ||
341 | if (msr) { | ||
342 | rdmsrl(msr, config); | ||
343 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; | ||
344 | wrmsrl(msr, config); | ||
345 | } | ||
346 | } | ||
347 | |||
348 | static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
349 | { | ||
350 | struct hw_perf_event *hwc = &event->hw; | ||
351 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
352 | |||
353 | if (reg1->idx != EXTRA_REG_NONE) | ||
354 | wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); | ||
355 | |||
356 | wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
357 | } | ||
358 | |||
359 | static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, | ||
360 | struct perf_event *event) | ||
361 | { | ||
362 | struct hw_perf_event *hwc = &event->hw; | ||
363 | |||
364 | wrmsrl(hwc->config_base, hwc->config); | ||
365 | } | ||
366 | |||
367 | static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) | ||
368 | { | ||
369 | unsigned msr = uncore_msr_box_ctl(box); | ||
370 | |||
371 | if (msr) | ||
372 | wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); | ||
373 | } | ||
374 | |||
375 | static struct attribute *snbep_uncore_formats_attr[] = { | ||
376 | &format_attr_event.attr, | ||
377 | &format_attr_umask.attr, | ||
378 | &format_attr_edge.attr, | ||
379 | &format_attr_inv.attr, | ||
380 | &format_attr_thresh8.attr, | ||
381 | NULL, | ||
382 | }; | ||
383 | |||
384 | static struct attribute *snbep_uncore_ubox_formats_attr[] = { | ||
385 | &format_attr_event.attr, | ||
386 | &format_attr_umask.attr, | ||
387 | &format_attr_edge.attr, | ||
388 | &format_attr_inv.attr, | ||
389 | &format_attr_thresh5.attr, | ||
390 | NULL, | ||
391 | }; | ||
392 | |||
393 | static struct attribute *snbep_uncore_cbox_formats_attr[] = { | ||
394 | &format_attr_event.attr, | ||
395 | &format_attr_umask.attr, | ||
396 | &format_attr_edge.attr, | ||
397 | &format_attr_tid_en.attr, | ||
398 | &format_attr_inv.attr, | ||
399 | &format_attr_thresh8.attr, | ||
400 | &format_attr_filter_tid.attr, | ||
401 | &format_attr_filter_nid.attr, | ||
402 | &format_attr_filter_state.attr, | ||
403 | &format_attr_filter_opc.attr, | ||
404 | NULL, | ||
405 | }; | ||
406 | |||
407 | static struct attribute *snbep_uncore_pcu_formats_attr[] = { | ||
408 | &format_attr_event_ext.attr, | ||
409 | &format_attr_occ_sel.attr, | ||
410 | &format_attr_edge.attr, | ||
411 | &format_attr_inv.attr, | ||
412 | &format_attr_thresh5.attr, | ||
413 | &format_attr_occ_invert.attr, | ||
414 | &format_attr_occ_edge.attr, | ||
415 | &format_attr_filter_band0.attr, | ||
416 | &format_attr_filter_band1.attr, | ||
417 | &format_attr_filter_band2.attr, | ||
418 | &format_attr_filter_band3.attr, | ||
419 | NULL, | ||
420 | }; | ||
421 | |||
422 | static struct attribute *snbep_uncore_qpi_formats_attr[] = { | ||
423 | &format_attr_event_ext.attr, | ||
424 | &format_attr_umask.attr, | ||
425 | &format_attr_edge.attr, | ||
426 | &format_attr_inv.attr, | ||
427 | &format_attr_thresh8.attr, | ||
428 | &format_attr_match_rds.attr, | ||
429 | &format_attr_match_rnid30.attr, | ||
430 | &format_attr_match_rnid4.attr, | ||
431 | &format_attr_match_dnid.attr, | ||
432 | &format_attr_match_mc.attr, | ||
433 | &format_attr_match_opc.attr, | ||
434 | &format_attr_match_vnw.attr, | ||
435 | &format_attr_match0.attr, | ||
436 | &format_attr_match1.attr, | ||
437 | &format_attr_mask_rds.attr, | ||
438 | &format_attr_mask_rnid30.attr, | ||
439 | &format_attr_mask_rnid4.attr, | ||
440 | &format_attr_mask_dnid.attr, | ||
441 | &format_attr_mask_mc.attr, | ||
442 | &format_attr_mask_opc.attr, | ||
443 | &format_attr_mask_vnw.attr, | ||
444 | &format_attr_mask0.attr, | ||
445 | &format_attr_mask1.attr, | ||
446 | NULL, | ||
447 | }; | ||
448 | |||
449 | static struct uncore_event_desc snbep_uncore_imc_events[] = { | ||
450 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | ||
451 | INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), | ||
452 | INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), | ||
453 | { /* end: all zeroes */ }, | ||
454 | }; | ||
455 | |||
456 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | ||
457 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | ||
458 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | ||
459 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), | ||
460 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), | ||
461 | { /* end: all zeroes */ }, | ||
462 | }; | ||
463 | |||
464 | static struct attribute_group snbep_uncore_format_group = { | ||
465 | .name = "format", | ||
466 | .attrs = snbep_uncore_formats_attr, | ||
467 | }; | ||
468 | |||
469 | static struct attribute_group snbep_uncore_ubox_format_group = { | ||
470 | .name = "format", | ||
471 | .attrs = snbep_uncore_ubox_formats_attr, | ||
472 | }; | ||
473 | |||
474 | static struct attribute_group snbep_uncore_cbox_format_group = { | ||
475 | .name = "format", | ||
476 | .attrs = snbep_uncore_cbox_formats_attr, | ||
477 | }; | ||
478 | |||
479 | static struct attribute_group snbep_uncore_pcu_format_group = { | ||
480 | .name = "format", | ||
481 | .attrs = snbep_uncore_pcu_formats_attr, | ||
482 | }; | ||
483 | |||
484 | static struct attribute_group snbep_uncore_qpi_format_group = { | ||
485 | .name = "format", | ||
486 | .attrs = snbep_uncore_qpi_formats_attr, | ||
487 | }; | ||
488 | |||
489 | #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ | ||
490 | .init_box = snbep_uncore_msr_init_box, \ | ||
491 | .disable_box = snbep_uncore_msr_disable_box, \ | ||
492 | .enable_box = snbep_uncore_msr_enable_box, \ | ||
493 | .disable_event = snbep_uncore_msr_disable_event, \ | ||
494 | .enable_event = snbep_uncore_msr_enable_event, \ | ||
495 | .read_counter = uncore_msr_read_counter | ||
496 | |||
497 | static struct intel_uncore_ops snbep_uncore_msr_ops = { | ||
498 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
499 | }; | ||
500 | |||
501 | #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ | ||
502 | .init_box = snbep_uncore_pci_init_box, \ | ||
503 | .disable_box = snbep_uncore_pci_disable_box, \ | ||
504 | .enable_box = snbep_uncore_pci_enable_box, \ | ||
505 | .disable_event = snbep_uncore_pci_disable_event, \ | ||
506 | .read_counter = snbep_uncore_pci_read_counter | ||
507 | |||
508 | static struct intel_uncore_ops snbep_uncore_pci_ops = { | ||
509 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), | ||
510 | .enable_event = snbep_uncore_pci_enable_event, \ | ||
511 | }; | ||
512 | |||
513 | static struct event_constraint snbep_uncore_cbox_constraints[] = { | ||
514 | UNCORE_EVENT_CONSTRAINT(0x01, 0x1), | ||
515 | UNCORE_EVENT_CONSTRAINT(0x02, 0x3), | ||
516 | UNCORE_EVENT_CONSTRAINT(0x04, 0x3), | ||
517 | UNCORE_EVENT_CONSTRAINT(0x05, 0x3), | ||
518 | UNCORE_EVENT_CONSTRAINT(0x07, 0x3), | ||
519 | UNCORE_EVENT_CONSTRAINT(0x09, 0x3), | ||
520 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), | ||
521 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), | ||
522 | UNCORE_EVENT_CONSTRAINT(0x13, 0x3), | ||
523 | UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), | ||
524 | UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), | ||
525 | UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), | ||
526 | UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), | ||
527 | EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), | ||
528 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), | ||
529 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
530 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), | ||
531 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
532 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
533 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
534 | UNCORE_EVENT_CONSTRAINT(0x35, 0x3), | ||
535 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), | ||
536 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), | ||
537 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
538 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), | ||
539 | UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), | ||
540 | EVENT_CONSTRAINT_END | ||
541 | }; | ||
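In these tables the first argument is the event code and the second a bitmask of the counters the event may occupy, so 0x3 means counters 0-1 and 0xc counters 2-3. For example:

	/* event 0x1b may only be scheduled on counter 2 or 3 (0xc == 0b1100) */
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc)

The 0x1f row uses EVENT_CONSTRAINT_OVERLAP because its mask 0xe only partially overlaps the 0x3 and 0xc masks, a case the counter scheduler has to treat specially.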
542 | |||
543 | static struct event_constraint snbep_uncore_r2pcie_constraints[] = { | ||
544 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
545 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
546 | UNCORE_EVENT_CONSTRAINT(0x12, 0x1), | ||
547 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
548 | UNCORE_EVENT_CONSTRAINT(0x24, 0x3), | ||
549 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), | ||
550 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
551 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
552 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
553 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
554 | EVENT_CONSTRAINT_END | ||
555 | }; | ||
556 | |||
557 | static struct event_constraint snbep_uncore_r3qpi_constraints[] = { | ||
558 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
559 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
560 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), | ||
561 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), | ||
562 | UNCORE_EVENT_CONSTRAINT(0x20, 0x3), | ||
563 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), | ||
564 | UNCORE_EVENT_CONSTRAINT(0x22, 0x3), | ||
565 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
566 | UNCORE_EVENT_CONSTRAINT(0x24, 0x3), | ||
567 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), | ||
568 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
569 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), | ||
570 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), | ||
571 | UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), | ||
572 | UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), | ||
573 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), | ||
574 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), | ||
575 | UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), | ||
576 | UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), | ||
577 | UNCORE_EVENT_CONSTRAINT(0x30, 0x3), | ||
578 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), | ||
579 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
580 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
581 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
582 | UNCORE_EVENT_CONSTRAINT(0x36, 0x3), | ||
583 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), | ||
584 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
585 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), | ||
586 | EVENT_CONSTRAINT_END | ||
587 | }; | ||
588 | |||
589 | static struct intel_uncore_type snbep_uncore_ubox = { | ||
590 | .name = "ubox", | ||
591 | .num_counters = 2, | ||
592 | .num_boxes = 1, | ||
593 | .perf_ctr_bits = 44, | ||
594 | .fixed_ctr_bits = 48, | ||
595 | .perf_ctr = SNBEP_U_MSR_PMON_CTR0, | ||
596 | .event_ctl = SNBEP_U_MSR_PMON_CTL0, | ||
597 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, | ||
598 | .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, | ||
599 | .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, | ||
600 | .ops = &snbep_uncore_msr_ops, | ||
601 | .format_group = &snbep_uncore_ubox_format_group, | ||
602 | }; | ||
603 | |||
604 | static struct extra_reg snbep_uncore_cbox_extra_regs[] = { | ||
605 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | ||
606 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | ||
607 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), | ||
608 | SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), | ||
609 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), | ||
610 | SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), | ||
611 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), | ||
612 | SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), | ||
613 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), | ||
614 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), | ||
615 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), | ||
616 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), | ||
617 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), | ||
618 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), | ||
619 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), | ||
620 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), | ||
621 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), | ||
622 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), | ||
623 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), | ||
624 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), | ||
625 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), | ||
626 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), | ||
627 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), | ||
628 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), | ||
629 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), | ||
630 | EVENT_EXTRA_END | ||
631 | }; | ||
632 | |||
633 | static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
634 | { | ||
635 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
636 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
637 | int i; | ||
638 | |||
639 | if (uncore_box_is_fake(box)) | ||
640 | return; | ||
641 | |||
642 | for (i = 0; i < 5; i++) { | ||
643 | if (reg1->alloc & (0x1 << i)) | ||
644 | atomic_sub(1 << (i * 6), &er->ref); | ||
645 | } | ||
646 | reg1->alloc = 0; | ||
647 | } | ||
648 | |||
649 | static struct event_constraint * | ||
650 | __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, | ||
651 | u64 (*cbox_filter_mask)(int fields)) | ||
652 | { | ||
653 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
654 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
655 | int i, alloc = 0; | ||
656 | unsigned long flags; | ||
657 | u64 mask; | ||
658 | |||
659 | if (reg1->idx == EXTRA_REG_NONE) | ||
660 | return NULL; | ||
661 | |||
662 | raw_spin_lock_irqsave(&er->lock, flags); | ||
663 | for (i = 0; i < 5; i++) { | ||
664 | if (!(reg1->idx & (0x1 << i))) | ||
665 | continue; | ||
666 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
667 | continue; | ||
668 | |||
669 | mask = cbox_filter_mask(0x1 << i); | ||
670 | if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || | ||
671 | !((reg1->config ^ er->config) & mask)) { | ||
672 | atomic_add(1 << (i * 6), &er->ref); | ||
673 | er->config &= ~mask; | ||
674 | er->config |= reg1->config & mask; | ||
675 | alloc |= (0x1 << i); | ||
676 | } else { | ||
677 | break; | ||
678 | } | ||
679 | } | ||
680 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
681 | if (i < 5) | ||
682 | goto fail; | ||
683 | |||
684 | if (!uncore_box_is_fake(box)) | ||
685 | reg1->alloc |= alloc; | ||
686 | |||
687 | return NULL; | ||
688 | fail: | ||
689 | for (; i >= 0; i--) { | ||
690 | if (alloc & (0x1 << i)) | ||
691 | atomic_sub(1 << (i * 6), &er->ref); | ||
692 | } | ||
693 | return &uncore_constraint_empty; | ||
694 | } | ||
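The loop tries to claim every filter field the event needs: a field is grantable when its 6-bit refcount in er->ref is still zero, or when all current users programmed identical bits (the XOR-and-mask test); otherwise everything claimed so far is rolled back. The claim test for one field i, condensed under those assumptions:

	u64 mask = cbox_filter_mask(0x1 << i);
	bool grantable = !__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
			 !((reg1->config ^ er->config) & mask);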
695 | |||
696 | static u64 snbep_cbox_filter_mask(int fields) | ||
697 | { | ||
698 | u64 mask = 0; | ||
699 | |||
700 | if (fields & 0x1) | ||
701 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; | ||
702 | if (fields & 0x2) | ||
703 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; | ||
704 | if (fields & 0x4) | ||
705 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; | ||
706 | if (fields & 0x8) | ||
707 | mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; | ||
708 | |||
709 | return mask; | ||
710 | } | ||
711 | |||
712 | static struct event_constraint * | ||
713 | snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
714 | { | ||
715 | return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); | ||
716 | } | ||
717 | |||
718 | static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
719 | { | ||
720 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
721 | struct extra_reg *er; | ||
722 | int idx = 0; | ||
723 | |||
724 | for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { | ||
725 | if (er->event != (event->hw.config & er->config_mask)) | ||
726 | continue; | ||
727 | idx |= er->idx; | ||
728 | } | ||
729 | |||
730 | if (idx) { | ||
731 | reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + | ||
732 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | ||
733 | reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); | ||
734 | reg1->idx = idx; | ||
735 | } | ||
736 | return 0; | ||
737 | } | ||
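hw_config() only records which filter fields an event implies; the actual claim happens in get_constraint(). E.g. a config of 0x0135 (event 0x35, umask 0x01) matches a table row carrying idx 0x8, so the event touches nothing but the opcode bits of the box filter:

	/* idx 0x8 selects only the opcode field of the filter MSR */
	u64 m = snbep_cbox_filter_mask(0x8);	/* SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC */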
738 | |||
739 | static struct intel_uncore_ops snbep_uncore_cbox_ops = { | ||
740 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
741 | .hw_config = snbep_cbox_hw_config, | ||
742 | .get_constraint = snbep_cbox_get_constraint, | ||
743 | .put_constraint = snbep_cbox_put_constraint, | ||
744 | }; | ||
745 | |||
746 | static struct intel_uncore_type snbep_uncore_cbox = { | ||
747 | .name = "cbox", | ||
748 | .num_counters = 4, | ||
749 | .num_boxes = 8, | ||
750 | .perf_ctr_bits = 44, | ||
751 | .event_ctl = SNBEP_C0_MSR_PMON_CTL0, | ||
752 | .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, | ||
753 | .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, | ||
754 | .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, | ||
755 | .msr_offset = SNBEP_CBO_MSR_OFFSET, | ||
756 | .num_shared_regs = 1, | ||
757 | .constraints = snbep_uncore_cbox_constraints, | ||
758 | .ops = &snbep_uncore_cbox_ops, | ||
759 | .format_group = &snbep_uncore_cbox_format_group, | ||
760 | }; | ||
761 | |||
762 | static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
763 | { | ||
764 | struct hw_perf_event *hwc = &event->hw; | ||
765 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
766 | u64 config = reg1->config; | ||
767 | |||
768 | if (new_idx > reg1->idx) | ||
769 | config <<= 8 * (new_idx - reg1->idx); | ||
770 | else | ||
771 | config >>= 8 * (reg1->idx - new_idx); | ||
772 | |||
773 | if (modify) { | ||
774 | hwc->config += new_idx - reg1->idx; | ||
775 | reg1->config = config; | ||
776 | reg1->idx = new_idx; | ||
777 | } | ||
778 | return config; | ||
779 | } | ||
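PCU band filters occupy one byte lane each, and events 0xb-0xe map one-to-one onto lanes 0-3, so retargeting an event shifts its filter byte by whole lanes and bumps the event select by the same delta. One concrete instance of the arithmetic (values made up):

	/* lane 1 -> lane 2: filter byte moves up 8 bits, event 0xc becomes 0xd */
	u64 cfg = 0xaa00;		/* band filter sitting in lane 1 */
	cfg <<= 8 * (2 - 1);		/* == 0xaa0000, now in lane 2 */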
780 | |||
781 | static struct event_constraint * | ||
782 | snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
783 | { | ||
784 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
785 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
786 | unsigned long flags; | ||
787 | int idx = reg1->idx; | ||
788 | u64 mask, config1 = reg1->config; | ||
789 | bool ok = false; | ||
790 | |||
791 | if (reg1->idx == EXTRA_REG_NONE || | ||
792 | (!uncore_box_is_fake(box) && reg1->alloc)) | ||
793 | return NULL; | ||
794 | again: | ||
795 | mask = 0xffULL << (idx * 8); | ||
796 | raw_spin_lock_irqsave(&er->lock, flags); | ||
797 | if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || | ||
798 | !((config1 ^ er->config) & mask)) { | ||
799 | atomic_add(1 << (idx * 8), &er->ref); | ||
800 | er->config &= ~mask; | ||
801 | er->config |= config1 & mask; | ||
802 | ok = true; | ||
803 | } | ||
804 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
805 | |||
806 | if (!ok) { | ||
807 | idx = (idx + 1) % 4; | ||
808 | if (idx != reg1->idx) { | ||
809 | config1 = snbep_pcu_alter_er(event, idx, false); | ||
810 | goto again; | ||
811 | } | ||
812 | return &uncore_constraint_empty; | ||
813 | } | ||
814 | |||
815 | if (!uncore_box_is_fake(box)) { | ||
816 | if (idx != reg1->idx) | ||
817 | snbep_pcu_alter_er(event, idx, true); | ||
818 | reg1->alloc = 1; | ||
819 | } | ||
820 | return NULL; | ||
821 | } | ||
822 | |||
823 | static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
824 | { | ||
825 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
826 | struct intel_uncore_extra_reg *er = &box->shared_regs[0]; | ||
827 | |||
828 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
829 | return; | ||
830 | |||
831 | atomic_sub(1 << (reg1->idx * 8), &er->ref); | ||
832 | reg1->alloc = 0; | ||
833 | } | ||
834 | |||
835 | static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
836 | { | ||
837 | struct hw_perf_event *hwc = &event->hw; | ||
838 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
839 | int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; | ||
840 | |||
841 | if (ev_sel >= 0xb && ev_sel <= 0xe) { | ||
842 | reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; | ||
843 | reg1->idx = ev_sel - 0xb; | ||
844 | reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8)); | ||
845 | } | ||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static struct intel_uncore_ops snbep_uncore_pcu_ops = { | ||
850 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
851 | .hw_config = snbep_pcu_hw_config, | ||
852 | .get_constraint = snbep_pcu_get_constraint, | ||
853 | .put_constraint = snbep_pcu_put_constraint, | ||
854 | }; | ||
855 | |||
856 | static struct intel_uncore_type snbep_uncore_pcu = { | ||
857 | .name = "pcu", | ||
858 | .num_counters = 4, | ||
859 | .num_boxes = 1, | ||
860 | .perf_ctr_bits = 48, | ||
861 | .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, | ||
862 | .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, | ||
863 | .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, | ||
864 | .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, | ||
865 | .num_shared_regs = 1, | ||
866 | .ops = &snbep_uncore_pcu_ops, | ||
867 | .format_group = &snbep_uncore_pcu_format_group, | ||
868 | }; | ||
869 | |||
870 | static struct intel_uncore_type *snbep_msr_uncores[] = { | ||
871 | &snbep_uncore_ubox, | ||
872 | &snbep_uncore_cbox, | ||
873 | &snbep_uncore_pcu, | ||
874 | NULL, | ||
875 | }; | ||
876 | |||
877 | void snbep_uncore_cpu_init(void) | ||
878 | { | ||
879 | if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
880 | snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
881 | uncore_msr_uncores = snbep_msr_uncores; | ||
882 | } | ||
883 | |||
884 | enum { | ||
885 | SNBEP_PCI_QPI_PORT0_FILTER, | ||
886 | SNBEP_PCI_QPI_PORT1_FILTER, | ||
887 | }; | ||
888 | |||
889 | static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
890 | { | ||
891 | struct hw_perf_event *hwc = &event->hw; | ||
892 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
893 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
894 | |||
895 | if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { | ||
896 | reg1->idx = 0; | ||
897 | reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; | ||
898 | reg1->config = event->attr.config1; | ||
899 | reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; | ||
900 | reg2->config = event->attr.config2; | ||
901 | } | ||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
906 | { | ||
907 | struct pci_dev *pdev = box->pci_dev; | ||
908 | struct hw_perf_event *hwc = &event->hw; | ||
909 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
910 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
911 | |||
912 | if (reg1->idx != EXTRA_REG_NONE) { | ||
913 | int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; | ||
914 | struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx]; | ||
915 | if (filter_pdev) { | ||
916 | pci_write_config_dword(filter_pdev, reg1->reg, | ||
917 | (u32)reg1->config); | ||
918 | pci_write_config_dword(filter_pdev, reg1->reg + 4, | ||
919 | (u32)(reg1->config >> 32)); | ||
920 | pci_write_config_dword(filter_pdev, reg2->reg, | ||
921 | (u32)reg2->config); | ||
922 | pci_write_config_dword(filter_pdev, reg2->reg + 4, | ||
923 | (u32)(reg2->config >> 32)); | ||
924 | } | ||
925 | } | ||
926 | |||
927 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
928 | } | ||
929 | |||
930 | static struct intel_uncore_ops snbep_uncore_qpi_ops = { | ||
931 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), | ||
932 | .enable_event = snbep_qpi_enable_event, | ||
933 | .hw_config = snbep_qpi_hw_config, | ||
934 | .get_constraint = uncore_get_constraint, | ||
935 | .put_constraint = uncore_put_constraint, | ||
936 | }; | ||
937 | |||
938 | #define SNBEP_UNCORE_PCI_COMMON_INIT() \ | ||
939 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ | ||
940 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ | ||
941 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ | ||
942 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ | ||
943 | .ops = &snbep_uncore_pci_ops, \ | ||
944 | .format_group = &snbep_uncore_format_group | ||
945 | |||
946 | static struct intel_uncore_type snbep_uncore_ha = { | ||
947 | .name = "ha", | ||
948 | .num_counters = 4, | ||
949 | .num_boxes = 1, | ||
950 | .perf_ctr_bits = 48, | ||
951 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
952 | }; | ||
953 | |||
954 | static struct intel_uncore_type snbep_uncore_imc = { | ||
955 | .name = "imc", | ||
956 | .num_counters = 4, | ||
957 | .num_boxes = 4, | ||
958 | .perf_ctr_bits = 48, | ||
959 | .fixed_ctr_bits = 48, | ||
960 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, | ||
961 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, | ||
962 | .event_descs = snbep_uncore_imc_events, | ||
963 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
964 | }; | ||
965 | |||
966 | static struct intel_uncore_type snbep_uncore_qpi = { | ||
967 | .name = "qpi", | ||
968 | .num_counters = 4, | ||
969 | .num_boxes = 2, | ||
970 | .perf_ctr_bits = 48, | ||
971 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
972 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
973 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
974 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
975 | .num_shared_regs = 1, | ||
976 | .ops = &snbep_uncore_qpi_ops, | ||
977 | .event_descs = snbep_uncore_qpi_events, | ||
978 | .format_group = &snbep_uncore_qpi_format_group, | ||
979 | }; | ||
980 | |||
981 | |||
982 | static struct intel_uncore_type snbep_uncore_r2pcie = { | ||
983 | .name = "r2pcie", | ||
984 | .num_counters = 4, | ||
985 | .num_boxes = 1, | ||
986 | .perf_ctr_bits = 44, | ||
987 | .constraints = snbep_uncore_r2pcie_constraints, | ||
988 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
989 | }; | ||
990 | |||
991 | static struct intel_uncore_type snbep_uncore_r3qpi = { | ||
992 | .name = "r3qpi", | ||
993 | .num_counters = 3, | ||
994 | .num_boxes = 2, | ||
995 | .perf_ctr_bits = 44, | ||
996 | .constraints = snbep_uncore_r3qpi_constraints, | ||
997 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
998 | }; | ||
999 | |||
1000 | enum { | ||
1001 | SNBEP_PCI_UNCORE_HA, | ||
1002 | SNBEP_PCI_UNCORE_IMC, | ||
1003 | SNBEP_PCI_UNCORE_QPI, | ||
1004 | SNBEP_PCI_UNCORE_R2PCIE, | ||
1005 | SNBEP_PCI_UNCORE_R3QPI, | ||
1006 | }; | ||
1007 | |||
1008 | static struct intel_uncore_type *snbep_pci_uncores[] = { | ||
1009 | [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, | ||
1010 | [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, | ||
1011 | [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, | ||
1012 | [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, | ||
1013 | [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, | ||
1014 | NULL, | ||
1015 | }; | ||
1016 | |||
1017 | static const struct pci_device_id snbep_uncore_pci_ids[] = { | ||
1018 | { /* Home Agent */ | ||
1019 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), | ||
1020 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), | ||
1021 | }, | ||
1022 | { /* MC Channel 0 */ | ||
1023 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), | ||
1024 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), | ||
1025 | }, | ||
1026 | { /* MC Channel 1 */ | ||
1027 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), | ||
1028 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), | ||
1029 | }, | ||
1030 | { /* MC Channel 2 */ | ||
1031 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), | ||
1032 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), | ||
1033 | }, | ||
1034 | { /* MC Channel 3 */ | ||
1035 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), | ||
1036 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), | ||
1037 | }, | ||
1038 | { /* QPI Port 0 */ | ||
1039 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), | ||
1040 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), | ||
1041 | }, | ||
1042 | { /* QPI Port 1 */ | ||
1043 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), | ||
1044 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), | ||
1045 | }, | ||
1046 | { /* R2PCIe */ | ||
1047 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), | ||
1048 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), | ||
1049 | }, | ||
1050 | { /* R3QPI Link 0 */ | ||
1051 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), | ||
1052 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), | ||
1053 | }, | ||
1054 | { /* R3QPI Link 1 */ | ||
1055 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), | ||
1056 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), | ||
1057 | }, | ||
1058 | { /* QPI Port 0 filter */ | ||
1059 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), | ||
1060 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1061 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
1062 | }, | ||
1063 | { /* QPI Port 1 filter */ | ||
1064 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), | ||
1065 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1066 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
1067 | }, | ||
1068 | { /* end: all zeroes */ } | ||
1069 | }; | ||
1070 | |||
1071 | static struct pci_driver snbep_uncore_pci_driver = { | ||
1072 | .name = "snbep_uncore", | ||
1073 | .id_table = snbep_uncore_pci_ids, | ||
1074 | }; | ||
1075 | |||
1076 | /* | ||
1077 | * build pci bus to socket mapping | ||
1078 | */ | ||
1079 | static int snbep_pci2phy_map_init(int devid) | ||
1080 | { | ||
1081 | struct pci_dev *ubox_dev = NULL; | ||
1082 | int i, bus, nodeid; | ||
1083 | int err = 0; | ||
1084 | u32 config = 0; | ||
1085 | |||
1086 | while (1) { | ||
1087 | /* find the UBOX device */ | ||
1088 | ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); | ||
1089 | if (!ubox_dev) | ||
1090 | break; | ||
1091 | bus = ubox_dev->bus->number; | ||
1092 | /* get the Node ID of the local socket */ | ||
1093 | err = pci_read_config_dword(ubox_dev, 0x40, &config); | ||
1094 | if (err) | ||
1095 | break; | ||
1096 | nodeid = config; | ||
1097 | /* get the Node ID mapping */ | ||
1098 | err = pci_read_config_dword(ubox_dev, 0x54, &config); | ||
1099 | if (err) | ||
1100 | break; | ||
1101 | /* | ||
1102 | * each 3-bit field in the Node ID mapping register maps | ||
1103 | * to a particular node. | ||
1104 | */ | ||
1105 | for (i = 0; i < 8; i++) { | ||
1106 | if (nodeid == ((config >> (3 * i)) & 0x7)) { | ||
1107 | uncore_pcibus_to_physid[bus] = i; | ||
1108 | break; | ||
1109 | } | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | if (!err) { | ||
1114 | /* | ||
1115 | * For PCI buses with no UBOX device, reuse the mapping of the | ||
1116 | * next higher bus that does have a UBOX device. | ||
1117 | */ | ||
1118 | i = -1; | ||
1119 | for (bus = 255; bus >= 0; bus--) { | ||
1120 | if (uncore_pcibus_to_physid[bus] >= 0) | ||
1121 | i = uncore_pcibus_to_physid[bus]; | ||
1122 | else | ||
1123 | uncore_pcibus_to_physid[bus] = i; | ||
1124 | } | ||
1125 | } | ||
1126 | |||
1127 | if (ubox_dev) | ||
1128 | pci_dev_put(ubox_dev); | ||
1129 | |||
1130 | return err ? pcibios_err_to_errno(err) : 0; | ||
1131 | } | ||
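A worked example of the decode, with a made-up mapping value: if the dword at offset 0x54 reads 0x688, its 3-bit slots 0-3 hold {0, 1, 2, 3}; a socket whose node id at offset 0x40 is 2 then matches slot 2, so every bus behind that Ubox gets physical id 2. (Intel's uncore guide appears to call these registers CPUNODEID and GIDNIDMAP; the names are not used in this file.)

	int i, nodeid = 2;
	u32 config = 0x688;	/* slots from bit 0 upward: 0, 1, 2, 3 */

	for (i = 0; i < 8; i++)
		if (nodeid == ((config >> (3 * i)) & 0x7))
			break;	/* leaves i == 2 */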
1132 | |||
1133 | int snbep_uncore_pci_init(void) | ||
1134 | { | ||
1135 | int ret = snbep_pci2phy_map_init(0x3ce0); | ||
1136 | if (ret) | ||
1137 | return ret; | ||
1138 | uncore_pci_uncores = snbep_pci_uncores; | ||
1139 | uncore_pci_driver = &snbep_uncore_pci_driver; | ||
1140 | return 0; | ||
1141 | } | ||
1142 | /* end of Sandy Bridge-EP uncore support */ | ||
1143 | |||
1144 | /* IvyTown uncore support */ | ||
1145 | static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box) | ||
1146 | { | ||
1147 | unsigned msr = uncore_msr_box_ctl(box); | ||
1148 | if (msr) | ||
1149 | wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); | ||
1150 | } | ||
1151 | |||
1152 | static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) | ||
1153 | { | ||
1154 | struct pci_dev *pdev = box->pci_dev; | ||
1155 | |||
1156 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); | ||
1157 | } | ||
1158 | |||
1159 | #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \ | ||
1160 | .init_box = ivbep_uncore_msr_init_box, \ | ||
1161 | .disable_box = snbep_uncore_msr_disable_box, \ | ||
1162 | .enable_box = snbep_uncore_msr_enable_box, \ | ||
1163 | .disable_event = snbep_uncore_msr_disable_event, \ | ||
1164 | .enable_event = snbep_uncore_msr_enable_event, \ | ||
1165 | .read_counter = uncore_msr_read_counter | ||
1166 | |||
1167 | static struct intel_uncore_ops ivbep_uncore_msr_ops = { | ||
1168 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1169 | }; | ||
1170 | |||
1171 | static struct intel_uncore_ops ivbep_uncore_pci_ops = { | ||
1172 | .init_box = ivbep_uncore_pci_init_box, | ||
1173 | .disable_box = snbep_uncore_pci_disable_box, | ||
1174 | .enable_box = snbep_uncore_pci_enable_box, | ||
1175 | .disable_event = snbep_uncore_pci_disable_event, | ||
1176 | .enable_event = snbep_uncore_pci_enable_event, | ||
1177 | .read_counter = snbep_uncore_pci_read_counter, | ||
1178 | }; | ||
1179 | |||
1180 | #define IVBEP_UNCORE_PCI_COMMON_INIT() \ | ||
1181 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ | ||
1182 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ | ||
1183 | .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \ | ||
1184 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ | ||
1185 | .ops = &ivbep_uncore_pci_ops, \ | ||
1186 | .format_group = &ivbep_uncore_format_group | ||
1187 | |||
1188 | static struct attribute *ivbep_uncore_formats_attr[] = { | ||
1189 | &format_attr_event.attr, | ||
1190 | &format_attr_umask.attr, | ||
1191 | &format_attr_edge.attr, | ||
1192 | &format_attr_inv.attr, | ||
1193 | &format_attr_thresh8.attr, | ||
1194 | NULL, | ||
1195 | }; | ||
1196 | |||
1197 | static struct attribute *ivbep_uncore_ubox_formats_attr[] = { | ||
1198 | &format_attr_event.attr, | ||
1199 | &format_attr_umask.attr, | ||
1200 | &format_attr_edge.attr, | ||
1201 | &format_attr_inv.attr, | ||
1202 | &format_attr_thresh5.attr, | ||
1203 | NULL, | ||
1204 | }; | ||
1205 | |||
1206 | static struct attribute *ivbep_uncore_cbox_formats_attr[] = { | ||
1207 | &format_attr_event.attr, | ||
1208 | &format_attr_umask.attr, | ||
1209 | &format_attr_edge.attr, | ||
1210 | &format_attr_tid_en.attr, | ||
1211 | &format_attr_thresh8.attr, | ||
1212 | &format_attr_filter_tid.attr, | ||
1213 | &format_attr_filter_link.attr, | ||
1214 | &format_attr_filter_state2.attr, | ||
1215 | &format_attr_filter_nid2.attr, | ||
1216 | &format_attr_filter_opc2.attr, | ||
1217 | &format_attr_filter_nc.attr, | ||
1218 | &format_attr_filter_c6.attr, | ||
1219 | &format_attr_filter_isoc.attr, | ||
1220 | NULL, | ||
1221 | }; | ||
1222 | |||
1223 | static struct attribute *ivbep_uncore_pcu_formats_attr[] = { | ||
1224 | &format_attr_event_ext.attr, | ||
1225 | &format_attr_occ_sel.attr, | ||
1226 | &format_attr_edge.attr, | ||
1227 | &format_attr_thresh5.attr, | ||
1228 | &format_attr_occ_invert.attr, | ||
1229 | &format_attr_occ_edge.attr, | ||
1230 | &format_attr_filter_band0.attr, | ||
1231 | &format_attr_filter_band1.attr, | ||
1232 | &format_attr_filter_band2.attr, | ||
1233 | &format_attr_filter_band3.attr, | ||
1234 | NULL, | ||
1235 | }; | ||
1236 | |||
1237 | static struct attribute *ivbep_uncore_qpi_formats_attr[] = { | ||
1238 | &format_attr_event_ext.attr, | ||
1239 | &format_attr_umask.attr, | ||
1240 | &format_attr_edge.attr, | ||
1241 | &format_attr_thresh8.attr, | ||
1242 | &format_attr_match_rds.attr, | ||
1243 | &format_attr_match_rnid30.attr, | ||
1244 | &format_attr_match_rnid4.attr, | ||
1245 | &format_attr_match_dnid.attr, | ||
1246 | &format_attr_match_mc.attr, | ||
1247 | &format_attr_match_opc.attr, | ||
1248 | &format_attr_match_vnw.attr, | ||
1249 | &format_attr_match0.attr, | ||
1250 | &format_attr_match1.attr, | ||
1251 | &format_attr_mask_rds.attr, | ||
1252 | &format_attr_mask_rnid30.attr, | ||
1253 | &format_attr_mask_rnid4.attr, | ||
1254 | &format_attr_mask_dnid.attr, | ||
1255 | &format_attr_mask_mc.attr, | ||
1256 | &format_attr_mask_opc.attr, | ||
1257 | &format_attr_mask_vnw.attr, | ||
1258 | &format_attr_mask0.attr, | ||
1259 | &format_attr_mask1.attr, | ||
1260 | NULL, | ||
1261 | }; | ||
1262 | |||
1263 | static struct attribute_group ivbep_uncore_format_group = { | ||
1264 | .name = "format", | ||
1265 | .attrs = ivbep_uncore_formats_attr, | ||
1266 | }; | ||
1267 | |||
1268 | static struct attribute_group ivbep_uncore_ubox_format_group = { | ||
1269 | .name = "format", | ||
1270 | .attrs = ivbep_uncore_ubox_formats_attr, | ||
1271 | }; | ||
1272 | |||
1273 | static struct attribute_group ivbep_uncore_cbox_format_group = { | ||
1274 | .name = "format", | ||
1275 | .attrs = ivbep_uncore_cbox_formats_attr, | ||
1276 | }; | ||
1277 | |||
1278 | static struct attribute_group ivbep_uncore_pcu_format_group = { | ||
1279 | .name = "format", | ||
1280 | .attrs = ivbep_uncore_pcu_formats_attr, | ||
1281 | }; | ||
1282 | |||
1283 | static struct attribute_group ivbep_uncore_qpi_format_group = { | ||
1284 | .name = "format", | ||
1285 | .attrs = ivbep_uncore_qpi_formats_attr, | ||
1286 | }; | ||
1287 | |||
1288 | static struct intel_uncore_type ivbep_uncore_ubox = { | ||
1289 | .name = "ubox", | ||
1290 | .num_counters = 2, | ||
1291 | .num_boxes = 1, | ||
1292 | .perf_ctr_bits = 44, | ||
1293 | .fixed_ctr_bits = 48, | ||
1294 | .perf_ctr = SNBEP_U_MSR_PMON_CTR0, | ||
1295 | .event_ctl = SNBEP_U_MSR_PMON_CTL0, | ||
1296 | .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK, | ||
1297 | .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, | ||
1298 | .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, | ||
1299 | .ops = &ivbep_uncore_msr_ops, | ||
1300 | .format_group = &ivbep_uncore_ubox_format_group, | ||
1301 | }; | ||
1302 | |||
1303 | static struct extra_reg ivbep_uncore_cbox_extra_regs[] = { | ||
1304 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | ||
1305 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | ||
1306 | SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), | ||
1307 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), | ||
1308 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), | ||
1309 | SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), | ||
1310 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), | ||
1311 | SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), | ||
1312 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), | ||
1313 | SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), | ||
1314 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), | ||
1315 | SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), | ||
1316 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), | ||
1317 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), | ||
1318 | SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), | ||
1319 | SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), | ||
1320 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), | ||
1321 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), | ||
1322 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), | ||
1323 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), | ||
1324 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), | ||
1325 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), | ||
1326 | SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), | ||
1327 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), | ||
1328 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), | ||
1329 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), | ||
1330 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), | ||
1331 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), | ||
1332 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), | ||
1333 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), | ||
1334 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), | ||
1335 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), | ||
1336 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), | ||
1337 | SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), | ||
1338 | SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), | ||
1339 | SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), | ||
1340 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), | ||
1341 | EVENT_EXTRA_END | ||
1342 | }; | ||
1343 | |||
1344 | static u64 ivbep_cbox_filter_mask(int fields) | ||
1345 | { | ||
1346 | u64 mask = 0; | ||
1347 | |||
1348 | if (fields & 0x1) | ||
1349 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID; | ||
1350 | if (fields & 0x2) | ||
1351 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK; | ||
1352 | if (fields & 0x4) | ||
1353 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE; | ||
1354 | if (fields & 0x8) | ||
1355 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID; | ||
1356 | if (fields & 0x10) { | ||
1357 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC; | ||
1358 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC; | ||
1359 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6; | ||
1360 | mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC; | ||
1361 | } | ||
1362 | |||
1363 | return mask; | ||
1364 | } | ||
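On IVB-EP the opcode, NC, C6 and ISOC filter bits all live in the upper word of the filter register, so one claim bit (0x10) covers the lot; numerically:

	/* (0x1ffULL << 52) | (1ULL << 61) | (1ULL << 62) | (1ULL << 63)
	 *	== 0xfff0000000000000, i.e. bits 52-63 claimed in one go
	 */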
1365 | |||
1366 | static struct event_constraint * | ||
1367 | ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1368 | { | ||
1369 | return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask); | ||
1370 | } | ||
1371 | |||
1372 | static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1373 | { | ||
1374 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1375 | struct extra_reg *er; | ||
1376 | int idx = 0; | ||
1377 | |||
1378 | for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) { | ||
1379 | if (er->event != (event->hw.config & er->config_mask)) | ||
1380 | continue; | ||
1381 | idx |= er->idx; | ||
1382 | } | ||
1383 | |||
1384 | if (idx) { | ||
1385 | reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + | ||
1386 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | ||
1387 | reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx); | ||
1388 | reg1->idx = idx; | ||
1389 | } | ||
1390 | return 0; | ||
1391 | } | ||
1392 | |||
1393 | static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1394 | { | ||
1395 | struct hw_perf_event *hwc = &event->hw; | ||
1396 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1397 | |||
1398 | if (reg1->idx != EXTRA_REG_NONE) { | ||
1399 | u64 filter = uncore_shared_reg_config(box, 0); | ||
1400 | wrmsrl(reg1->reg, filter & 0xffffffff); | ||
1401 | wrmsrl(reg1->reg + 6, filter >> 32); | ||
1402 | } | ||
1403 | |||
1404 | wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
1405 | } | ||
1406 | |||
1407 | static struct intel_uncore_ops ivbep_uncore_cbox_ops = { | ||
1408 | .init_box = ivbep_uncore_msr_init_box, | ||
1409 | .disable_box = snbep_uncore_msr_disable_box, | ||
1410 | .enable_box = snbep_uncore_msr_enable_box, | ||
1411 | .disable_event = snbep_uncore_msr_disable_event, | ||
1412 | .enable_event = ivbep_cbox_enable_event, | ||
1413 | .read_counter = uncore_msr_read_counter, | ||
1414 | .hw_config = ivbep_cbox_hw_config, | ||
1415 | .get_constraint = ivbep_cbox_get_constraint, | ||
1416 | .put_constraint = snbep_cbox_put_constraint, | ||
1417 | }; | ||
1418 | |||
1419 | static struct intel_uncore_type ivbep_uncore_cbox = { | ||
1420 | .name = "cbox", | ||
1421 | .num_counters = 4, | ||
1422 | .num_boxes = 15, | ||
1423 | .perf_ctr_bits = 44, | ||
1424 | .event_ctl = SNBEP_C0_MSR_PMON_CTL0, | ||
1425 | .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, | ||
1426 | .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK, | ||
1427 | .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, | ||
1428 | .msr_offset = SNBEP_CBO_MSR_OFFSET, | ||
1429 | .num_shared_regs = 1, | ||
1430 | .constraints = snbep_uncore_cbox_constraints, | ||
1431 | .ops = &ivbep_uncore_cbox_ops, | ||
1432 | .format_group = &ivbep_uncore_cbox_format_group, | ||
1433 | }; | ||
1434 | |||
1435 | static struct intel_uncore_ops ivbep_uncore_pcu_ops = { | ||
1436 | IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1437 | .hw_config = snbep_pcu_hw_config, | ||
1438 | .get_constraint = snbep_pcu_get_constraint, | ||
1439 | .put_constraint = snbep_pcu_put_constraint, | ||
1440 | }; | ||
1441 | |||
1442 | static struct intel_uncore_type ivbep_uncore_pcu = { | ||
1443 | .name = "pcu", | ||
1444 | .num_counters = 4, | ||
1445 | .num_boxes = 1, | ||
1446 | .perf_ctr_bits = 48, | ||
1447 | .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, | ||
1448 | .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, | ||
1449 | .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK, | ||
1450 | .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, | ||
1451 | .num_shared_regs = 1, | ||
1452 | .ops = &ivbep_uncore_pcu_ops, | ||
1453 | .format_group = &ivbep_uncore_pcu_format_group, | ||
1454 | }; | ||
1455 | |||
1456 | static struct intel_uncore_type *ivbep_msr_uncores[] = { | ||
1457 | &ivbep_uncore_ubox, | ||
1458 | &ivbep_uncore_cbox, | ||
1459 | &ivbep_uncore_pcu, | ||
1460 | NULL, | ||
1461 | }; | ||
1462 | |||
1463 | void ivbep_uncore_cpu_init(void) | ||
1464 | { | ||
1465 | if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
1466 | ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
1467 | uncore_msr_uncores = ivbep_msr_uncores; | ||
1468 | } | ||
1469 | |||
1470 | static struct intel_uncore_type ivbep_uncore_ha = { | ||
1471 | .name = "ha", | ||
1472 | .num_counters = 4, | ||
1473 | .num_boxes = 2, | ||
1474 | .perf_ctr_bits = 48, | ||
1475 | IVBEP_UNCORE_PCI_COMMON_INIT(), | ||
1476 | }; | ||
1477 | |||
1478 | static struct intel_uncore_type ivbep_uncore_imc = { | ||
1479 | .name = "imc", | ||
1480 | .num_counters = 4, | ||
1481 | .num_boxes = 8, | ||
1482 | .perf_ctr_bits = 48, | ||
1483 | .fixed_ctr_bits = 48, | ||
1484 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, | ||
1485 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, | ||
1486 | .event_descs = snbep_uncore_imc_events, | ||
1487 | IVBEP_UNCORE_PCI_COMMON_INIT(), | ||
1488 | }; | ||
1489 | |||
1490 | /* registers in IRP boxes are not properly aligned */ | ||
1491 | static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; | ||
1492 | static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; | ||
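/* | ||
 * Because of the irregular layout noted above, the IRP ops look control | ||
 * and counter offsets up in these tables instead of deriving them from | ||
 * a fixed stride as the other boxes do. | ||
 */ | ||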
1493 | |||
1494 | static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1495 | { | ||
1496 | struct pci_dev *pdev = box->pci_dev; | ||
1497 | struct hw_perf_event *hwc = &event->hw; | ||
1498 | |||
1499 | pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], | ||
1500 | hwc->config | SNBEP_PMON_CTL_EN); | ||
1501 | } | ||
1502 | |||
1503 | static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1504 | { | ||
1505 | struct pci_dev *pdev = box->pci_dev; | ||
1506 | struct hw_perf_event *hwc = &event->hw; | ||
1507 | |||
1508 | pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config); | ||
1509 | } | ||
1510 | |||
1511 | static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
1512 | { | ||
1513 | struct pci_dev *pdev = box->pci_dev; | ||
1514 | struct hw_perf_event *hwc = &event->hw; | ||
1515 | u64 count = 0; | ||
1516 | |||
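	/* | ||
	 * Config space is limited to 32-bit accesses, so the counter is | ||
	 * assembled from two dword reads, low half first. | ||
	 */ | ||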
1517 | pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); | ||
1518 | pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); | ||
1519 | |||
1520 | return count; | ||
1521 | } | ||
1522 | |||
1523 | static struct intel_uncore_ops ivbep_uncore_irp_ops = { | ||
1524 | .init_box = ivbep_uncore_pci_init_box, | ||
1525 | .disable_box = snbep_uncore_pci_disable_box, | ||
1526 | .enable_box = snbep_uncore_pci_enable_box, | ||
1527 | .disable_event = ivbep_uncore_irp_disable_event, | ||
1528 | .enable_event = ivbep_uncore_irp_enable_event, | ||
1529 | .read_counter = ivbep_uncore_irp_read_counter, | ||
1530 | }; | ||
1531 | |||
1532 | static struct intel_uncore_type ivbep_uncore_irp = { | ||
1533 | .name = "irp", | ||
1534 | .num_counters = 4, | ||
1535 | .num_boxes = 1, | ||
1536 | .perf_ctr_bits = 48, | ||
1537 | .event_mask = IVBEP_PMON_RAW_EVENT_MASK, | ||
1538 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
1539 | .ops = &ivbep_uncore_irp_ops, | ||
1540 | .format_group = &ivbep_uncore_format_group, | ||
1541 | }; | ||
1542 | |||
1543 | static struct intel_uncore_ops ivbep_uncore_qpi_ops = { | ||
1544 | .init_box = ivbep_uncore_pci_init_box, | ||
1545 | .disable_box = snbep_uncore_pci_disable_box, | ||
1546 | .enable_box = snbep_uncore_pci_enable_box, | ||
1547 | .disable_event = snbep_uncore_pci_disable_event, | ||
1548 | .enable_event = snbep_qpi_enable_event, | ||
1549 | .read_counter = snbep_uncore_pci_read_counter, | ||
1550 | .hw_config = snbep_qpi_hw_config, | ||
1551 | .get_constraint = uncore_get_constraint, | ||
1552 | .put_constraint = uncore_put_constraint, | ||
1553 | }; | ||
1554 | |||
1555 | static struct intel_uncore_type ivbep_uncore_qpi = { | ||
1556 | .name = "qpi", | ||
1557 | .num_counters = 4, | ||
1558 | .num_boxes = 3, | ||
1559 | .perf_ctr_bits = 48, | ||
1560 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
1561 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
1562 | .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
1563 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
1564 | .num_shared_regs = 1, | ||
1565 | .ops = &ivbep_uncore_qpi_ops, | ||
1566 | .format_group = &ivbep_uncore_qpi_format_group, | ||
1567 | }; | ||
1568 | |||
1569 | static struct intel_uncore_type ivbep_uncore_r2pcie = { | ||
1570 | .name = "r2pcie", | ||
1571 | .num_counters = 4, | ||
1572 | .num_boxes = 1, | ||
1573 | .perf_ctr_bits = 44, | ||
1574 | .constraints = snbep_uncore_r2pcie_constraints, | ||
1575 | IVBEP_UNCORE_PCI_COMMON_INIT(), | ||
1576 | }; | ||
1577 | |||
1578 | static struct intel_uncore_type ivbep_uncore_r3qpi = { | ||
1579 | .name = "r3qpi", | ||
1580 | .num_counters = 3, | ||
1581 | .num_boxes = 2, | ||
1582 | .perf_ctr_bits = 44, | ||
1583 | .constraints = snbep_uncore_r3qpi_constraints, | ||
1584 | IVBEP_UNCORE_PCI_COMMON_INIT(), | ||
1585 | }; | ||
1586 | |||
1587 | enum { | ||
1588 | IVBEP_PCI_UNCORE_HA, | ||
1589 | IVBEP_PCI_UNCORE_IMC, | ||
1590 | IVBEP_PCI_UNCORE_IRP, | ||
1591 | IVBEP_PCI_UNCORE_QPI, | ||
1592 | IVBEP_PCI_UNCORE_R2PCIE, | ||
1593 | IVBEP_PCI_UNCORE_R3QPI, | ||
1594 | }; | ||
1595 | |||
1596 | static struct intel_uncore_type *ivbep_pci_uncores[] = { | ||
1597 | [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha, | ||
1598 | [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc, | ||
1599 | [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp, | ||
1600 | [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi, | ||
1601 | [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie, | ||
1602 | [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi, | ||
1603 | NULL, | ||
1604 | }; | ||
1605 | |||
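/* | ||
 * driver_data packs the uncore type index and the box (or extra-device) | ||
 * index via UNCORE_PCI_DEV_DATA(), letting the generic PCI probe code | ||
 * route each device to the right PMU without further lookups. | ||
 */ | ||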
1606 | static const struct pci_device_id ivbep_uncore_pci_ids[] = { | ||
1607 | { /* Home Agent 0 */ | ||
1608 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), | ||
1609 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0), | ||
1610 | }, | ||
1611 | { /* Home Agent 1 */ | ||
1612 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), | ||
1613 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1), | ||
1614 | }, | ||
1615 | { /* MC0 Channel 0 */ | ||
1616 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), | ||
1617 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0), | ||
1618 | }, | ||
1619 | { /* MC0 Channel 1 */ | ||
1620 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), | ||
1621 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1), | ||
1622 | }, | ||
1623 | { /* MC0 Channel 3 */ | ||
1624 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), | ||
1625 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2), | ||
1626 | }, | ||
1627 | { /* MC0 Channel 4 */ | ||
1628 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), | ||
1629 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3), | ||
1630 | }, | ||
1631 | { /* MC1 Channel 0 */ | ||
1632 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), | ||
1633 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4), | ||
1634 | }, | ||
1635 | { /* MC1 Channel 1 */ | ||
1636 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), | ||
1637 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5), | ||
1638 | }, | ||
1639 | { /* MC1 Channel 3 */ | ||
1640 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), | ||
1641 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6), | ||
1642 | }, | ||
1643 | { /* MC1 Channel 4 */ | ||
1644 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), | ||
1645 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7), | ||
1646 | }, | ||
1647 | { /* IRP */ | ||
1648 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), | ||
1649 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0), | ||
1650 | }, | ||
1651 | { /* QPI0 Port 0 */ | ||
1652 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), | ||
1653 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0), | ||
1654 | }, | ||
1655 | { /* QPI0 Port 1 */ | ||
1656 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), | ||
1657 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1), | ||
1658 | }, | ||
1659 | { /* QPI1 Port 2 */ | ||
1660 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), | ||
1661 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2), | ||
1662 | }, | ||
1663 | { /* R2PCIe */ | ||
1664 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), | ||
1665 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0), | ||
1666 | }, | ||
1667 | { /* R3QPI0 Link 0 */ | ||
1668 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), | ||
1669 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0), | ||
1670 | }, | ||
1671 | { /* R3QPI0 Link 1 */ | ||
1672 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), | ||
1673 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1), | ||
1674 | }, | ||
1675 | { /* R3QPI1 Link 2 */ | ||
1676 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), | ||
1677 | .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2), | ||
1678 | }, | ||
1679 | { /* QPI Port 0 filter */ | ||
1680 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), | ||
1681 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1682 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
1683 | }, | ||
1684 | { /* QPI Port 1 filter */ | ||
1685 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), | ||
1686 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
1687 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
1688 | }, | ||
1689 | { /* end: all zeroes */ } | ||
1690 | }; | ||
1691 | |||
1692 | static struct pci_driver ivbep_uncore_pci_driver = { | ||
1693 | .name = "ivbep_uncore", | ||
1694 | .id_table = ivbep_uncore_pci_ids, | ||
1695 | }; | ||
1696 | |||
1697 | int ivbep_uncore_pci_init(void) | ||
1698 | { | ||
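	/* | ||
	 * 0x0e1e identifies the IvyTown device whose config space holds | ||
	 * the node-ID registers used to map PCI buses to physical | ||
	 * packages. | ||
	 */ | ||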
1699 | int ret = snbep_pci2phy_map_init(0x0e1e); | ||
1700 | if (ret) | ||
1701 | return ret; | ||
1702 | uncore_pci_uncores = ivbep_pci_uncores; | ||
1703 | uncore_pci_driver = &ivbep_uncore_pci_driver; | ||
1704 | return 0; | ||
1705 | } | ||
1706 | /* end of IvyTown uncore support */ | ||
1707 | |||
1708 | /* Haswell-EP uncore support */ | ||
1709 | static struct attribute *hswep_uncore_ubox_formats_attr[] = { | ||
1710 | &format_attr_event.attr, | ||
1711 | &format_attr_umask.attr, | ||
1712 | &format_attr_edge.attr, | ||
1713 | &format_attr_inv.attr, | ||
1714 | &format_attr_thresh5.attr, | ||
1715 | &format_attr_filter_tid2.attr, | ||
1716 | &format_attr_filter_cid.attr, | ||
1717 | NULL, | ||
1718 | }; | ||
1719 | |||
1720 | static struct attribute_group hswep_uncore_ubox_format_group = { | ||
1721 | .name = "format", | ||
1722 | .attrs = hswep_uncore_ubox_formats_attr, | ||
1723 | }; | ||
1724 | |||
1725 | static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1726 | { | ||
1727 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
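	/* | ||
	 * The U-box has a single filter register shared by all of its | ||
	 * counters, so every event claims shared-reg slot 0. | ||
	 */ | ||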
1728 | reg1->reg = HSWEP_U_MSR_PMON_FILTER; | ||
1729 | reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK; | ||
1730 | reg1->idx = 0; | ||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | static struct intel_uncore_ops hswep_uncore_ubox_ops = { | ||
1735 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1736 | .hw_config = hswep_ubox_hw_config, | ||
1737 | .get_constraint = uncore_get_constraint, | ||
1738 | .put_constraint = uncore_put_constraint, | ||
1739 | }; | ||
1740 | |||
1741 | static struct intel_uncore_type hswep_uncore_ubox = { | ||
1742 | .name = "ubox", | ||
1743 | .num_counters = 2, | ||
1744 | .num_boxes = 1, | ||
1745 | .perf_ctr_bits = 44, | ||
1746 | .fixed_ctr_bits = 48, | ||
1747 | .perf_ctr = HSWEP_U_MSR_PMON_CTR0, | ||
1748 | .event_ctl = HSWEP_U_MSR_PMON_CTL0, | ||
1749 | .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, | ||
1750 | .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, | ||
1751 | .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, | ||
1752 | .num_shared_regs = 1, | ||
1753 | .ops = &hswep_uncore_ubox_ops, | ||
1754 | .format_group = &hswep_uncore_ubox_format_group, | ||
1755 | }; | ||
1756 | |||
1757 | static struct attribute *hswep_uncore_cbox_formats_attr[] = { | ||
1758 | &format_attr_event.attr, | ||
1759 | &format_attr_umask.attr, | ||
1760 | &format_attr_edge.attr, | ||
1761 | &format_attr_tid_en.attr, | ||
1762 | &format_attr_thresh8.attr, | ||
1763 | &format_attr_filter_tid3.attr, | ||
1764 | &format_attr_filter_link2.attr, | ||
1765 | &format_attr_filter_state3.attr, | ||
1766 | &format_attr_filter_nid2.attr, | ||
1767 | &format_attr_filter_opc2.attr, | ||
1768 | &format_attr_filter_nc.attr, | ||
1769 | &format_attr_filter_c6.attr, | ||
1770 | &format_attr_filter_isoc.attr, | ||
1771 | NULL, | ||
1772 | }; | ||
1773 | |||
1774 | static struct attribute_group hswep_uncore_cbox_format_group = { | ||
1775 | .name = "format", | ||
1776 | .attrs = hswep_uncore_cbox_formats_attr, | ||
1777 | }; | ||
1778 | |||
1779 | static struct event_constraint hswep_uncore_cbox_constraints[] = { | ||
1780 | UNCORE_EVENT_CONSTRAINT(0x01, 0x1), | ||
1781 | UNCORE_EVENT_CONSTRAINT(0x09, 0x1), | ||
1782 | UNCORE_EVENT_CONSTRAINT(0x11, 0x1), | ||
1783 | UNCORE_EVENT_CONSTRAINT(0x36, 0x1), | ||
1784 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
1785 | UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), | ||
1786 | UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), | ||
1787 | EVENT_CONSTRAINT_END | ||
1788 | }; | ||
1789 | |||
1790 | static struct extra_reg hswep_uncore_cbox_extra_regs[] = { | ||
1791 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | ||
1792 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | ||
1793 | SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), | ||
1794 | SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), | ||
1795 | SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), | ||
1796 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), | ||
1797 | SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), | ||
1798 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4), | ||
1799 | SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), | ||
1800 | SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8), | ||
1801 | SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8), | ||
1802 | SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8), | ||
1803 | SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8), | ||
1804 | SNBEP_CBO_EVENT_EXTRA_REG(0x402a, 0x40ff, 0x8), | ||
1805 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12), | ||
1806 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), | ||
1807 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), | ||
1808 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), | ||
1809 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), | ||
1810 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), | ||
1811 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), | ||
1812 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), | ||
1813 | SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), | ||
1814 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), | ||
1815 | SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), | ||
1816 | SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), | ||
1817 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), | ||
1818 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), | ||
1819 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), | ||
1820 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), | ||
1821 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), | ||
1822 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), | ||
1823 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), | ||
1824 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), | ||
1825 | SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), | ||
1826 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), | ||
1827 | SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), | ||
1828 | SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), | ||
1829 | EVENT_EXTRA_END | ||
1830 | }; | ||
1831 | |||
1832 | static u64 hswep_cbox_filter_mask(int fields) | ||
1833 | { | ||
1834 | u64 mask = 0; | ||
1835 | if (fields & 0x1) | ||
1836 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID; | ||
1837 | if (fields & 0x2) | ||
1838 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK; | ||
1839 | if (fields & 0x4) | ||
1840 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE; | ||
1841 | if (fields & 0x8) | ||
1842 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID; | ||
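	/* | ||
	 * Bit 4 selects the opcode-match group, which also covers the | ||
	 * NC, C6 and ISOC match bits. | ||
	 */ | ||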
1843 | if (fields & 0x10) { | ||
1844 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC; | ||
1845 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC; | ||
1846 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6; | ||
1847 | mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC; | ||
1848 | } | ||
1849 | return mask; | ||
1850 | } | ||
1851 | |||
1852 | static struct event_constraint * | ||
1853 | hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1854 | { | ||
1855 | return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask); | ||
1856 | } | ||
1857 | |||
1858 | static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1859 | { | ||
1860 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1861 | struct extra_reg *er; | ||
1862 | int idx = 0; | ||
1863 | |||
1864 | for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) { | ||
1865 | if (er->event != (event->hw.config & er->config_mask)) | ||
1866 | continue; | ||
1867 | idx |= er->idx; | ||
1868 | } | ||
1869 | |||
1870 | if (idx) { | ||
1871 | reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + | ||
1872 | HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | ||
1873 | reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx); | ||
1874 | reg1->idx = idx; | ||
1875 | } | ||
1876 | return 0; | ||
1877 | } | ||
1878 | |||
1879 | static void hswep_cbox_enable_event(struct intel_uncore_box *box, | ||
1880 | struct perf_event *event) | ||
1881 | { | ||
1882 | struct hw_perf_event *hwc = &event->hw; | ||
1883 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1884 | |||
1885 | if (reg1->idx != EXTRA_REG_NONE) { | ||
1886 | u64 filter = uncore_shared_reg_config(box, 0); | ||
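		/* | ||
		 * FILTER0 and FILTER1 are adjacent MSRs on Haswell-EP, | ||
		 * unlike the reg + 6 spacing used on IvyBridge-EP above. | ||
		 */ | ||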
1887 | wrmsrl(reg1->reg, filter & 0xffffffff); | ||
1888 | wrmsrl(reg1->reg + 1, filter >> 32); | ||
1889 | } | ||
1890 | |||
1891 | wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
1892 | } | ||
1893 | |||
1894 | static struct intel_uncore_ops hswep_uncore_cbox_ops = { | ||
1895 | .init_box = snbep_uncore_msr_init_box, | ||
1896 | .disable_box = snbep_uncore_msr_disable_box, | ||
1897 | .enable_box = snbep_uncore_msr_enable_box, | ||
1898 | .disable_event = snbep_uncore_msr_disable_event, | ||
1899 | .enable_event = hswep_cbox_enable_event, | ||
1900 | .read_counter = uncore_msr_read_counter, | ||
1901 | .hw_config = hswep_cbox_hw_config, | ||
1902 | .get_constraint = hswep_cbox_get_constraint, | ||
1903 | .put_constraint = snbep_cbox_put_constraint, | ||
1904 | }; | ||
1905 | |||
1906 | static struct intel_uncore_type hswep_uncore_cbox = { | ||
1907 | .name = "cbox", | ||
1908 | .num_counters = 4, | ||
1909 | .num_boxes = 18, | ||
1910 | .perf_ctr_bits = 44, | ||
1911 | .event_ctl = HSWEP_C0_MSR_PMON_CTL0, | ||
1912 | .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, | ||
1913 | .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, | ||
1914 | .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, | ||
1915 | .msr_offset = HSWEP_CBO_MSR_OFFSET, | ||
1916 | .num_shared_regs = 1, | ||
1917 | .constraints = hswep_uncore_cbox_constraints, | ||
1918 | .ops = &hswep_uncore_cbox_ops, | ||
1919 | .format_group = &hswep_uncore_cbox_format_group, | ||
1920 | }; | ||
1921 | |||
1922 | static struct attribute *hswep_uncore_sbox_formats_attr[] = { | ||
1923 | &format_attr_event.attr, | ||
1924 | &format_attr_umask.attr, | ||
1925 | &format_attr_edge.attr, | ||
1926 | &format_attr_tid_en.attr, | ||
1927 | &format_attr_inv.attr, | ||
1928 | &format_attr_thresh8.attr, | ||
1929 | NULL, | ||
1930 | }; | ||
1931 | |||
1932 | static struct attribute_group hswep_uncore_sbox_format_group = { | ||
1933 | .name = "format", | ||
1934 | .attrs = hswep_uncore_sbox_formats_attr, | ||
1935 | }; | ||
1936 | |||
1937 | static struct intel_uncore_type hswep_uncore_sbox = { | ||
1938 | .name = "sbox", | ||
1939 | .num_counters = 4, | ||
1940 | .num_boxes = 4, | ||
1941 | .perf_ctr_bits = 44, | ||
1942 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, | ||
1943 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, | ||
1944 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, | ||
1945 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, | ||
1946 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, | ||
1947 | .ops = &snbep_uncore_msr_ops, | ||
1948 | .format_group = &hswep_uncore_sbox_format_group, | ||
1949 | }; | ||
1950 | |||
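/* | ||
 * Events 0xb-0xe are the PCU frequency-band events; each takes its band | ||
 * threshold from config1 by way of the PCU box filter register. | ||
 */ | ||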
1951 | static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1952 | { | ||
1953 | struct hw_perf_event *hwc = &event->hw; | ||
1954 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1955 | int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; | ||
1956 | |||
1957 | if (ev_sel >= 0xb && ev_sel <= 0xe) { | ||
1958 | reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER; | ||
1959 | reg1->idx = ev_sel - 0xb; | ||
1960 | reg1->config = event->attr.config1 & (0xff << reg1->idx); | ||
1961 | } | ||
1962 | return 0; | ||
1963 | } | ||
1964 | |||
1965 | static struct intel_uncore_ops hswep_uncore_pcu_ops = { | ||
1966 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | ||
1967 | .hw_config = hswep_pcu_hw_config, | ||
1968 | .get_constraint = snbep_pcu_get_constraint, | ||
1969 | .put_constraint = snbep_pcu_put_constraint, | ||
1970 | }; | ||
1971 | |||
1972 | static struct intel_uncore_type hswep_uncore_pcu = { | ||
1973 | .name = "pcu", | ||
1974 | .num_counters = 4, | ||
1975 | .num_boxes = 1, | ||
1976 | .perf_ctr_bits = 48, | ||
1977 | .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, | ||
1978 | .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, | ||
1979 | .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, | ||
1980 | .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, | ||
1981 | .num_shared_regs = 1, | ||
1982 | .ops = &hswep_uncore_pcu_ops, | ||
1983 | .format_group = &snbep_uncore_pcu_format_group, | ||
1984 | }; | ||
1985 | |||
1986 | static struct intel_uncore_type *hswep_msr_uncores[] = { | ||
1987 | &hswep_uncore_ubox, | ||
1988 | &hswep_uncore_cbox, | ||
1989 | &hswep_uncore_sbox, | ||
1990 | &hswep_uncore_pcu, | ||
1991 | NULL, | ||
1992 | }; | ||
1993 | |||
1994 | void hswep_uncore_cpu_init(void) | ||
1995 | { | ||
1996 | if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | ||
1997 | hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | ||
1998 | uncore_msr_uncores = hswep_msr_uncores; | ||
1999 | } | ||
2000 | |||
2001 | static struct intel_uncore_type hswep_uncore_ha = { | ||
2002 | .name = "ha", | ||
2003 | .num_counters = 5, | ||
2004 | .num_boxes = 2, | ||
2005 | .perf_ctr_bits = 48, | ||
2006 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
2007 | }; | ||
2008 | |||
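/* | ||
 * Named IMC events: event 0x04 is the DRAM CAS count, with umask 0x03 | ||
 * selecting reads and umask 0x0c selecting writes. | ||
 */ | ||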
2009 | static struct uncore_event_desc hswep_uncore_imc_events[] = { | ||
2010 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), | ||
2011 | INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), | ||
2012 | INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), | ||
2013 | { /* end: all zeroes */ }, | ||
2014 | }; | ||
2015 | |||
2016 | static struct intel_uncore_type hswep_uncore_imc = { | ||
2017 | .name = "imc", | ||
2018 | .num_counters = 5, | ||
2019 | .num_boxes = 8, | ||
2020 | .perf_ctr_bits = 48, | ||
2021 | .fixed_ctr_bits = 48, | ||
2022 | .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, | ||
2023 | .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, | ||
2024 | .event_descs = hswep_uncore_imc_events, | ||
2025 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
2026 | }; | ||
2027 | |||
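/* | ||
 * The Haswell-EP IRP box keeps the IvyBridge-EP register layout, so the | ||
 * unaligned-access helpers above are reused as-is. | ||
 */ | ||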
2028 | static struct intel_uncore_ops hswep_uncore_irp_ops = { | ||
2029 | .init_box = snbep_uncore_pci_init_box, | ||
2030 | .disable_box = snbep_uncore_pci_disable_box, | ||
2031 | .enable_box = snbep_uncore_pci_enable_box, | ||
2032 | .disable_event = ivbep_uncore_irp_disable_event, | ||
2033 | .enable_event = ivbep_uncore_irp_enable_event, | ||
2034 | .read_counter = ivbep_uncore_irp_read_counter, | ||
2035 | }; | ||
2036 | |||
2037 | static struct intel_uncore_type hswep_uncore_irp = { | ||
2038 | .name = "irp", | ||
2039 | .num_counters = 4, | ||
2040 | .num_boxes = 1, | ||
2041 | .perf_ctr_bits = 48, | ||
2042 | .event_mask = SNBEP_PMON_RAW_EVENT_MASK, | ||
2043 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
2044 | .ops = &hswep_uncore_irp_ops, | ||
2045 | .format_group = &snbep_uncore_format_group, | ||
2046 | }; | ||
2047 | |||
2048 | static struct intel_uncore_type hswep_uncore_qpi = { | ||
2049 | .name = "qpi", | ||
2050 | .num_counters = 5, | ||
2051 | .num_boxes = 3, | ||
2052 | .perf_ctr_bits = 48, | ||
2053 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
2054 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
2055 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
2056 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
2057 | .num_shared_regs = 1, | ||
2058 | .ops = &snbep_uncore_qpi_ops, | ||
2059 | .format_group = &snbep_uncore_qpi_format_group, | ||
2060 | }; | ||
2061 | |||
2062 | static struct event_constraint hswep_uncore_r2pcie_constraints[] = { | ||
2063 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
2064 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
2065 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), | ||
2066 | UNCORE_EVENT_CONSTRAINT(0x23, 0x1), | ||
2067 | UNCORE_EVENT_CONSTRAINT(0x24, 0x1), | ||
2068 | UNCORE_EVENT_CONSTRAINT(0x25, 0x1), | ||
2069 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
2070 | UNCORE_EVENT_CONSTRAINT(0x27, 0x1), | ||
2071 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), | ||
2072 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), | ||
2073 | UNCORE_EVENT_CONSTRAINT(0x2a, 0x1), | ||
2074 | UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), | ||
2075 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), | ||
2076 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), | ||
2077 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
2078 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
2079 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
2080 | UNCORE_EVENT_CONSTRAINT(0x35, 0x3), | ||
2081 | EVENT_CONSTRAINT_END | ||
2082 | }; | ||
2083 | |||
2084 | static struct intel_uncore_type hswep_uncore_r2pcie = { | ||
2085 | .name = "r2pcie", | ||
2086 | .num_counters = 4, | ||
2087 | .num_boxes = 1, | ||
2088 | .perf_ctr_bits = 48, | ||
2089 | .constraints = hswep_uncore_r2pcie_constraints, | ||
2090 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
2091 | }; | ||
2092 | |||
2093 | static struct event_constraint hswep_uncore_r3qpi_constraints[] = { | ||
2094 | UNCORE_EVENT_CONSTRAINT(0x01, 0x3), | ||
2095 | UNCORE_EVENT_CONSTRAINT(0x07, 0x7), | ||
2096 | UNCORE_EVENT_CONSTRAINT(0x08, 0x7), | ||
2097 | UNCORE_EVENT_CONSTRAINT(0x09, 0x7), | ||
2098 | UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), | ||
2099 | UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), | ||
2100 | UNCORE_EVENT_CONSTRAINT(0x10, 0x3), | ||
2101 | UNCORE_EVENT_CONSTRAINT(0x11, 0x3), | ||
2102 | UNCORE_EVENT_CONSTRAINT(0x12, 0x3), | ||
2103 | UNCORE_EVENT_CONSTRAINT(0x13, 0x1), | ||
2104 | UNCORE_EVENT_CONSTRAINT(0x14, 0x3), | ||
2105 | UNCORE_EVENT_CONSTRAINT(0x15, 0x3), | ||
2106 | UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), | ||
2107 | UNCORE_EVENT_CONSTRAINT(0x20, 0x3), | ||
2108 | UNCORE_EVENT_CONSTRAINT(0x21, 0x3), | ||
2109 | UNCORE_EVENT_CONSTRAINT(0x22, 0x3), | ||
2110 | UNCORE_EVENT_CONSTRAINT(0x23, 0x3), | ||
2111 | UNCORE_EVENT_CONSTRAINT(0x25, 0x3), | ||
2112 | UNCORE_EVENT_CONSTRAINT(0x26, 0x3), | ||
2113 | UNCORE_EVENT_CONSTRAINT(0x28, 0x3), | ||
2114 | UNCORE_EVENT_CONSTRAINT(0x29, 0x3), | ||
2115 | UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), | ||
2116 | UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), | ||
2117 | UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), | ||
2118 | UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), | ||
2119 | UNCORE_EVENT_CONSTRAINT(0x31, 0x3), | ||
2120 | UNCORE_EVENT_CONSTRAINT(0x32, 0x3), | ||
2121 | UNCORE_EVENT_CONSTRAINT(0x33, 0x3), | ||
2122 | UNCORE_EVENT_CONSTRAINT(0x34, 0x3), | ||
2123 | UNCORE_EVENT_CONSTRAINT(0x36, 0x3), | ||
2124 | UNCORE_EVENT_CONSTRAINT(0x37, 0x3), | ||
2125 | UNCORE_EVENT_CONSTRAINT(0x38, 0x3), | ||
2126 | UNCORE_EVENT_CONSTRAINT(0x39, 0x3), | ||
2127 | EVENT_CONSTRAINT_END | ||
2128 | }; | ||
2129 | |||
2130 | static struct intel_uncore_type hswep_uncore_r3qpi = { | ||
2131 | .name = "r3qpi", | ||
2132 | .num_counters = 4, | ||
2133 | .num_boxes = 3, | ||
2134 | .perf_ctr_bits = 44, | ||
2135 | .constraints = hswep_uncore_r3qpi_constraints, | ||
2136 | SNBEP_UNCORE_PCI_COMMON_INIT(), | ||
2137 | }; | ||
2138 | |||
2139 | enum { | ||
2140 | HSWEP_PCI_UNCORE_HA, | ||
2141 | HSWEP_PCI_UNCORE_IMC, | ||
2142 | HSWEP_PCI_UNCORE_IRP, | ||
2143 | HSWEP_PCI_UNCORE_QPI, | ||
2144 | HSWEP_PCI_UNCORE_R2PCIE, | ||
2145 | HSWEP_PCI_UNCORE_R3QPI, | ||
2146 | }; | ||
2147 | |||
2148 | static struct intel_uncore_type *hswep_pci_uncores[] = { | ||
2149 | [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha, | ||
2150 | [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc, | ||
2151 | [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp, | ||
2152 | [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi, | ||
2153 | [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie, | ||
2154 | [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi, | ||
2155 | NULL, | ||
2156 | }; | ||
2157 | |||
2158 | static const struct pci_device_id hswep_uncore_pci_ids[] = { | ||
2159 | { /* Home Agent 0 */ | ||
2160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), | ||
2161 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), | ||
2162 | }, | ||
2163 | { /* Home Agent 1 */ | ||
2164 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38), | ||
2165 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1), | ||
2166 | }, | ||
2167 | { /* MC0 Channel 0 */ | ||
2168 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0), | ||
2169 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0), | ||
2170 | }, | ||
2171 | { /* MC0 Channel 1 */ | ||
2172 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1), | ||
2173 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1), | ||
2174 | }, | ||
2175 | { /* MC0 Channel 2 */ | ||
2176 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4), | ||
2177 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2), | ||
2178 | }, | ||
2179 | { /* MC0 Channel 3 */ | ||
2180 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5), | ||
2181 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3), | ||
2182 | }, | ||
2183 | { /* MC1 Channel 0 */ | ||
2184 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0), | ||
2185 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4), | ||
2186 | }, | ||
2187 | { /* MC1 Channel 1 */ | ||
2188 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1), | ||
2189 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5), | ||
2190 | }, | ||
2191 | { /* MC1 Channel 2 */ | ||
2192 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4), | ||
2193 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6), | ||
2194 | }, | ||
2195 | { /* MC1 Channel 3 */ | ||
2196 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5), | ||
2197 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7), | ||
2198 | }, | ||
2199 | { /* IRP */ | ||
2200 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39), | ||
2201 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0), | ||
2202 | }, | ||
2203 | { /* QPI0 Port 0 */ | ||
2204 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32), | ||
2205 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0), | ||
2206 | }, | ||
2207 | { /* QPI0 Port 1 */ | ||
2208 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33), | ||
2209 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1), | ||
2210 | }, | ||
2211 | { /* QPI1 Port 2 */ | ||
2212 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a), | ||
2213 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2), | ||
2214 | }, | ||
2215 | { /* R2PCIe */ | ||
2216 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34), | ||
2217 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0), | ||
2218 | }, | ||
2219 | { /* R3QPI0 Link 0 */ | ||
2220 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36), | ||
2221 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0), | ||
2222 | }, | ||
2223 | { /* R3QPI0 Link 1 */ | ||
2224 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37), | ||
2225 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1), | ||
2226 | }, | ||
2227 | { /* R3QPI1 Link 2 */ | ||
2228 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e), | ||
2229 | .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2), | ||
2230 | }, | ||
2231 | { /* QPI Port 0 filter */ | ||
2232 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86), | ||
2233 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
2234 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
2235 | }, | ||
2236 | { /* QPI Port 1 filter */ | ||
2237 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96), | ||
2238 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
2239 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
2240 | }, | ||
2241 | { /* end: all zeroes */ } | ||
2242 | }; | ||
2243 | |||
2244 | static struct pci_driver hswep_uncore_pci_driver = { | ||
2245 | .name = "hswep_uncore", | ||
2246 | .id_table = hswep_uncore_pci_ids, | ||
2247 | }; | ||
2248 | |||
2249 | int hswep_uncore_pci_init(void) | ||
2250 | { | ||
2251 | int ret = snbep_pci2phy_map_init(0x2f1e); | ||
2252 | if (ret) | ||
2253 | return ret; | ||
2254 | uncore_pci_uncores = hswep_pci_uncores; | ||
2255 | uncore_pci_driver = &hswep_uncore_pci_driver; | ||
2256 | return 0; | ||
2257 | } | ||
2258 | /* end of Haswell-EP uncore support */ | ||