 arch/x86/include/asm/cpufeature.h           |   2 ++
 arch/x86/include/uapi/asm/msr-index.h       |   4 ++++
 arch/x86/kernel/cpu/Makefile                |   2 +-
 arch/x86/kernel/cpu/perf_event_amd_uncore.c | 546 ++++++++++++++++++++++++++++
 4 files changed, 553 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 93fe929d1cee..ac10df72925b 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
 #define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE	(6*32+23) /* core performance counter extensions */
 #define X86_FEATURE_PERFCTR_NB	(6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_PERFCTR_L2	(6*32+28) /* L2 performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -311,6 +312,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+#define cpu_has_perfctr_l2	boot_cpu_has(X86_FEATURE_PERFCTR_L2)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index bf7bb68f43a8..b5757885d7a4 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -196,6 +196,10 @@
 #define MSR_AMD64_IBSBRTARGET		0xc001103b
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+/* Fam 16h MSRs */
+#define MSR_F16H_L2I_PERF_CTL		0xc0010230
+#define MSR_F16H_L2I_PERF_CTR		0xc0010231
+
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL		0xc0010200
 #define MSR_F15H_PERF_CTR		0xc0010201
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0e067d3d96c..00745729f2a1 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o perf_event_amd_uncore.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore.o
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
new file mode 100644
index 000000000000..6dc62273639c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -0,0 +1,546 @@
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

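/* base rdpmc counter indices for the NB and L2I counters */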
#define RDPMC_BASE_NB		6
#define RDPMC_BASE_L2		10

#define COUNTER_SHIFT		16

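/*
 * One amd_uncore instance is shared by all cores attached to the same
 * northbridge or L2 cache:
 *   id     - NB node id, or the base APIC id of the L2 sharing group
 *   refcnt - number of online cores currently attached to this instance
 *   cpu    - the single core that programs the counters and carries the events
 */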
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct amd_uncore *free_when_cpu_online;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
	return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_l2_event(event) && amd_uncore_l2)
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
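	/*
	 * The hardware counters are 64 - COUNTER_SHIFT = 48 bits wide;
	 * shifting both values up by 16 before subtracting and then shifting
	 * the signed difference back down yields a correct delta even when
	 * the counter has wrapped.
	 */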
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

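	/*
	 * PERF_CTL/PERF_CTR MSRs come in adjacent pairs: counter i uses
	 * msr_base + 2 * i as its control register and the next MSR as its
	 * count register; event_base_rdpmc is the matching rdpmc index.
	 */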
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and L2 counters (MSRs) are shared across all cores that share the
	 * same NB / L2 cache. Interrupts can be directed to a single target
	 * core, however, event counts generated by processes running on other
	 * cores cannot be masked out. So we do not support sampling and
	 * per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and L2 counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int n;
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_l2_pmu.type)
		active_mask = &amd_l2_active_mask;
	else
		return 0;

	n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.attr_groups = amd_uncore_attr_groups,
	.name = "amd_nb",
	.event_init = amd_uncore_event_init,
	.add = amd_uncore_add,
	.del = amd_uncore_del,
	.start = amd_uncore_start,
	.stop = amd_uncore_stop,
	.read = amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
	.attr_groups = amd_uncore_attr_groups,
	.name = "amd_l2",
	.event_init = amd_uncore_event_init,
	.add = amd_uncore_add,
	.del = amd_uncore_del,
	.start = amd_uncore_start,
	.stop = amd_uncore_stop,
	.read = amd_uncore_read,
};

static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_NB;
		uncore->rdpmc_base = RDPMC_BASE_NB;
		uncore->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore->active_mask = &amd_nb_active_mask;
		uncore->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_L2;
		uncore->rdpmc_base = RDPMC_BASE_L2;
		uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore->active_mask = &amd_l2_active_mask;
		uncore->pmu = &amd_l2_pmu;
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}

static struct amd_uncore *
__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
					 struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

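	/*
	 * If another online cpu already carries an uncore with the same id,
	 * adopt that instance and remember our own allocation so it can be
	 * freed once this cpu is fully online; either way, raise the
	 * refcount of the instance we end up using.
	 */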
	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			that->free_when_cpu_online = this;
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
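		/* ECX[7:0] of CPUID leaf 0x8000001e is this core's node id;
		 * cores on the same node share the NB counters */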
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
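		/*
		 * CPUID leaf 0x8000001d, index 2 describes the L2 cache;
		 * EAX[25:14] is the number of cores sharing it minus one.
		 * The APIC id rounded down to a multiple of nshared
		 * identifies the L2 sharing group.
		 */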
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}

static void __cpuinit uncore_online(unsigned int cpu,
				    struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

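	/*
	 * If this cpu adopted a sibling's uncore during CPU_STARTING, the
	 * structure allocated in CPU_UP_PREPARE is no longer needed.
	 */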
	kfree(uncore->free_when_cpu_online);
	uncore->free_when_cpu_online = NULL;

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_online(cpu, amd_uncore_l2);
}

static void __cpuinit uncore_down_prepare(unsigned int cpu,
					  struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_down_prepare(cpu, amd_uncore_l2);
}

static void __cpuinit uncore_dead(unsigned int cpu,
				  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_dead(cpu, amd_uncore_l2);
}

static int __cpuinit
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
			void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		amd_uncore_cpu_up_prepare(cpu);
		break;

	case CPU_STARTING:
		amd_uncore_cpu_starting(cpu);
		break;

	case CPU_ONLINE:
		amd_uncore_cpu_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		amd_uncore_cpu_down_prepare(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		amd_uncore_cpu_dead(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
	.notifier_call = amd_uncore_cpu_notifier,
	.priority = CPU_PRI_PERF + 1,
};

static void __init init_cpu_already_online(void *dummy)
{
	unsigned int cpu = smp_processor_id();

	amd_uncore_cpu_up_prepare(cpu);
	amd_uncore_cpu_starting(cpu);
	amd_uncore_cpu_online(cpu);
}

static int __init amd_uncore_init(void)
{
	unsigned int cpu;
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

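	/*
	 * amd_uncore_cpu_starting() discovers the NB/L2 topology via the
	 * topology extension CPUID leaves.
	 */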
	if (!cpu_has_topoext)
		return -ENODEV;

	if (cpu_has_perfctr_nb) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);

		printk(KERN_INFO "perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (cpu_has_perfctr_l2) {
		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);

		printk(KERN_INFO "perf: AMD L2I counters detected\n");
		ret = 0;
	}

	if (ret)
		return -ENODEV;

	get_online_cpus();
	/* init cpus already online before registering for hotplug notifier */
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);

	register_cpu_notifier(&amd_uncore_cpu_notifier_block);
	put_online_cpus();

	return 0;
}
device_initcall(amd_uncore_init);