author		Jacob Shin <jacob.shin@amd.com>		2013-04-19 17:34:28 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-04-21 05:01:24 -0400
commit		c43ca5091a374c1f6778bd7e4a39a5a10735a917
tree		f9bc7384913f2c173d871cd506f9fec521cfc458 /arch/x86/kernel/cpu
parent		e850f9c33c0c7cc4097ae29f6f8d633237d235e6
perf/x86/amd: Add support for AMD NB and L2I "uncore" counters
Add support for AMD Family 15h [and above] northbridge
performance counters. MSRs 0xc0010240 ~ 0xc0010247 are shared
across all cores that share a common northbridge.
Add support for AMD Family 16h L2 performance counters. MSRs
0xc0010230 ~ 0xc0010237 are shared across all cores that share a
common L2 cache.
We do not enable counter overflow interrupts. Sampling mode and
per-thread events are not supported.
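Counting can then be requested system-wide against the new "amd_nb"
and "amd_l2" PMUs, along the lines of (the event/umask values below
are placeholders, not real event encodings):

  perf stat -a -e amd_nb/event=0x0,umask=0x0/ \
               -e amd_l2/event=0x0,umask=0x0/ sleep 1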
Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Stephane Eranian <eranian@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20130419213428.GA8229@jshin-Toonie
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/Makefile                |   2 +-
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c | 546 ++++++++++++++++++++++++++++++++
2 files changed, 547 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0e067d3d96c..00745729f2a1 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o perf_event_amd_uncore.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore.o
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
new file mode 100644
index 000000000000..6dc62273639c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -0,0 +1,546 @@
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_L2		10

#define COUNTER_SHIFT		16

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct amd_uncore *free_when_cpu_online;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
	return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_l2_event(event) && amd_uncore_l2)
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}

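/*
 * Shifting both the previous and the new raw value up by COUNTER_SHIFT
 * and the difference back down sign-extends the 48-bit hardware count,
 * so the accumulated delta stays correct across counter wraparound.
 */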
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

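/*
 * Counters are shared by all cores behind the same NB / L2, so a free
 * slot is claimed with cmpxchg() rather than under a lock; if every
 * slot is already taken the event cannot be scheduled and -EBUSY is
 * returned.
 */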
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

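/*
 * Validate a counting-only event and bind it to the CPU that currently
 * owns this NB / L2, so all users of a shared counter are scheduled
 * through a single, common CPU context.
 */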
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and L2 counters (MSRs) are shared across all cores that share the
	 * same NB / L2 cache. Interrupts can be directed to a single target
	 * core, however, event counts generated by processes running on other
	 * cores cannot be masked out. So we do not support sampling and
	 * per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and L2 counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int n;
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_l2_pmu.type)
		active_mask = &amd_l2_active_mask;
	else
		return 0;

	n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

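/* allocate the uncore structure on the NUMA node local to @cpu */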
static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_NB;
		uncore->rdpmc_base = RDPMC_BASE_NB;
		uncore->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore->active_mask = &amd_nb_active_mask;
		uncore->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_L2;
		uncore->rdpmc_base = RDPMC_BASE_L2;
		uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore->active_mask = &amd_l2_active_mask;
		uncore->pmu = &amd_l2_pmu;
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}

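/*
 * If an online sibling CPU already carries an amd_uncore with the same
 * id, adopt that one and queue our own copy to be freed once this CPU
 * is fully online; otherwise keep ours and just take a reference.
 */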
static struct amd_uncore *
__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
					 struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			that->free_when_cpu_online = this;
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

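/*
 * Derive the sharing id from the extended topology CPUID leaves:
 * 0x8000001e ECX[7:0] gives the node id for the NB, and leaf
 * 0x8000001d (cache properties, index 2) EAX[25:14] gives the number
 * of cores sharing the L2, from which a per-L2 id is computed.
 */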
static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}

static void __cpuinit uncore_online(unsigned int cpu,
				    struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	kfree(uncore->free_when_cpu_online);
	uncore->free_when_cpu_online = NULL;

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_online(cpu, amd_uncore_l2);
}

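/*
 * When the CPU owning a shared uncore goes down, hand the perf context
 * and the active_mask slot over to an online sibling that shares the
 * same structure, so counting continues uninterrupted.
 */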
static void __cpuinit uncore_down_prepare(unsigned int cpu,
					  struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_down_prepare(cpu, amd_uncore_l2);
}

static void __cpuinit uncore_dead(unsigned int cpu,
				  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_dead(cpu, amd_uncore_l2);
}

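/*
 * Hotplug glue: allocate at UP_PREPARE, discover topology and merge
 * with siblings at STARTING, publish the cpumask at ONLINE, migrate
 * away at DOWN_PREPARE, and drop the reference at UP_CANCELED / DEAD.
 */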
static int __cpuinit
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
			void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		amd_uncore_cpu_up_prepare(cpu);
		break;

	case CPU_STARTING:
		amd_uncore_cpu_starting(cpu);
		break;

	case CPU_ONLINE:
		amd_uncore_cpu_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		amd_uncore_cpu_down_prepare(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		amd_uncore_cpu_dead(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
	.notifier_call	= amd_uncore_cpu_notifier,
	.priority	= CPU_PRI_PERF + 1,
};

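/*
 * Runs via smp_call_function_single() on each CPU that was already
 * online at init time, replaying the hotplug sequence for it.
 */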
static void __init init_cpu_already_online(void *dummy)
{
	unsigned int cpu = smp_processor_id();

	amd_uncore_cpu_up_prepare(cpu);
	amd_uncore_cpu_starting(cpu);
	amd_uncore_cpu_online(cpu);
}

static int __init amd_uncore_init(void)
{
	unsigned int cpu;
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!cpu_has_topoext)
		return -ENODEV;

	if (cpu_has_perfctr_nb) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);

		printk(KERN_INFO "perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (cpu_has_perfctr_l2) {
		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);

		printk(KERN_INFO "perf: AMD L2I counters detected\n");
		ret = 0;
	}

	if (ret)
		return -ENODEV;

	get_online_cpus();
	/* init cpus already online before registering for hotplug notifier */
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);

	register_cpu_notifier(&amd_uncore_cpu_notifier_block);
	put_online_cpus();

	return 0;
}
device_initcall(amd_uncore_init);