diff options
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/mce_amd.c')
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd.c | 703 |
1 files changed, 703 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c new file mode 100644 index 00000000000..ddae21620bd --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -0,0 +1,703 @@ | |||
1 | /* | ||
2 | * (c) 2005, 2006 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | * | ||
7 | * Written by Jacob Shin - AMD, Inc. | ||
8 | * | ||
9 | * Support : jacob.shin@amd.com | ||
10 | * | ||
11 | * April 2006 | ||
12 | * - added support for AMD Family 0x10 processors | ||
13 | * | ||
14 | * All MC4_MISCi registers are shared between multi-cores | ||
15 | */ | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/kobject.h> | ||
19 | #include <linux/percpu.h> | ||
20 | #include <linux/sysdev.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/sysfs.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/cpu.h> | ||
26 | #include <linux/smp.h> | ||
27 | |||
28 | #include <asm/apic.h> | ||
29 | #include <asm/idle.h> | ||
30 | #include <asm/mce.h> | ||
31 | #include <asm/msr.h> | ||
32 | |||
#define PFX               "mce_threshold: "	/* log message prefix */
#define VERSION           "version 1.1.1"	/* driver version string */
#define NR_BANKS          6	/* number of MC banks handled here */
#define NR_BLOCKS         9	/* max MISC register blocks per bank */
#define THRESHOLD_MAX     0xFFF	/* error counter is 12 bits wide */
#define INT_TYPE_APIC     0x00020000	/* interrupt type: APIC message */
/*
 * Bit fields in the high 32 bits of an MCi_MISC threshold register
 * (names follow the macro suffixes; used throughout the probe loops):
 */
#define MASK_VALID_HI     0x80000000	/* block is implemented/valid */
#define MASK_CNTP_HI      0x40000000	/* counter present */
#define MASK_LOCKED_HI    0x20000000	/* locked (e.g. by BIOS) — skip it */
#define MASK_LVTOFF_HI    0x00F00000	/* APIC LVT offset field */
#define MASK_COUNT_EN_HI  0x00080000	/* counter enable */
#define MASK_INT_TYPE_HI  0x00060000	/* interrupt type selection */
#define MASK_OVERFLOW_HI  0x00010000	/* counter overflow flag */
#define MASK_ERR_COUNT_HI 0x00000FFF	/* current error count */
#define MASK_BLKPTR_LO    0xFF000000	/* pointer to the next MISC block */
#define MCG_XBLK_ADDR     0xC0000400	/* base MSR of extended MISC blocks */
49 | |||
/*
 * State for one MCi_MISC threshold counter block, exposed as one
 * sysfs "miscN" directory under its bank.
 */
struct threshold_block {
	unsigned int block;		/* block index within the bank */
	unsigned int bank;		/* MC bank number */
	unsigned int cpu;		/* CPU that owns this block */
	u32 address;			/* MSR address of the MISC register */
	u16 interrupt_enable;		/* nonzero: raise APIC interrupt */
	u16 threshold_limit;		/* error count that triggers it */
	struct kobject kobj;		/* embedded sysfs object */
	struct list_head miscj;		/* links sibling blocks of the bank */
};
60 | |||
/* defaults used early on boot, before per-block sysfs objects exist */
static struct threshold_block threshold_defaults = {
	.interrupt_enable	= 0,
	.threshold_limit	= THRESHOLD_MAX,
};
66 | |||
/*
 * Per-bank container: the sysfs directory, the head of the block list,
 * and the set of CPUs sharing the bank.
 */
struct threshold_bank {
	struct kobject *kobj;		/* sysfs dir "threshold_bankN" */
	struct threshold_block *blocks;	/* head block; rest chain via miscj */
	cpumask_var_t cpus;		/* CPUs that share this bank */
};
/* one pointer per bank per CPU; shared banks alias the owner's object */
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
73 | |||
#ifdef CONFIG_SMP
/* which banks are shared between the cores of a package (bank 4 is) */
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

/* interrupt handler, installed into mce_threshold_vector at init */
static void amd_threshold_interrupt(void);
83 | |||
84 | /* | ||
85 | * CPU Initialization | ||
86 | */ | ||
87 | |||
/* argument bundle passed to threshold_restart_bank() on the target CPU */
struct thresh_restart {
	struct threshold_block *b;	/* block whose MISC MSR to reprogram */
	int reset;			/* nonzero: clear count and overflow */
	u16 old_limit;			/* previous limit, 0 if unchanged */
};
93 | |||
94 | /* must be called with correct cpu affinity */ | ||
95 | /* Called via smp_call_function_single() */ | ||
96 | static void threshold_restart_bank(void *_tr) | ||
97 | { | ||
98 | struct thresh_restart *tr = _tr; | ||
99 | u32 mci_misc_hi, mci_misc_lo; | ||
100 | |||
101 | rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi); | ||
102 | |||
103 | if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) | ||
104 | tr->reset = 1; /* limit cannot be lower than err count */ | ||
105 | |||
106 | if (tr->reset) { /* reset err count and overflow bit */ | ||
107 | mci_misc_hi = | ||
108 | (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | | ||
109 | (THRESHOLD_MAX - tr->b->threshold_limit); | ||
110 | } else if (tr->old_limit) { /* change limit w/o reset */ | ||
111 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + | ||
112 | (tr->old_limit - tr->b->threshold_limit); | ||
113 | |||
114 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | | ||
115 | (new_count & THRESHOLD_MAX); | ||
116 | } | ||
117 | |||
118 | tr->b->interrupt_enable ? | ||
119 | (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : | ||
120 | (mci_misc_hi &= ~MASK_INT_TYPE_HI); | ||
121 | |||
122 | mci_misc_hi |= MASK_COUNT_EN_HI; | ||
123 | wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); | ||
124 | } | ||
125 | |||
/*
 * cpu init entry point, called from mce.c with preempt off.
 *
 * Probes every MISC block of every bank on this CPU: records
 * implemented banks in bank_map, programs the threshold LVT offset,
 * enables each counter with boot-time defaults and installs the
 * threshold interrupt handler.
 */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct thresh_restart tr;
	u8 lvt_off;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			/*
			 * Walk the bank's MISC block chain: block 0 is the
			 * architectural MCi_MISC MSR, block 1 comes from the
			 * BLKPTR field of block 0, and further blocks are
			 * consecutive MSRs after that.
			 */
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			/*
			 * Invalid block 0 ends the bank; a hole later in
			 * the chain is merely skipped.
			 */
			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			/* skip blocks without a counter or locked by BIOS */
			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			/* shared banks are set up by the first core only */
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
			/* route the interrupt through an extended APIC LVT */
			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
						       APIC_EILVT_MSG_FIX, 0);

			high &= ~MASK_LVTOFF_HI;
			high |= lvt_off << 20;
			wrmsr(address, low, high);

			/* enable the counter with boot-time defaults */
			threshold_defaults.address = address;
			tr.b = &threshold_defaults;
			tr.reset = 0;
			tr.old_limit = 0;
			threshold_restart_bank(&tr);

			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}
184 | |||
185 | /* | ||
186 | * APIC Interrupt Handler | ||
187 | */ | ||
188 | |||
189 | /* | ||
190 | * threshold interrupt handler will service THRESHOLD_APIC_VECTOR. | ||
191 | * the interrupt goes off when error_count reaches threshold_limit. | ||
192 | * the handler will simply log mcelog w/ software defined bank number. | ||
193 | */ | ||
/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 *
 * Scans all enabled banks/blocks on the current CPU; the first block
 * found with its overflow bit set is logged and the scan stops.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			/* same MISC block walk as mce_amd_feature_init() */
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					&__get_cpu_var(mce_poll_banks));

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				/*
				 * Software-defined bank number: encodes the
				 * (bank, block) pair above the hardware
				 * bank range.
				 */
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}
252 | |||
253 | /* | ||
254 | * Sysfs Interface | ||
255 | */ | ||
256 | |||
/* sysfs attribute whose show/store take a threshold_block directly */
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
262 | |||
/* generate a show_<name>() helper printing the field in hex */
#define SHOW_FIELDS(name) \
static ssize_t show_ ## name(struct threshold_block *b, char *buf) \
{ \
	return sprintf(buf, "%lx\n", (unsigned long) b->name); \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
270 | |||
271 | static ssize_t | ||
272 | store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) | ||
273 | { | ||
274 | struct thresh_restart tr; | ||
275 | unsigned long new; | ||
276 | |||
277 | if (strict_strtoul(buf, 0, &new) < 0) | ||
278 | return -EINVAL; | ||
279 | |||
280 | b->interrupt_enable = !!new; | ||
281 | |||
282 | tr.b = b; | ||
283 | tr.reset = 0; | ||
284 | tr.old_limit = 0; | ||
285 | |||
286 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); | ||
287 | |||
288 | return size; | ||
289 | } | ||
290 | |||
291 | static ssize_t | ||
292 | store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) | ||
293 | { | ||
294 | struct thresh_restart tr; | ||
295 | unsigned long new; | ||
296 | |||
297 | if (strict_strtoul(buf, 0, &new) < 0) | ||
298 | return -EINVAL; | ||
299 | |||
300 | if (new > THRESHOLD_MAX) | ||
301 | new = THRESHOLD_MAX; | ||
302 | if (new < 1) | ||
303 | new = 1; | ||
304 | |||
305 | tr.old_limit = b->threshold_limit; | ||
306 | b->threshold_limit = new; | ||
307 | tr.b = b; | ||
308 | tr.reset = 0; | ||
309 | |||
310 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); | ||
311 | |||
312 | return size; | ||
313 | } | ||
314 | |||
/* carries a block pointer in and the cross-CPU call's result out */
struct threshold_block_cross_cpu {
	struct threshold_block *tb;
	long retval;
};
319 | |||
320 | static void local_error_count_handler(void *_tbcc) | ||
321 | { | ||
322 | struct threshold_block_cross_cpu *tbcc = _tbcc; | ||
323 | struct threshold_block *b = tbcc->tb; | ||
324 | u32 low, high; | ||
325 | |||
326 | rdmsr(b->address, low, high); | ||
327 | tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); | ||
328 | } | ||
329 | |||
330 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | ||
331 | { | ||
332 | struct threshold_block_cross_cpu tbcc = { .tb = b, }; | ||
333 | |||
334 | smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1); | ||
335 | return sprintf(buf, "%lx\n", tbcc.retval); | ||
336 | } | ||
337 | |||
338 | static ssize_t store_error_count(struct threshold_block *b, | ||
339 | const char *buf, size_t count) | ||
340 | { | ||
341 | struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; | ||
342 | |||
343 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); | ||
344 | return 1; | ||
345 | } | ||
346 | |||
/*
 * Define a 0644 sysfs attribute named `val` wired to the show_<val>()
 * and store_<val>() helpers above.
 */
#define RW_ATTR(val) \
static struct threshold_attr val = { \
	.attr = {.name = __stringify(val), .mode = 0644 }, \
	.show = show_## val, \
	.store = store_## val, \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

/* attributes created in every per-block sysfs directory */
static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
364 | |||
/* map an embedded kobject/attribute back to our wrapper types */
#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)
367 | |||
368 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | ||
369 | { | ||
370 | struct threshold_block *b = to_block(kobj); | ||
371 | struct threshold_attr *a = to_attr(attr); | ||
372 | ssize_t ret; | ||
373 | |||
374 | ret = a->show ? a->show(b, buf) : -EIO; | ||
375 | |||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | static ssize_t store(struct kobject *kobj, struct attribute *attr, | ||
380 | const char *buf, size_t count) | ||
381 | { | ||
382 | struct threshold_block *b = to_block(kobj); | ||
383 | struct threshold_attr *a = to_attr(attr); | ||
384 | ssize_t ret; | ||
385 | |||
386 | ret = a->store ? a->store(b, buf, count) : -EIO; | ||
387 | |||
388 | return ret; | ||
389 | } | ||
390 | |||
/* dispatch all reads/writes through the threshold_attr callbacks */
static struct sysfs_ops threshold_ops = {
	.show = show,
	.store = store,
};

/* NOTE(review): no .release — blocks are freed manually with kfree() */
static struct kobj_type threshold_ktype = {
	.sysfs_ops = &threshold_ops,
	.default_attrs = default_attrs,
};
400 | |||
401 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | ||
402 | unsigned int bank, | ||
403 | unsigned int block, | ||
404 | u32 address) | ||
405 | { | ||
406 | struct threshold_block *b = NULL; | ||
407 | u32 low, high; | ||
408 | int err; | ||
409 | |||
410 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) | ||
411 | return 0; | ||
412 | |||
413 | if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) | ||
414 | return 0; | ||
415 | |||
416 | if (!(high & MASK_VALID_HI)) { | ||
417 | if (block) | ||
418 | goto recurse; | ||
419 | else | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | if (!(high & MASK_CNTP_HI) || | ||
424 | (high & MASK_LOCKED_HI)) | ||
425 | goto recurse; | ||
426 | |||
427 | b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); | ||
428 | if (!b) | ||
429 | return -ENOMEM; | ||
430 | |||
431 | b->block = block; | ||
432 | b->bank = bank; | ||
433 | b->cpu = cpu; | ||
434 | b->address = address; | ||
435 | b->interrupt_enable = 0; | ||
436 | b->threshold_limit = THRESHOLD_MAX; | ||
437 | |||
438 | INIT_LIST_HEAD(&b->miscj); | ||
439 | |||
440 | if (per_cpu(threshold_banks, cpu)[bank]->blocks) { | ||
441 | list_add(&b->miscj, | ||
442 | &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); | ||
443 | } else { | ||
444 | per_cpu(threshold_banks, cpu)[bank]->blocks = b; | ||
445 | } | ||
446 | |||
447 | err = kobject_init_and_add(&b->kobj, &threshold_ktype, | ||
448 | per_cpu(threshold_banks, cpu)[bank]->kobj, | ||
449 | "misc%i", block); | ||
450 | if (err) | ||
451 | goto out_free; | ||
452 | recurse: | ||
453 | if (!block) { | ||
454 | address = (low & MASK_BLKPTR_LO) >> 21; | ||
455 | if (!address) | ||
456 | return 0; | ||
457 | address += MCG_XBLK_ADDR; | ||
458 | } else { | ||
459 | ++address; | ||
460 | } | ||
461 | |||
462 | err = allocate_threshold_blocks(cpu, bank, ++block, address); | ||
463 | if (err) | ||
464 | goto out_free; | ||
465 | |||
466 | if (b) | ||
467 | kobject_uevent(&b->kobj, KOBJ_ADD); | ||
468 | |||
469 | return err; | ||
470 | |||
471 | out_free: | ||
472 | if (b) { | ||
473 | kobject_put(&b->kobj); | ||
474 | kfree(b); | ||
475 | } | ||
476 | return err; | ||
477 | } | ||
478 | |||
479 | static __cpuinit long | ||
480 | local_allocate_threshold_blocks(int cpu, unsigned int bank) | ||
481 | { | ||
482 | return allocate_threshold_blocks(cpu, bank, 0, | ||
483 | MSR_IA32_MC0_MISC + bank * 4); | ||
484 | } | ||
485 | |||
486 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ | ||
487 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | ||
488 | { | ||
489 | int i, err = 0; | ||
490 | struct threshold_bank *b = NULL; | ||
491 | char name[32]; | ||
492 | |||
493 | sprintf(name, "threshold_bank%i", bank); | ||
494 | |||
495 | #ifdef CONFIG_SMP | ||
496 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | ||
497 | i = cpumask_first(cpu_core_mask(cpu)); | ||
498 | |||
499 | /* first core not up yet */ | ||
500 | if (cpu_data(i).cpu_core_id) | ||
501 | goto out; | ||
502 | |||
503 | /* already linked */ | ||
504 | if (per_cpu(threshold_banks, cpu)[bank]) | ||
505 | goto out; | ||
506 | |||
507 | b = per_cpu(threshold_banks, i)[bank]; | ||
508 | |||
509 | if (!b) | ||
510 | goto out; | ||
511 | |||
512 | err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj, | ||
513 | b->kobj, name); | ||
514 | if (err) | ||
515 | goto out; | ||
516 | |||
517 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | ||
518 | per_cpu(threshold_banks, cpu)[bank] = b; | ||
519 | |||
520 | goto out; | ||
521 | } | ||
522 | #endif | ||
523 | |||
524 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); | ||
525 | if (!b) { | ||
526 | err = -ENOMEM; | ||
527 | goto out; | ||
528 | } | ||
529 | if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { | ||
530 | kfree(b); | ||
531 | err = -ENOMEM; | ||
532 | goto out; | ||
533 | } | ||
534 | |||
535 | b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj); | ||
536 | if (!b->kobj) | ||
537 | goto out_free; | ||
538 | |||
539 | #ifndef CONFIG_SMP | ||
540 | cpumask_setall(b->cpus); | ||
541 | #else | ||
542 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | ||
543 | #endif | ||
544 | |||
545 | per_cpu(threshold_banks, cpu)[bank] = b; | ||
546 | |||
547 | err = local_allocate_threshold_blocks(cpu, bank); | ||
548 | if (err) | ||
549 | goto out_free; | ||
550 | |||
551 | for_each_cpu(i, b->cpus) { | ||
552 | if (i == cpu) | ||
553 | continue; | ||
554 | |||
555 | err = sysfs_create_link(&per_cpu(mce_dev, i).kobj, | ||
556 | b->kobj, name); | ||
557 | if (err) | ||
558 | goto out; | ||
559 | |||
560 | per_cpu(threshold_banks, i)[bank] = b; | ||
561 | } | ||
562 | |||
563 | goto out; | ||
564 | |||
565 | out_free: | ||
566 | per_cpu(threshold_banks, cpu)[bank] = NULL; | ||
567 | free_cpumask_var(b->cpus); | ||
568 | kfree(b); | ||
569 | out: | ||
570 | return err; | ||
571 | } | ||
572 | |||
573 | /* create dir/files for all valid threshold banks */ | ||
574 | static __cpuinit int threshold_create_device(unsigned int cpu) | ||
575 | { | ||
576 | unsigned int bank; | ||
577 | int err = 0; | ||
578 | |||
579 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
580 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) | ||
581 | continue; | ||
582 | err = threshold_create_bank(cpu, bank); | ||
583 | if (err) | ||
584 | goto out; | ||
585 | } | ||
586 | out: | ||
587 | return err; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * let's be hotplug friendly. | ||
592 | * in case of multiple core processors, the first core always takes ownership | ||
593 | * of shared sysfs dir/files, and rest of the cores will be symlinked to it. | ||
594 | */ | ||
595 | |||
/*
 * Free all threshold_block objects of one bank on one CPU.
 *
 * Walks the sibling list hanging off the head block, dropping each
 * block's sysfs object and freeing it, then frees the head block and
 * clears the bank's ->blocks pointer.
 *
 * NOTE(review): the head block is kfree()d without a kobject_put on
 * its embedded kobj — looks like its sysfs object may be leaked here;
 * confirm against the kobject teardown in threshold_remove_bank().
 */
static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
615 | |||
/*
 * Tear down one bank's sysfs representation on one CPU.
 *
 * A CPU that only holds a symlink to a shared bank just removes the
 * link; the owning CPU removes all sibling symlinks first, frees the
 * block objects, then drops the bank directory itself.
 */
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct threshold_bank *b;
	char name[32];
	int i = 0;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;
	/* no blocks: nothing was linked, go straight to freeing the dir */
	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink: this CPU is not the owner of the shared bank */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;

		return;
	}
#endif

	/* remove all sibling symlinks before unregistering */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	free_cpumask_var(b->cpus);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
658 | |||
659 | static void threshold_remove_device(unsigned int cpu) | ||
660 | { | ||
661 | unsigned int bank; | ||
662 | |||
663 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
664 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) | ||
665 | continue; | ||
666 | threshold_remove_bank(cpu, bank); | ||
667 | } | ||
668 | } | ||
669 | |||
670 | /* get notified when a cpu comes on/off */ | ||
671 | static void __cpuinit | ||
672 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) | ||
673 | { | ||
674 | switch (action) { | ||
675 | case CPU_ONLINE: | ||
676 | case CPU_ONLINE_FROZEN: | ||
677 | threshold_create_device(cpu); | ||
678 | break; | ||
679 | case CPU_DEAD: | ||
680 | case CPU_DEAD_FROZEN: | ||
681 | threshold_remove_device(cpu); | ||
682 | break; | ||
683 | default: | ||
684 | break; | ||
685 | } | ||
686 | } | ||
687 | |||
688 | static __init int threshold_init_device(void) | ||
689 | { | ||
690 | unsigned lcpu = 0; | ||
691 | |||
692 | /* to hit CPUs online before the notifier is up */ | ||
693 | for_each_online_cpu(lcpu) { | ||
694 | int err = threshold_create_device(lcpu); | ||
695 | |||
696 | if (err) | ||
697 | return err; | ||
698 | } | ||
699 | threshold_cpu_callback = amd_64_threshold_cpu_callback; | ||
700 | |||
701 | return 0; | ||
702 | } | ||
703 | device_initcall(threshold_init_device); | ||