author		Ingo Molnar <mingo@elte.hu>	2009-04-08 06:31:18 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2009-05-28 12:24:10 -0400
commit		1cb2a8e1767ab60370ecce90654c0f281c602d95 (patch)
tree		2a3e20eadf8ebab4cfad0a3a038b06514ed3480f /arch/x86/kernel
parent		cb6f3c155b0afabc48667efb9e7b1ce92ccfcab4 (diff)
x86, mce: clean up mce_amd_64.c
Make the coding style match that of the rest of the x86 arch code.

[ Impact: cleanup ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
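The conventions applied are the usual tip-tree rules: local variable declarations ordered longest-first, every arm of an if/else chain braced once any arm needs braces, and multi-line comments opened and closed on their own lines. A minimal sketch of those rules in plain C, with made-up names (illustrative only, not code from this patch):

    #include <stdint.h>

    /*
     * Illustrative sketch of the style rules this cleanup applies;
     * next_block_address() and its locals are hypothetical.
     */
    uint32_t next_block_address(unsigned int bank, unsigned int block,
                                uint32_t address)
    {
            /* longest declaration first, shortest last: */
            const uint32_t xblk_base = 0xC0000400; /* stand-in for MCG_XBLK_ADDR */
            unsigned int step = 4;

            if (block == 0) {                  /* brace every arm ...  */
                    address = bank * step;
            } else if (block == 1) {
                    address += xblk_base;
            } else {                           /* ... even one-liners  */
                    ++address;
            }

            /*
             * Multi-line comments put the opening and closing
             * delimiters on their own lines instead of running
             * the text onto them.
             */
            return address;
    }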
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd_64.c	188
1 file changed, 103 insertions(+), 85 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 56dde9c4bc96..4d90ec3eb51d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -13,22 +13,22 @@
  *
  *  All MC4_MISCi registers are shared between multi-cores
  */
-
-#include <linux/cpu.h>
-#include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/kobject.h>
 #include <linux/notifier.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
+#include <linux/kobject.h>
 #include <linux/sysdev.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
 #include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+
+#include <asm/percpu.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
-#include <asm/percpu.h>
-#include <asm/idle.h>
 
 #define PFX		"mce_threshold: "
 #define VERSION		"version 1.1.1"
@@ -48,26 +48,26 @@
 #define MCG_XBLK_ADDR	0xC0000400
 
 struct threshold_block {
-	unsigned int block;
-	unsigned int bank;
-	unsigned int cpu;
-	u32 address;
-	u16 interrupt_enable;
-	u16 threshold_limit;
-	struct kobject kobj;
-	struct list_head miscj;
+	unsigned int		block;
+	unsigned int		bank;
+	unsigned int		cpu;
+	u32			address;
+	u16			interrupt_enable;
+	u16			threshold_limit;
+	struct kobject		kobj;
+	struct list_head	miscj;
 };
 
 /* defaults used early on boot */
 static struct threshold_block threshold_defaults = {
-	.interrupt_enable = 0,
-	.threshold_limit = THRESHOLD_MAX,
+	.interrupt_enable	= 0,
+	.threshold_limit	= THRESHOLD_MAX,
 };
 
 struct threshold_bank {
-	struct kobject *kobj;
-	struct threshold_block *blocks;
-	cpumask_var_t cpus;
+	struct kobject		*kobj;
+	struct threshold_block	*blocks;
+	cpumask_var_t		cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -86,9 +86,9 @@ static void amd_threshold_interrupt(void);
  */
 
 struct thresh_restart {
-	struct threshold_block *b;
-	int reset;
-	u16 old_limit;
+	struct threshold_block	*b;
+	int			reset;
+	u16			old_limit;
 };
 
 /* must be called with correct cpu affinity */
@@ -110,6 +110,7 @@ static void threshold_restart_bank(void *_tr)
 	} else if (tr->old_limit) {	/* change limit w/o reset */
 		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
 		    (tr->old_limit - tr->b->threshold_limit);
+
 		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}
@@ -125,11 +126,11 @@ static void threshold_restart_bank(void *_tr)
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
-	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
-	u8 lvt_off;
 	u32 low = 0, high = 0, address = 0;
+	unsigned int bank, block;
 	struct thresh_restart tr;
+	u8 lvt_off;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -140,8 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 				if (!address)
 					break;
 				address += MCG_XBLK_ADDR;
-			}
-			else
+			} else
 				++address;
 
 			if (rdmsr_safe(address, &low, &high))
@@ -193,9 +193,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
  */
 static void amd_threshold_interrupt(void)
 {
+	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
 	struct mce m;
-	u32 low = 0, high = 0, address = 0;
 
 	mce_setup(&m);
 
@@ -204,16 +204,16 @@ static void amd_threshold_interrupt(void)
 		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
 			continue;
 		for (block = 0; block < NR_BLOCKS; ++block) {
-			if (block == 0)
+			if (block == 0) {
 				address = MSR_IA32_MC0_MISC + bank * 4;
-			else if (block == 1) {
+			} else if (block == 1) {
 				address = (low & MASK_BLKPTR_LO) >> 21;
 				if (!address)
 					break;
 				address += MCG_XBLK_ADDR;
-			}
-			else
+			} else {
 				++address;
+			}
 
 			if (rdmsr_safe(address, &low, &high))
 				break;
@@ -229,8 +229,10 @@ static void amd_threshold_interrupt(void)
 			    (high & MASK_LOCKED_HI))
 				continue;
 
-			/* Log the machine check that caused the threshold
-			   event. */
+			/*
+			 * Log the machine check that caused the threshold
+			 * event.
+			 */
 			machine_check_poll(MCP_TIMESTAMP,
 					   &__get_cpu_var(mce_poll_banks));
 
@@ -254,48 +256,56 @@ static void amd_threshold_interrupt(void)
 
 struct threshold_attr {
 	struct attribute attr;
-	ssize_t(*show) (struct threshold_block *, char *);
-	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
+	ssize_t (*show) (struct threshold_block *, char *);
+	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
 };
 
-#define SHOW_FIELDS(name)                                            \
-static ssize_t show_ ## name(struct threshold_block * b, char *buf)  \
-{                                                                    \
-	return sprintf(buf, "%lx\n", (unsigned long) b->name);       \
-}
+#define SHOW_FIELDS(name)						\
+static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
+{									\
+	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
+}
 SHOW_FIELDS(interrupt_enable)
 SHOW_FIELDS(threshold_limit)
 
-static ssize_t store_interrupt_enable(struct threshold_block *b,
-				      const char *buf, size_t count)
+static ssize_t
+store_interrupt_enable(struct threshold_block *b, const char *buf, size_t count)
 {
-	char *end;
 	struct thresh_restart tr;
-	unsigned long new = simple_strtoul(buf, &end, 0);
+	unsigned long new;
+	char *end;
+
+	new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
+
 	b->interrupt_enable = !!new;
 
-	tr.b = b;
-	tr.reset = 0;
-	tr.old_limit = 0;
+	tr.b		= b;
+	tr.reset	= 0;
+	tr.old_limit	= 0;
+
 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
 	return end - buf;
 }
 
-static ssize_t store_threshold_limit(struct threshold_block *b,
-				     const char *buf, size_t count)
+static ssize_t
+store_threshold_limit(struct threshold_block *b, const char *buf, size_t count)
 {
-	char *end;
 	struct thresh_restart tr;
-	unsigned long new = simple_strtoul(buf, &end, 0);
+	unsigned long new;
+	char *end;
+
+	new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
+
 	if (new > THRESHOLD_MAX)
 		new = THRESHOLD_MAX;
 	if (new < 1)
 		new = 1;
+
 	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
 	tr.b = b;
@@ -307,8 +317,8 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 }
 
 struct threshold_block_cross_cpu {
-	struct threshold_block *tb;
-	long retval;
+	struct threshold_block	*tb;
+	long			retval;
 };
 
 static void local_error_count_handler(void *_tbcc)
@@ -338,15 +348,16 @@ static ssize_t store_error_count(struct threshold_block *b,
 	return 1;
 }
 
-#define THRESHOLD_ATTR(_name,_mode,_show,_store) {               \
-	.attr = {.name = __stringify(_name), .mode = _mode },    \
-	.show = _show,                                            \
-	.store = _store,                                          \
+#define THRESHOLD_ATTR(_name, _mode, _show, _store)		\
+{								\
+	.attr	= {.name = __stringify(_name), .mode = _mode },	\
+	.show	= _show,					\
+	.store	= _store,					\
 };
 
-#define RW_ATTR(name)                                             \
-static struct threshold_attr name =                               \
-	THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
+#define RW_ATTR(name)						\
+static struct threshold_attr name =				\
+	THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
 
 RW_ATTR(interrupt_enable);
 RW_ATTR(threshold_limit);
@@ -359,15 +370,17 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
-#define to_block(k) container_of(k, struct threshold_block, kobj)
-#define to_attr(a) container_of(a, struct threshold_attr, attr)
+#define to_block(k)	container_of(k, struct threshold_block, kobj)
+#define to_attr(a)	container_of(a, struct threshold_attr, attr)
 
 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct threshold_block *b = to_block(kobj);
 	struct threshold_attr *a = to_attr(attr);
 	ssize_t ret;
+
 	ret = a->show ? a->show(b, buf) : -EIO;
+
 	return ret;
 }
 
@@ -377,18 +390,20 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	struct threshold_block *b = to_block(kobj);
 	struct threshold_attr *a = to_attr(attr);
 	ssize_t ret;
+
 	ret = a->store ? a->store(b, buf, count) : -EIO;
+
 	return ret;
 }
 
 static struct sysfs_ops threshold_ops = {
-	.show = show,
-	.store = store,
+	.show			= show,
+	.store			= store,
 };
 
 static struct kobj_type threshold_ktype = {
-	.sysfs_ops = &threshold_ops,
-	.default_attrs = default_attrs,
+	.sysfs_ops		= &threshold_ops,
+	.default_attrs		= default_attrs,
 };
 
 static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
@@ -396,9 +411,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 					      unsigned int block,
 					      u32 address)
 {
-	int err;
-	u32 low, high;
 	struct threshold_block *b = NULL;
+	u32 low, high;
+	int err;
 
 	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
 		return 0;
@@ -421,20 +436,21 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	if (!b)
 		return -ENOMEM;
 
-	b->block = block;
-	b->bank = bank;
-	b->cpu = cpu;
-	b->address = address;
-	b->interrupt_enable = 0;
-	b->threshold_limit = THRESHOLD_MAX;
+	b->block		= block;
+	b->bank			= bank;
+	b->cpu			= cpu;
+	b->address		= address;
+	b->interrupt_enable	= 0;
+	b->threshold_limit	= THRESHOLD_MAX;
 
 	INIT_LIST_HEAD(&b->miscj);
 
-	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
+	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
 		list_add(&b->miscj,
 			&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
-	else
+	} else {
 		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+	}
 
 	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
 				   per_cpu(threshold_banks, cpu)[bank]->kobj,
@@ -447,8 +463,9 @@ recurse:
 		if (!address)
 			return 0;
 		address += MCG_XBLK_ADDR;
-	} else
+	} else {
 		++address;
+	}
 
 	err = allocate_threshold_blocks(cpu, bank, ++block, address);
 	if (err)
@@ -507,6 +524,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 		cpumask_copy(b->cpus, cpu_core_mask(cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
+
 		goto out;
 	}
 #endif
@@ -605,15 +623,13 @@ static void deallocate_threshold_block(unsigned int cpu,
 
 static void threshold_remove_bank(unsigned int cpu, int bank)
 {
-	int i = 0;
 	struct threshold_bank *b;
 	char name[32];
+	int i = 0;
 
 	b = per_cpu(threshold_banks, cpu)[bank];
-
 	if (!b)
 		return;
-
 	if (!b->blocks)
 		goto free_out;
 
@@ -624,6 +640,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
 		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
 		per_cpu(threshold_banks, cpu)[bank] = NULL;
+
 		return;
 	}
 #endif
@@ -659,8 +676,8 @@ static void threshold_remove_device(unsigned int cpu)
 }
 
 /* get notified when a cpu comes on/off */
-static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
-						    unsigned int cpu)
+static void __cpuinit
+amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
 {
 	if (cpu >= NR_CPUS)
 		return;
@@ -686,11 +703,12 @@ static __init int threshold_init_device(void)
 	/* to hit CPUs online before the notifier is up */
 	for_each_online_cpu(lcpu) {
 		int err = threshold_create_device(lcpu);
+
 		if (err)
 			return err;
 	}
 	threshold_cpu_callback = amd_64_threshold_cpu_callback;
+
 	return 0;
 }
-
 device_initcall(threshold_init_device);
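
For context on the show()/store() wrappers reformatted above: they implement the standard sysfs attribute dispatch pattern, where a generic handler resolves the per-attribute callback and returns -EIO when none is registered. A self-contained user-space sketch of that pattern (all names here are hypothetical; the kernel versions hang off struct kobject and struct sysfs_ops instead):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    struct attr {
            const char *name;
            ssize_t (*show)(char *buf, size_t len);
    };

    static ssize_t show_threshold_limit(char *buf, size_t len)
    {
            /* a THRESHOLD_MAX-style 12-bit limit, for illustration */
            return snprintf(buf, len, "%x\n", 0xfff);
    }

    static struct attr attrs[] = {
            { "threshold_limit", show_threshold_limit },
            { "error_count",     NULL },  /* no handler: caller sees -EIO */
    };

    static ssize_t show(const char *name, char *buf, size_t len)
    {
            size_t i;

            for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
                    if (strcmp(attrs[i].name, name))
                            continue;
                    /* same shape as the kernel's: a->show ? a->show(b, buf) : -EIO */
                    return attrs[i].show ? attrs[i].show(buf, len) : -EIO;
            }
            return -ENOENT;
    }

    int main(void)
    {
            char buf[16];

            if (show("threshold_limit", buf, sizeof(buf)) > 0)
                    fputs(buf, stdout);
            return 0;
    }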