path: root/arch/x86/kernel/cpu/mcheck
author	Borislav Petkov <borislav.petkov@amd.com>	2012-05-02 11:16:59 -0400
committer	Borislav Petkov <borislav.petkov@amd.com>	2012-06-07 06:43:44 -0400
commit	019f34fccfd5cf5ff1e722dafd9fe2bd54434e66 (patch)
tree	65c1cccc25052d3fbdd884ff4d5efe4b81fbbd40 /arch/x86/kernel/cpu/mcheck
parent	26ab256eaac7af26ecd9ba893b5159a3b38c8a1c (diff)
x86, MCE, AMD: Move shared bank to node descriptor
Instead of having a real bank 4 on the BSP of each node and symlinks on the remaining cores, push it up into the amd_northbridge descriptor, which now carries a pointer to the northbridge bank 4. The bank is one per northbridge and, as such, belongs in the NB descriptor anyway.

On CPU hotplug, we use the northbridge pointer to copy the shared bank into the per-CPU array of threshold_banks pointers: the bank is created when the first CPU on a node comes online and destroyed when the last one goes offline.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd.c | 107
1 file changed, 87 insertions(+), 20 deletions(-)
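The diffstat is limited to arch/x86/kernel/cpu/mcheck, so the companion hunk that actually grows the northbridge descriptor (in <asm/amd_nb.h>, where struct threshold_bank now lives together with its new reference count) is not shown here. A sketch of what that side of the change presumably looks like; existing amd_northbridge members are abbreviated and not part of this patch's diffstat:

struct threshold_bank {
	struct kobject		*kobj;
	struct threshold_block	*blocks;

	/* initialized to the number of CPUs on the node sharing this bank */
	atomic_t		cpus;
};

struct amd_northbridge {
	/* ... existing members (PCI devices, L3 cache info) ... */
	struct threshold_bank	*bank4;	/* one shared bank 4 per northbridge */
};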
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 7fd02cac962..d67c9e56d60 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -25,6 +25,7 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
+#include <asm/amd_nb.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
 #include <asm/mce.h>
@@ -45,23 +46,6 @@
 #define MASK_BLKPTR_LO    0xFF000000
 #define MCG_XBLK_ADDR     0xC0000400
 
-struct threshold_block {
-	unsigned int		block;
-	unsigned int		bank;
-	unsigned int		cpu;
-	u32			address;
-	u16			interrupt_enable;
-	bool			interrupt_capable;
-	u16			threshold_limit;
-	struct kobject		kobj;
-	struct list_head	miscj;
-};
-
-struct threshold_bank {
-	struct kobject		*kobj;
-	struct threshold_block	*blocks;
-};
-
 static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
 static unsigned char shared_bank[NR_BANKS] = {
@@ -546,15 +530,62 @@ out_free:
 	return err;
 }
 
+static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+{
+	struct list_head *head = &b->blocks->miscj;
+	struct threshold_block *pos = NULL;
+	struct threshold_block *tmp = NULL;
+	int err = 0;
+
+	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
+	if (err)
+		return err;
+
+	list_for_each_entry_safe(pos, tmp, head, miscj) {
+
+		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
+		if (err) {
+			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
+				kobject_del(&pos->kobj);
+
+			return err;
+		}
+	}
+	return err;
+}
+
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
+	struct amd_northbridge *nb = NULL;
 	struct threshold_bank *b = NULL;
 	char name[32];
 	int err = 0;
 
 	sprintf(name, "threshold_bank%i", bank);
 
+	if (shared_bank[bank]) {
+
+		nb = node_to_amd_nb(amd_get_nb_id(cpu));
+		WARN_ON(!nb);
+
+		/* threshold descriptor already initialized on this node? */
+		if (nb->bank4) {
+			/* yes, use it */
+			b = nb->bank4;
+			err = kobject_add(b->kobj, &dev->kobj, name);
+			if (err)
+				goto out;
+
+			per_cpu(threshold_banks, cpu)[bank] = b;
+			atomic_inc(&b->cpus);
+
+			err = __threshold_add_blocks(b);
+
+			goto out;
+		}
+	}
+
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
 		err = -ENOMEM;
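The reuse path above hands the already-initialized node bank to each additional CPU: __threshold_add_blocks() re-registers the per-block kobjects under the new CPU's device and, if any registration fails, unwinds the ones already added in reverse order. A minimal userspace analogue of that unwind idiom, with add_one()/del_one() as hypothetical stand-ins for kobject_add()/kobject_del():

#include <stdio.h>

static int add_one(int i)
{
	return (i == 3) ? -1 : 0;	/* pretend the 4th registration fails */
}

static void del_one(int i)
{
	printf("rolling back %d\n", i);
}

static int add_blocks(int n)
{
	for (int i = 0; i < n; i++) {
		if (add_one(i)) {
			/* mirror list_for_each_entry_safe_reverse(): undo
			 * the registrations already made, newest first */
			while (i--)
				del_one(i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return add_blocks(5) ? 1 : 0;	/* rolls back 2, 1, 0 and fails */
}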
@@ -569,15 +600,23 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
+	if (shared_bank[bank]) {
+		atomic_set(&b->cpus, 1);
+
+		/* nb is already initialized, see above */
+		WARN_ON(nb->bank4);
+		nb->bank4 = b;
+	}
+
 	err = allocate_threshold_blocks(cpu, bank, 0,
 					MSR_IA32_MC0_MISC + bank * 4);
 	if (!err)
 		goto out;
 
-out_free:
-	per_cpu(threshold_banks, cpu)[bank] = NULL;
+ out_free:
 	kfree(b);
-out:
+
+ out:
 	return err;
 }
 
583 622
@@ -618,16 +657,44 @@ static void deallocate_threshold_block(unsigned int cpu,
 	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
 }
 
+static void __threshold_remove_blocks(struct threshold_bank *b)
+{
+	struct threshold_block *pos = NULL;
+	struct threshold_block *tmp = NULL;
+
+	kobject_del(b->kobj);
+
+	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
+		kobject_del(&pos->kobj);
+}
+
 static void threshold_remove_bank(unsigned int cpu, int bank)
 {
+	struct amd_northbridge *nb;
 	struct threshold_bank *b;
 
 	b = per_cpu(threshold_banks, cpu)[bank];
 	if (!b)
 		return;
+
 	if (!b->blocks)
 		goto free_out;
 
+	if (shared_bank[bank]) {
+		if (!atomic_dec_and_test(&b->cpus)) {
+			__threshold_remove_blocks(b);
+			per_cpu(threshold_banks, cpu)[bank] = NULL;
+			return;
+		} else {
+			/*
+			 * the last CPU on this node using the shared bank is
+			 * going away, remove that bank now.
+			 */
+			nb = node_to_amd_nb(amd_get_nb_id(cpu));
+			nb->bank4 = NULL;
+		}
+	}
+
 	deallocate_threshold_block(cpu, bank);
 
 free_out:
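Taken together, the create and remove paths implement a per-node refcounted singleton: the first CPU on a node to come online allocates bank 4, parks it in nb->bank4 and sets cpus to 1; later CPUs only take a reference; the last CPU to go offline clears the NB pointer and frees the bank. A condensed userspace sketch of that lifecycle, with C11 atomics standing in for the kernel's atomic_t and all names hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bank {
	atomic_int cpus;	/* CPUs on the node using this bank */
};

struct node {
	struct bank *bank4;	/* one shared bank per node */
};

/* first CPU on the node allocates; later CPUs just take a reference */
static struct bank *bank_get(struct node *n)
{
	if (n->bank4) {
		atomic_fetch_add(&n->bank4->cpus, 1);
		return n->bank4;
	}

	struct bank *b = calloc(1, sizeof(*b));
	atomic_store(&b->cpus, 1);
	n->bank4 = b;
	return b;
}

/* last CPU going offline tears the bank down */
static void bank_put(struct node *n, struct bank *b)
{
	if (atomic_fetch_sub(&b->cpus, 1) == 1) {
		n->bank4 = NULL;
		free(b);
	}
}

int main(void)
{
	struct node n = { 0 };
	struct bank *b0 = bank_get(&n);	/* CPU 0 online: allocates        */
	struct bank *b1 = bank_get(&n);	/* CPU 1 online: refcount goes to 2 */
	bank_put(&n, b1);		/* CPU 1 offline                  */
	bank_put(&n, b0);		/* CPU 0 offline: frees the bank  */
	printf("bank4 = %p\n", (void *)n.bank4);	/* prints (nil) */
	return 0;
}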