Diffstat (limited to 'arch/x86_64/kernel/mce_amd.c')
-rw-r--r--  arch/x86_64/kernel/mce_amd.c  506
1 file changed, 323 insertions(+), 183 deletions(-)
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d13b241ad094..335200aa2737 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -1,5 +1,5 @@
 /*
- * (c) 2005 Advanced Micro Devices, Inc.
+ * (c) 2005, 2006 Advanced Micro Devices, Inc.
  * Your use of this code is subject to the terms and conditions of the
  * GNU general public license version 2. See "COPYING" or
  * http://www.gnu.org/licenses/gpl.html
@@ -8,9 +8,10 @@
  *
  * Support : jacob.shin@amd.com
  *
- * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
- * MC4_MISC0 exists per physical processor.
+ * April 2006
+ * - added support for AMD Family 0x10 processors
  *
+ * All MC4_MISCi registers are shared between multi-cores
  */
 
 #include <linux/cpu.h>
@@ -29,32 +30,45 @@
 #include <asm/percpu.h>
 #include <asm/idle.h>
 
 #define PFX               "mce_threshold: "
-#define VERSION           "version 1.00.9"
-#define NR_BANKS          5
-#define THRESHOLD_MAX     0xFFF
-#define INT_TYPE_APIC     0x00020000
-#define MASK_VALID_HI     0x80000000
-#define MASK_LVTOFF_HI    0x00F00000
-#define MASK_COUNT_EN_HI  0x00080000
-#define MASK_INT_TYPE_HI  0x00060000
-#define MASK_OVERFLOW_HI  0x00010000
+#define VERSION           "version 1.1.1"
+#define NR_BANKS          6
+#define NR_BLOCKS         9
+#define THRESHOLD_MAX     0xFFF
+#define INT_TYPE_APIC     0x00020000
+#define MASK_VALID_HI     0x80000000
+#define MASK_LVTOFF_HI    0x00F00000
+#define MASK_COUNT_EN_HI  0x00080000
+#define MASK_INT_TYPE_HI  0x00060000
+#define MASK_OVERFLOW_HI  0x00010000
 #define MASK_ERR_COUNT_HI 0x00000FFF
-#define MASK_OVERFLOW     0x0001000000000000L
+#define MASK_BLKPTR_LO    0xFF000000
+#define MCG_XBLK_ADDR     0xC0000400
 
-struct threshold_bank {
+struct threshold_block {
+        unsigned int block;
+        unsigned int bank;
         unsigned int cpu;
-        u8 bank;
-        u8 interrupt_enable;
+        u32 address;
+        u16 interrupt_enable;
         u16 threshold_limit;
         struct kobject kobj;
+        struct list_head miscj;
 };
 
-static struct threshold_bank threshold_defaults = {
+/* defaults used early on boot */
+static struct threshold_block threshold_defaults = {
         .interrupt_enable = 0,
         .threshold_limit = THRESHOLD_MAX,
 };
 
+struct threshold_bank {
+        struct kobject kobj;
+        struct threshold_block *blocks;
+        cpumask_t cpus;
+};
+static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+
 #ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
         0, 0, 0, 0, 1
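
The MASK_*_HI constants select fields in the high dword of each MCi_MISCj threshold register: bit 31 of the high half is MSR bit 63 (Valid), and, per the old code's "!valid, !counter present, bios locked" comment, the next two bits down are CntP (counter present) and Locked. The standalone sketch below is an illustration, not part of the patch; it mirrors the usability test the new code applies to every block. Note that `>>` binds tighter than `&`, so MASK_VALID_HI >> 1 needs no extra parentheses.

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_VALID_HI 0x80000000        /* MSR bit 63: block valid */

    /* Mirrors the three checks the init and interrupt loops apply to the
     * high dword of every candidate block: valid, counter present (one
     * bit below Valid), and not locked by the BIOS (two bits below). */
    static int block_usable(uint32_t high)
    {
            if (!(high & MASK_VALID_HI))            /* no block here */
                    return 0;
            if (!(high & MASK_VALID_HI >> 1))       /* no counter present */
                    return 0;
            if (high & MASK_VALID_HI >> 2)          /* BIOS locked */
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", block_usable(0xC0000000)); /* valid + CntP -> 1 */
            printf("%d\n", block_usable(0xE0000000)); /* locked -> 0 */
            printf("%d\n", block_usable(0x80000000)); /* no counter -> 0 */
            return 0;
    }
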
@@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  */
 
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_bank *b,
+static void threshold_restart_bank(struct threshold_block *b,
                                    int reset, u16 old_limit)
 {
         u32 mci_misc_hi, mci_misc_lo;
 
-        rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+        rdmsr(b->address, mci_misc_lo, mci_misc_hi);
 
         if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
                 reset = 1;      /* limit cannot be lower than err count */
@@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b,
             (mci_misc_hi &= ~MASK_INT_TYPE_HI);
 
         mci_misc_hi |= MASK_COUNT_EN_HI;
-        wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+        wrmsr(b->address, mci_misc_lo, mci_misc_hi);
 }
 
+/* cpu init entry point, called from mce.c with preempt off */
 void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
-        int bank;
-        u32 mci_misc_lo, mci_misc_hi;
+        unsigned int bank, block;
         unsigned int cpu = smp_processor_id();
+        u32 low = 0, high = 0, address = 0;
 
         for (bank = 0; bank < NR_BANKS; ++bank) {
-                rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
+                for (block = 0; block < NR_BLOCKS; ++block) {
+                        if (block == 0)
+                                address = MSR_IA32_MC0_MISC + bank * 4;
+                        else if (block == 1)
+                                address = MCG_XBLK_ADDR
+                                        + ((low & MASK_BLKPTR_LO) >> 21);
+                        else
+                                ++address;
+
+                        if (rdmsr_safe(address, &low, &high))
+                                continue;
 
-                /* !valid, !counter present, bios locked */
-                if (!(mci_misc_hi & MASK_VALID_HI) ||
-                    !(mci_misc_hi & MASK_VALID_HI >> 1) ||
-                    (mci_misc_hi & MASK_VALID_HI >> 2))
-                        continue;
+                        if (!(high & MASK_VALID_HI)) {
+                                if (block)
+                                        continue;
+                                else
+                                        break;
+                        }
 
-                per_cpu(bank_map, cpu) |= (1 << bank);
+                        if (!(high & MASK_VALID_HI >> 1) ||
+                            (high & MASK_VALID_HI >> 2))
+                                continue;
 
+                        if (!block)
+                                per_cpu(bank_map, cpu) |= (1 << bank);
 #ifdef CONFIG_SMP
-                if (shared_bank[bank] && cpu_core_id[cpu])
-                        continue;
+                        if (shared_bank[bank] && c->cpu_core_id)
+                                break;
 #endif
+                        high &= ~MASK_LVTOFF_HI;
+                        high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
+                        wrmsr(address, low, high);
 
-                setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
-                threshold_defaults.cpu = cpu;
-                threshold_defaults.bank = bank;
-                threshold_restart_bank(&threshold_defaults, 0, 0);
+                        setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
+                                               THRESHOLD_APIC_VECTOR,
+                                               K8_APIC_EXT_INT_MSG_FIX, 0);
+
+                        threshold_defaults.address = address;
+                        threshold_restart_bank(&threshold_defaults, 0, 0);
+                }
         }
 }
 
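
Both the init loop above and the interrupt handler below walk a bank's blocks with the same address arithmetic: block 0 is the classic MC<bank>_MISC MSR, block 1 comes from the BLKPTR byte in block 0's low dword (the `>> 21` turns the byte in bits 31:24 into an offset of blkptr * 8 from MCG_XBLK_ADDR), and later blocks are contiguous MSRs. A user-space sketch of just that arithmetic; 0x403 is the architectural MC0_MISC address, and the example BLKPTR value is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_IA32_MC0_MISC 0x403         /* architectural MC0_MISC */
    #define MCG_XBLK_ADDR     0xC0000400
    #define MASK_BLKPTR_LO    0xFF000000

    /* MSR address of threshold block `block` in `bank`, given the address
     * and low dword read from the previous block (as in the loops). */
    static uint32_t block_address(unsigned int bank, unsigned int block,
                                  uint32_t prev_address, uint32_t prev_low)
    {
            if (block == 0)
                    return MSR_IA32_MC0_MISC + bank * 4;
            if (block == 1) /* BLKPTR byte, in units of 8 MSRs */
                    return MCG_XBLK_ADDR + ((prev_low & MASK_BLKPTR_LO) >> 21);
            return prev_address + 1;
    }

    int main(void)
    {
            uint32_t b0 = block_address(4, 0, 0, 0);
            uint32_t b1 = block_address(4, 1, b0, 0x01000000); /* BLKPTR = 1 */
            uint32_t b2 = block_address(4, 2, b1, 0);
            /* prints 0x413 0xc0000408 0xc0000409 */
            printf("%#x %#x %#x\n", (unsigned)b0, (unsigned)b1, (unsigned)b2);
            return 0;
    }
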
@@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
  */
 asmlinkage void mce_threshold_interrupt(void)
 {
-        int bank;
+        unsigned int bank, block;
         struct mce m;
+        u32 low = 0, high = 0, address = 0;
 
         ack_APIC_irq();
         exit_idle();
@@ -150,15 +187,42 @@ asmlinkage void mce_threshold_interrupt(void)
 
         /* assume first bank caused it */
         for (bank = 0; bank < NR_BANKS; ++bank) {
-                m.bank = MCE_THRESHOLD_BASE + bank;
-                rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
+                for (block = 0; block < NR_BLOCKS; ++block) {
+                        if (block == 0)
+                                address = MSR_IA32_MC0_MISC + bank * 4;
+                        else if (block == 1)
+                                address = MCG_XBLK_ADDR
+                                        + ((low & MASK_BLKPTR_LO) >> 21);
+                        else
+                                ++address;
+
+                        if (rdmsr_safe(address, &low, &high))
+                                continue;
 
-                if (m.misc & MASK_OVERFLOW) {
-                        mce_log(&m);
-                        goto out;
+                        if (!(high & MASK_VALID_HI)) {
+                                if (block)
+                                        continue;
+                                else
+                                        break;
+                        }
+
+                        if (!(high & MASK_VALID_HI >> 1) ||
+                            (high & MASK_VALID_HI >> 2))
+                                continue;
+
+                        if (high & MASK_OVERFLOW_HI) {
+                                rdmsrl(address, m.misc);
+                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
+                                       m.status);
+                                m.bank = K8_MCE_THRESHOLD_BASE
+                                       + bank * NR_BLOCKS
+                                       + block;
+                                mce_log(&m);
+                                goto out;
+                        }
                 }
         }
 out:
         irq_exit();
 }
 
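
On overflow the handler logs the event under a synthetic bank number, K8_MCE_THRESHOLD_BASE + bank * NR_BLOCKS + block, so every (bank, block) pair stays distinguishable in the mce log. A sketch of the encoding and its inverse; the base value 129 is assumed purely for illustration, the real constant comes from asm/mce.h:

    #include <stdio.h>

    #define NR_BLOCKS 9
    #define K8_MCE_THRESHOLD_BASE 129       /* assumed; see asm/mce.h */

    static unsigned int encode(unsigned int bank, unsigned int block)
    {
            return K8_MCE_THRESHOLD_BASE + bank * NR_BLOCKS + block;
    }

    static void decode(unsigned int id, unsigned int *bank, unsigned int *block)
    {
            *bank = (id - K8_MCE_THRESHOLD_BASE) / NR_BLOCKS;
            *block = (id - K8_MCE_THRESHOLD_BASE) % NR_BLOCKS;
    }

    int main(void)
    {
            unsigned int bank, block;
            decode(encode(4, 2), &bank, &block);
            printf("bank=%u block=%u\n", bank, block); /* bank=4 block=2 */
            return 0;
    }
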
@@ -166,20 +230,12 @@ asmlinkage void mce_threshold_interrupt(void)
  * Sysfs Interface
  */
 
-static struct sysdev_class threshold_sysclass = {
-        set_kset_name("threshold"),
-};
-
-static DEFINE_PER_CPU(struct sys_device, device_threshold);
-
 struct threshold_attr {
         struct attribute attr;
-        ssize_t(*show) (struct threshold_bank *, char *);
-        ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
+        ssize_t(*show) (struct threshold_block *, char *);
+        ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
-
 static cpumask_t affinity_set(unsigned int cpu)
 {
         cpumask_t oldmask = current->cpus_allowed;
@@ -194,15 +250,15 @@ static void affinity_restore(cpumask_t oldmask)
         set_cpus_allowed(current, oldmask);
 }
 
 #define SHOW_FIELDS(name) \
-static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
+static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
 { \
         return sprintf(buf, "%lx\n", (unsigned long) b->name); \
 }
 SHOW_FIELDS(interrupt_enable)
 SHOW_FIELDS(threshold_limit)
 
-static ssize_t store_interrupt_enable(struct threshold_bank *b,
+static ssize_t store_interrupt_enable(struct threshold_block *b,
                                       const char *buf, size_t count)
 {
         char *end;
@@ -219,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b,
         return end - buf;
 }
 
-static ssize_t store_threshold_limit(struct threshold_bank *b,
+static ssize_t store_threshold_limit(struct threshold_block *b,
                                      const char *buf, size_t count)
 {
         char *end;
@@ -242,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b,
         return end - buf;
 }
 
-static ssize_t show_error_count(struct threshold_bank *b, char *buf)
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
         u32 high, low;
         cpumask_t oldmask;
         oldmask = affinity_set(b->cpu);
-        rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
+        rdmsr(b->address, low, high);
         affinity_restore(oldmask);
         return sprintf(buf, "%x\n",
                        (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
 }
 
-static ssize_t store_error_count(struct threshold_bank *b,
+static ssize_t store_error_count(struct threshold_block *b,
                                  const char *buf, size_t count)
 {
         cpumask_t oldmask;
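
The arithmetic in show_error_count() reflects how the counter is armed: threshold_restart_bank() preloads the 12-bit count field with THRESHOLD_MAX - threshold_limit, so the hardware overflows after exactly threshold_limit further errors, and the sysfs read subtracts that bias back out. A worked sketch, illustrative rather than part of the patch:

    #include <stdio.h>

    #define THRESHOLD_MAX 0xFFF

    /* Raw count field of a freshly armed counter. */
    static unsigned int armed_count(unsigned int limit)
    {
            return THRESHOLD_MAX - limit;
    }

    /* What the error_count sysfs file reports, as in show_error_count(). */
    static unsigned int visible_count(unsigned int raw, unsigned int limit)
    {
            return raw - (THRESHOLD_MAX - limit);
    }

    int main(void)
    {
            unsigned int limit = 10;
            unsigned int raw = armed_count(limit);          /* 0xff5 */
            printf("%u\n", visible_count(raw, limit));      /* 0 errors yet */
            printf("%u\n", visible_count(raw + 3, limit));  /* 3 errors seen */
            return 0;
    }
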
@@ -269,13 +325,13 @@ static ssize_t store_error_count(struct threshold_bank *b,
         .store = _store, \
 };
 
-#define ATTR_FIELDS(name) \
+#define RW_ATTR(name) \
 static struct threshold_attr name = \
         THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
 
-ATTR_FIELDS(interrupt_enable);
-ATTR_FIELDS(threshold_limit);
-ATTR_FIELDS(error_count);
+RW_ATTR(interrupt_enable);
+RW_ATTR(threshold_limit);
+RW_ATTR(error_count);
 
 static struct attribute *default_attrs[] = {
         &interrupt_enable.attr,
@@ -284,12 +340,12 @@ static struct attribute *default_attrs[] = {
         NULL
 };
 
-#define to_bank(k) container_of(k,struct threshold_bank,kobj)
-#define to_attr(a) container_of(a,struct threshold_attr,attr)
+#define to_block(k) container_of(k, struct threshold_block, kobj)
+#define to_attr(a) container_of(a, struct threshold_attr, attr)
 
 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
-        struct threshold_bank *b = to_bank(kobj);
+        struct threshold_block *b = to_block(kobj);
         struct threshold_attr *a = to_attr(attr);
         ssize_t ret;
         ret = a->show ? a->show(b, buf) : -EIO;
@@ -299,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 static ssize_t store(struct kobject *kobj, struct attribute *attr,
                      const char *buf, size_t count)
 {
-        struct threshold_bank *b = to_bank(kobj);
+        struct threshold_block *b = to_block(kobj);
         struct threshold_attr *a = to_attr(attr);
         ssize_t ret;
         ret = a->store ? a->store(b, buf, count) : -EIO;
@@ -316,69 +372,174 @@ static struct kobj_type threshold_ktype = {
         .default_attrs = default_attrs,
 };
 
+static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+                                               unsigned int bank,
+                                               unsigned int block,
+                                               u32 address)
+{
+        int err;
+        u32 low, high;
+        struct threshold_block *b = NULL;
+
+        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
+                return 0;
+
+        if (rdmsr_safe(address, &low, &high))
+                goto recurse;
+
+        if (!(high & MASK_VALID_HI)) {
+                if (block)
+                        goto recurse;
+                else
+                        return 0;
+        }
+
+        if (!(high & MASK_VALID_HI >> 1) ||
+             (high & MASK_VALID_HI >> 2))
+                goto recurse;
+
+        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
+        if (!b)
+                return -ENOMEM;
+        memset(b, 0, sizeof(struct threshold_block));
+
+        b->block = block;
+        b->bank = bank;
+        b->cpu = cpu;
+        b->address = address;
+        b->interrupt_enable = 0;
+        b->threshold_limit = THRESHOLD_MAX;
+
+        INIT_LIST_HEAD(&b->miscj);
+
+        if (per_cpu(threshold_banks, cpu)[bank]->blocks)
+                list_add(&b->miscj,
+                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+        else
+                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+
+        kobject_set_name(&b->kobj, "misc%i", block);
+        b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
+        b->kobj.ktype = &threshold_ktype;
+        err = kobject_register(&b->kobj);
+        if (err)
+                goto out_free;
+recurse:
+        if (!block) {
+                address = (low & MASK_BLKPTR_LO) >> 21;
+                if (!address)
+                        return 0;
+                address += MCG_XBLK_ADDR;
+        } else
+                ++address;
+
+        err = allocate_threshold_blocks(cpu, bank, ++block, address);
+        if (err)
+                goto out_free;
+
+        return err;
+
+out_free:
+        if (b) {
+                kobject_unregister(&b->kobj);
+                kfree(b);
+        }
+        return err;
+}
+
 /* symlinks sibling shared banks to first core. first core owns dir/files. */
-static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
+static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
-        int err = 0;
+        int i, err = 0;
         struct threshold_bank *b = NULL;
+        cpumask_t oldmask = CPU_MASK_NONE;
+        char name[32];
+
+        sprintf(name, "threshold_bank%i", bank);
 
 #ifdef CONFIG_SMP
-        if (cpu_core_id[cpu] && shared_bank[bank]) {    /* symlink */
-                char name[16];
-                unsigned lcpu = first_cpu(cpu_core_map[cpu]);
-                if (cpu_core_id[lcpu])
-                        goto out;       /* first core not up yet */
+        if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {   /* symlink */
+                i = first_cpu(cpu_core_map[cpu]);
+
+                /* first core not up yet */
+                if (cpu_data[i].cpu_core_id)
+                        goto out;
+
+                /* already linked */
+                if (per_cpu(threshold_banks, cpu)[bank])
+                        goto out;
+
+                b = per_cpu(threshold_banks, i)[bank];
 
-                b = per_cpu(threshold_banks, lcpu)[bank];
                 if (!b)
                         goto out;
-                sprintf(name, "bank%i", bank);
-                err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
+
+                err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
                                         &b->kobj, name);
                 if (err)
                         goto out;
+
+                b->cpus = cpu_core_map[cpu];
                 per_cpu(threshold_banks, cpu)[bank] = b;
                 goto out;
         }
 #endif
 
-        b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
+        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
         if (!b) {
                 err = -ENOMEM;
                 goto out;
         }
         memset(b, 0, sizeof(struct threshold_bank));
 
-        b->cpu = cpu;
-        b->bank = bank;
-        b->interrupt_enable = 0;
-        b->threshold_limit = THRESHOLD_MAX;
-        kobject_set_name(&b->kobj, "bank%i", bank);
-        b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
-        b->kobj.ktype = &threshold_ktype;
-
+        kobject_set_name(&b->kobj, "threshold_bank%i", bank);
+        b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
+#ifndef CONFIG_SMP
+        b->cpus = CPU_MASK_ALL;
+#else
+        b->cpus = cpu_core_map[cpu];
+#endif
         err = kobject_register(&b->kobj);
-        if (err) {
-                kfree(b);
-                goto out;
-        }
+        if (err)
+                goto out_free;
+
         per_cpu(threshold_banks, cpu)[bank] = b;
-out:
+
+        oldmask = affinity_set(cpu);
+        err = allocate_threshold_blocks(cpu, bank, 0,
+                                        MSR_IA32_MC0_MISC + bank * 4);
+        affinity_restore(oldmask);
+
+        if (err)
+                goto out_free;
+
+        for_each_cpu_mask(i, b->cpus) {
+                if (i == cpu)
+                        continue;
+
+                err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
+                                        &b->kobj, name);
+                if (err)
+                        goto out;
+
+                per_cpu(threshold_banks, i)[bank] = b;
+        }
+
+        goto out;
+
+out_free:
+        per_cpu(threshold_banks, cpu)[bank] = NULL;
+        kfree(b);
+out:
         return err;
 }
 
 /* create dir/files for all valid threshold banks */
 static __cpuinit int threshold_create_device(unsigned int cpu)
 {
-        int bank;
+        unsigned int bank;
         int err = 0;
 
-        per_cpu(device_threshold, cpu).id = cpu;
-        per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
-        err = sysdev_register(&per_cpu(device_threshold, cpu));
-        if (err)
-                goto out;
-
         for (bank = 0; bank < NR_BANKS; ++bank) {
                 if (!(per_cpu(bank_map, cpu) & 1 << bank))
                         continue;
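
The ownership model after this change: the first core to see a bank allocates one threshold_bank kobject under its device_mce directory, hangs every discovered threshold_block off ->blocks (chained through the miscj list), and sibling cores get only sysfs symlinks plus a shared pointer. A user-space approximation of that layout, with a plain next pointer standing in for the kernel's struct list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct threshold_block {
            unsigned int block;
            unsigned int address;
            struct threshold_block *next;   /* stand-in for the miscj list */
    };

    struct threshold_bank {
            struct threshold_block *blocks; /* first block heads the chain */
    };

    static int add_block(struct threshold_bank *bank, unsigned int block,
                         unsigned int address)
    {
            struct threshold_block *b = calloc(1, sizeof(*b));
            if (!b)
                    return -1;
            b->block = block;
            b->address = address;
            if (!bank->blocks) {
                    bank->blocks = b;               /* first block found */
            } else {
                    b->next = bank->blocks->next;   /* later blocks link in
                                                     * behind the head */
                    bank->blocks->next = b;
            }
            return 0;
    }

    int main(void)
    {
            struct threshold_bank bank = { NULL };
            add_block(&bank, 0, 0x413);
            add_block(&bank, 1, 0xC0000408);
            for (struct threshold_block *b = bank.blocks; b; b = b->next)
                    printf("misc%u at %#x\n", b->block, b->address);
            return 0;
    }
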
@@ -386,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
                 if (err)
                         goto out;
         }
 out:
         return err;
 }
 
@@ -397,92 +558,85 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
  * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
  */
 
-/* cpu hotplug call removes all symlinks before first core dies */
+static __cpuinit void deallocate_threshold_block(unsigned int cpu,
+                                                 unsigned int bank)
+{
+        struct threshold_block *pos = NULL;
+        struct threshold_block *tmp = NULL;
+        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+
+        if (!head)
+                return;
+
+        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+                kobject_unregister(&pos->kobj);
+                list_del(&pos->miscj);
+                kfree(pos);
+        }
+
+        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+}
+
 static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
 {
+        int i = 0;
         struct threshold_bank *b;
-        char name[16];
+        char name[32];
 
         b = per_cpu(threshold_banks, cpu)[bank];
+
         if (!b)
                 return;
-        if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
-                sprintf(name, "bank%i", bank);
-                sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
-                per_cpu(threshold_banks, cpu)[bank] = NULL;
-        } else {
-                kobject_unregister(&b->kobj);
-                kfree(per_cpu(threshold_banks, cpu)[bank]);
+
+        if (!b->blocks)
+                goto free_out;
+
+        sprintf(name, "threshold_bank%i", bank);
+
+        /* sibling symlink */
+        if (shared_bank[bank] && b->blocks->cpu != cpu) {
+                sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
+                per_cpu(threshold_banks, i)[bank] = NULL;
+                return;
+        }
+
+        /* remove all sibling symlinks before unregistering */
+        for_each_cpu_mask(i, b->cpus) {
+                if (i == cpu)
+                        continue;
+
+                sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
+                per_cpu(threshold_banks, i)[bank] = NULL;
         }
+
+        deallocate_threshold_block(cpu, bank);
+
+free_out:
+        kobject_unregister(&b->kobj);
+        kfree(b);
+        per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
 
 static __cpuinit void threshold_remove_device(unsigned int cpu)
 {
-        int bank;
+        unsigned int bank;
 
         for (bank = 0; bank < NR_BANKS; ++bank) {
                 if (!(per_cpu(bank_map, cpu) & 1 << bank))
                         continue;
                 threshold_remove_bank(cpu, bank);
         }
-        sysdev_unregister(&per_cpu(device_threshold, cpu));
 }
 
-/* link all existing siblings when first core comes up */
-static __cpuinit int threshold_create_symlinks(unsigned int cpu)
-{
-        int bank, err = 0;
-        unsigned int lcpu = 0;
-
-        if (cpu_core_id[cpu])
-                return 0;
-        for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
-                if (lcpu == cpu)
-                        continue;
-                for (bank = 0; bank < NR_BANKS; ++bank) {
-                        if (!(per_cpu(bank_map, cpu) & 1 << bank))
-                                continue;
-                        if (!shared_bank[bank])
-                                continue;
-                        err = threshold_create_bank(lcpu, bank);
-                }
-        }
-        return err;
-}
-
-/* remove all symlinks before first core dies. */
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
-        int bank;
-        unsigned int lcpu = 0;
-        if (cpu_core_id[cpu])
-                return;
-        for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
-                if (lcpu == cpu)
-                        continue;
-                for (bank = 0; bank < NR_BANKS; ++bank) {
-                        if (!(per_cpu(bank_map, cpu) & 1 << bank))
-                                continue;
-                        if (!shared_bank[bank])
-                                continue;
-                        threshold_remove_bank(lcpu, bank);
-                }
-        }
-}
 #else /* !CONFIG_HOTPLUG_CPU */
-static __cpuinit void threshold_create_symlinks(unsigned int cpu)
-{
-}
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
-}
 static void threshold_remove_device(unsigned int cpu)
 {
 }
 #endif
 
 /* get notified when a cpu comes on/off */
-static int threshold_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
                                   unsigned long action, void *hcpu)
 {
         /* cpu was unsigned int to begin with */
@@ -494,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
         switch (action) {
         case CPU_ONLINE:
                 threshold_create_device(cpu);
-                threshold_create_symlinks(cpu);
-                break;
-        case CPU_DOWN_PREPARE:
-                threshold_remove_symlinks(cpu);
-                break;
-        case CPU_DOWN_FAILED:
-                threshold_create_symlinks(cpu);
                 break;
         case CPU_DEAD:
                 threshold_remove_device(cpu);
@@ -512,29 +659,22 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block threshold_cpu_notifier = {
+static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
         .notifier_call = threshold_cpu_callback,
 };
 
 static __init int threshold_init_device(void)
 {
-        int err;
-        int lcpu = 0;
-
-        err = sysdev_class_register(&threshold_sysclass);
-        if (err)
-                goto out;
+        unsigned lcpu = 0;
 
         /* to hit CPUs online before the notifier is up */
         for_each_online_cpu(lcpu) {
-                err = threshold_create_device(lcpu);
+                int err = threshold_create_device(lcpu);
                 if (err)
-                        goto out;
+                        return err;
         }
         register_cpu_notifier(&threshold_cpu_notifier);
-
-out:
-        return err;
+        return 0;
 }
 
 device_initcall(threshold_init_device);