author    Linus Torvalds <torvalds@linux-foundation.org>    2011-10-28 08:03:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-10-28 08:03:12 -0400
commit    e34eb39c1c791fe79da6aae0d9057f0c74c2f0ed
tree      cfc27d987b2c888f189f35326702220b694596cc    /arch/x86/kernel/cpu
parent    396e6e49c58bb23d1814d3c240c736c9f01523c5
parent    910b2c5122ab787179a790ca1dec616fc80f0173
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, amd: Include linux/elf.h since we use stuff from asm/elf.h
x86: cache_info: Update calculation of AMD L3 cache indices
x86: cache_info: Kill the atomic allocation in amd_init_l3_cache()
x86: cache_info: Kill the moronic shadow struct
x86: cache_info: Remove bogus free of amd_l3_cache data
x86, amd: Include elf.h explicitly, prepare the code for the module.h split
x86-32, amd: Move va_align definition to unbreak 32-bit build
x86, amd: Move BSP code to cpu_dev helper
x86: Add a BSP cpu_dev helper
x86, amd: Avoid cache aliasing penalties on AMD family 15h
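For context on the last entry above ("x86, amd: Avoid cache aliasing penalties on AMD family 15h"): bsp_init_amd(), added in the amd.c diff below, derives a virtual-address alignment mask from the L1 instruction cache geometry that CPUID leaf 0x80000005 reports in EDX. A minimal user-space sketch of that arithmetic follows; it is illustrative only, the sample EDX value and the 4K PAGE_MASK are assumptions, and it is not the kernel code itself:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))	/* assumes 4K pages */

int main(void)
{
	/*
	 * EDX of CPUID 0x80000005 describes the L1 instruction cache:
	 * bits 31-24 = size in KB, bits 23-16 = associativity.  The value
	 * below is a made-up sample (64K, 2-way), not read from hardware.
	 */
	uint32_t l1i_edx = 0x40020140;
	uint32_t assoc = (l1i_edx >> 16) & 0xff;
	unsigned long upperbit = (((unsigned long)l1i_edx >> 24) << 10) / assoc;
	unsigned long mask = (upperbit - 1) & PAGE_MASK;

	/*
	 * One way of the L1I spans size/assoc bytes (32K here), so mmap()
	 * and stack placement are aligned modulo that span; masking off the
	 * page-offset bits yields 0x7000.
	 */
	printf("va_align.mask = %#lx\n", mask);
	return 0;
}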
Diffstat (limited to 'arch/x86/kernel/cpu')
 -rw-r--r--  arch/x86/kernel/cpu/amd.c              |  47
 -rw-r--r--  arch/x86/kernel/cpu/common.c           |   3
 -rw-r--r--  arch/x86/kernel/cpu/cpu.h              |   1
 -rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  | 127
 4 files changed, 81 insertions(+), 97 deletions(-)
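Structurally, the new BSP hook is small: struct cpu_dev grows a c_bsp_init callback (cpu.h diff), the AMD vendor code supplies bsp_init_amd() for it (amd.c diff), and early_identify_cpu() invokes it, which happens exactly once because that path only runs for the boot processor (common.c diff). A rough standalone sketch of the pattern, using simplified stand-in types rather than the kernel's real definitions:

#include <stdio.h>

struct cpuinfo {			/* stand-in for struct cpuinfo_x86 */
	int family;
};

struct cpu_dev {			/* stand-in for the kernel's struct cpu_dev */
	void (*c_early_init)(struct cpuinfo *);
	void (*c_bsp_init)(struct cpuinfo *);	/* new: boot-CPU-only setup */
	void (*c_init)(struct cpuinfo *);
};

static void bsp_init_amd(struct cpuinfo *c)
{
	printf("one-time BSP setup for family %#x\n", c->family);
}

static const struct cpu_dev amd_cpu_dev = {
	.c_bsp_init = bsp_init_amd,
};

static const struct cpu_dev *this_cpu = &amd_cpu_dev;

/* mirrors early_identify_cpu(): runs once, on the boot processor only */
static void early_identify_cpu(struct cpuinfo *c)
{
	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);
}

int main(void)
{
	struct cpuinfo boot_cpu = { .family = 0x15 };

	early_identify_cpu(&boot_cpu);
	return 0;
}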
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b13ed393dfce..13c6ec812545 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,5 +1,6 @@
 #include <linux/init.h>
 #include <linux/bitops.h>
+#include <linux/elf.h>
 #include <linux/mm.h>
 
 #include <linux/io.h>
@@ -410,6 +411,34 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
+static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+
+		if (c->x86 > 0x10 ||
+		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
+			u64 val;
+
+			rdmsrl(MSR_K7_HWCR, val);
+			if (!(val & BIT(24)))
+				printk(KERN_WARNING FW_BUG "TSC doesn't count "
+					"with P0 frequency!\n");
+		}
+	}
+
+	if (c->x86 == 0x15) {
+		unsigned long upperbit;
+		u32 cpuid, assoc;
+
+		cpuid	 = cpuid_edx(0x80000005);
+		assoc	 = cpuid >> 16 & 0xff;
+		upperbit = ((cpuid >> 24) << 10) / assoc;
+
+		va_align.mask	= (upperbit - 1) & PAGE_MASK;
+		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;
+	}
+}
+
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
@@ -441,23 +470,6 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 	}
 #endif
-
-	/* We need to do the following only once */
-	if (c != &boot_cpu_data)
-		return;
-
-	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
-
-		if (c->x86 > 0x10 ||
-		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
-			u64 val;
-
-			rdmsrl(MSR_K7_HWCR, val);
-			if (!(val & BIT(24)))
-				printk(KERN_WARNING FW_BUG "TSC doesn't count "
-					"with P0 frequency!\n");
-		}
-	}
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -679,6 +691,7 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_size_cache	= amd_size_cache,
 #endif
 	.c_early_init	= early_init_amd,
+	.c_bsp_init	= bsp_init_amd,
 	.c_init		= init_amd,
 	.c_x86_vendor	= X86_VENDOR_AMD,
 };
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 62184390a601..ec63df54d138 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -681,6 +681,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	filter_cpuid_features(c, false);
 
 	setup_smep(c);
+
+	if (this_cpu->c_bsp_init)
+		this_cpu->c_bsp_init(c);
 }
 
 void __init early_cpu_init(void)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index e765633f210e..1b22dcc51af4 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -18,6 +18,7 @@ struct cpu_dev {
 	struct cpu_model_info	c_models[4];
 
 	void		(*c_early_init)(struct cpuinfo_x86 *);
+	void		(*c_bsp_init)(struct cpuinfo_x86 *);
 	void		(*c_init)(struct cpuinfo_x86 *);
 	void		(*c_identify)(struct cpuinfo_x86 *);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c105c533ed94..a3b0811693c9 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
 	u32 full;
 };
 
-struct amd_l3_cache {
-	struct	 amd_northbridge *nb;
-	unsigned indices;
-	u8	 subcaches[4];
-};
-
-struct _cpuid4_info {
+struct _cpuid4_info_regs {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	struct amd_l3_cache *l3;
-	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+	struct amd_northbridge *nb;
 };
 
-/* subset of above _cpuid4_info w/o shared_cpu_map */
-struct _cpuid4_info_regs {
-	union _cpuid4_leaf_eax eax;
-	union _cpuid4_leaf_ebx ebx;
-	union _cpuid4_leaf_ecx ecx;
-	unsigned long size;
-	struct amd_l3_cache *l3;
+struct _cpuid4_info {
+	struct _cpuid4_info_regs base;
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
 unsigned short			num_cache_leaves;
@@ -314,16 +303,23 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
+static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 {
+	struct amd_l3_cache *l3 = &nb->l3_cache;
 	unsigned int sc0, sc1, sc2, sc3;
 	u32 val = 0;
 
-	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
+	pci_read_config_dword(nb->misc, 0x1C4, &val);
 
 	/* calculate subcache sizes */
 	l3->subcaches[0] = sc0 = !(val & BIT(0));
 	l3->subcaches[1] = sc1 = !(val & BIT(4));
+
+	if (boot_cpu_data.x86 == 0x15) {
+		l3->subcaches[0] = sc0 += !(val & BIT(1));
+		l3->subcaches[1] = sc1 += !(val & BIT(5));
+	}
+
 	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
 	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
@@ -333,33 +329,16 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
 					int index)
 {
-	static struct amd_l3_cache *__cpuinitdata l3_caches;
 	int node;
 
 	/* only for L3, and not in virtualized environments */
-	if (index < 3 || amd_nb_num() == 0)
+	if (index < 3)
 		return;
 
-	/*
-	 * Strictly speaking, the amount in @size below is leaked since it is
-	 * never freed but this is done only on shutdown so it doesn't matter.
-	 */
-	if (!l3_caches) {
-		int size = amd_nb_num() * sizeof(struct amd_l3_cache);
-
-		l3_caches = kzalloc(size, GFP_ATOMIC);
-		if (!l3_caches)
-			return;
-	}
-
 	node = amd_get_nb_id(smp_processor_id());
-
-	if (!l3_caches[node].nb) {
-		l3_caches[node].nb = node_to_amd_nb(node);
-		amd_calc_l3_indices(&l3_caches[node]);
-	}
-
-	this_leaf->l3 = &l3_caches[node];
+	this_leaf->nb = node_to_amd_nb(node);
+	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
+		amd_calc_l3_indices(this_leaf->nb);
 }
 
 /*
@@ -369,11 +348,11 @@ static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
 	unsigned int reg = 0;
 
-	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
+	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
 
 	/* check whether this slot is activated already */
 	if (reg & (3UL << 30))
@@ -387,11 +366,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
 	int index;
 
-	if (!this_leaf->l3 ||
-	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
-	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
 	if (index >= 0)
 		return sprintf(buf, "%d\n", index);
 
@@ -408,7 +386,7 @@ show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
 SHOW_CACHE_DISABLE(0)
 SHOW_CACHE_DISABLE(1)
 
-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
 				 unsigned slot, unsigned long idx)
 {
 	int i;
@@ -421,10 +399,10 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 	for (i = 0; i < 4; i++) {
 		u32 reg = idx | (i << 20);
 
-		if (!l3->subcaches[i])
+		if (!nb->l3_cache.subcaches[i])
 			continue;
 
-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
 
 		/*
 		 * We need to WBINVD on a core on the node containing the L3
@@ -434,7 +412,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		wbinvd_on_cpu(cpu);
 
 		reg |= BIT(31);
-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
 	}
 }
 
@@ -448,24 +426,24 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
 			    unsigned long index)
 {
 	int ret = 0;
 
 	/* check if @slot is already used or the index is already disabled */
-	ret = amd_get_l3_disable_slot(l3, slot);
+	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
 		return -EINVAL;
 
-	if (index > l3->indices)
+	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
-	if (index == amd_get_l3_disable_slot(l3, !slot))
+	if (index == amd_get_l3_disable_slot(nb, !slot))
 		return -EINVAL;
 
-	amd_l3_disable_index(l3, cpu, slot, index);
+	amd_l3_disable_index(nb, cpu, slot, index);
 
 	return 0;
 }
@@ -480,8 +458,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->l3 ||
-	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -489,7 +466,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (strict_strtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
-	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
 			printk(KERN_WARNING "L3 disable slot %d in use!\n",
@@ -518,7 +495,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 static ssize_t
 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
 {
-	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return -EINVAL;
 
 	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
@@ -533,7 +510,7 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return -EINVAL;
 
 	if (strict_strtoul(buf, 16, &val) < 0)
@@ -769,7 +746,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		return;
 	}
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
+	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
 		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
@@ -820,29 +797,19 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
 	kfree(per_cpu(ici_cpuid4_info, cpu));
 	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
-static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-	struct _cpuid4_info_regs *leaf_regs =
-		(struct _cpuid4_info_regs *)this_leaf;
-
-	return cpuid4_cache_lookup_regs(index, leaf_regs);
-}
-
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
 
 	/* Do cpuid and store the results */
 	for (j = 0; j < num_cache_leaves; j++) {
-		struct _cpuid4_info *this_leaf;
-		this_leaf = CPUID4_INFO_IDX(cpu, j);
-		*retval = cpuid4_cache_lookup(j, this_leaf);
+		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
+
+		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
 		if (unlikely(*retval < 0)) {
 			int i;
 
@@ -900,16 +867,16 @@ static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
 	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
 
-show_one_plus(level, eax.split.level, 0);
-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
+show_one_plus(level, base.eax.split.level, 0);
+show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
+show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
+show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
+show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
 
 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
 			  unsigned int cpu)
 {
-	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
+	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
 }
 
 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -946,7 +913,7 @@ static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
 			 unsigned int cpu)
 {
-	switch (this_leaf->eax.split.type) {
+	switch (this_leaf->base.eax.split.type) {
 	case CACHE_TYPE_DATA:
 		return sprintf(buf, "Data\n");
 	case CACHE_TYPE_INST:
@@ -1135,7 +1102,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
 	ktype_cache.default_attrs = default_attrs;
 #ifdef CONFIG_AMD_NB
-	if (this_leaf->l3)
+	if (this_leaf->base.nb)
 		ktype_cache.default_attrs = amd_l3_attrs();
 #endif
 	retval = kobject_init_and_add(&(this_object->kobj),