author     Borislav Petkov <borislav.petkov@amd.com>   2010-04-22 10:07:02 -0400
committer  H. Peter Anvin <hpa@zytor.com>              2010-04-22 20:17:27 -0400
commit     59d3b388741cf1a5eb7ad27fd4e9ed72643164ae
tree       a1c4de47be3b0eb272ab08e661022e911166aa75    /arch/x86
parent     ba06edb63f5ef2913aad37070eaec3c9d8ac73b8
x86, cacheinfo: Disable index in all four subcaches
When disabling an L3 cache index, make sure we disable that index in
all four subcaches of the L3. While at it, clarify the nomenclature
wrt disable slots versus disable index, and rename accordingly.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <1271945222-5283-6-git-send-email-bp@amd64.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
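For reference (not part of the commit), the register values the new amd_l3_disable_index() helper writes can be illustrated in isolation. The following is a minimal stand-alone C sketch, not kernel code; the helper name l3_disable_words() and the example index 0x7ff are made up for illustration. It assumes the bit layout the patch relies on: the cache index in the low bits, the subcache selector shifted into bits 20+, BIT(30) set on the first write, and BIT(31) added on the second write that follows the WBINVD.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/*
 * Build the two dwords that the patch writes for one subcache:
 * first the disable request (BIT(30)), then the same value with
 * BIT(31) also set after the WBINVD on the node.  'slot' (0 or 1)
 * picks the config-space offset (0x1BC or 0x1C0), 'subcache' goes
 * into bits 20+, 'idx' is the L3 cache index being disabled.
 */
static void l3_disable_words(unsigned slot, unsigned subcache,
			     unsigned long idx,
			     uint32_t *first, uint32_t *second,
			     unsigned *pci_off)
{
	uint32_t reg = (uint32_t)idx | BIT(30) | (subcache << 20);

	*first   = reg;
	*second  = reg | BIT(31);
	*pci_off = 0x1BC + slot * 4;
}

int main(void)
{
	uint32_t first, second;
	unsigned off;
	int i;

	/* Example: disable index 0x7ff via slot 0 in all four subcaches. */
	for (i = 0; i < 4; i++) {
		l3_disable_words(0, i, 0x7ff, &first, &second, &off);
		printf("subcache %d: off 0x%x, write 0x%08x then 0x%08x\n",
		       i, off, first, second);
	}
	return 0;
}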
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kernel/cpu/intel_cacheinfo.c   60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1346e9c23fc4..33eae2062cf5 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -397,7 +397,7 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 }
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
-				  unsigned int index)
+				  unsigned int slot)
 {
 	struct pci_dev *dev = this_leaf->l3->dev;
 	unsigned int reg = 0;
@@ -408,21 +408,53 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 	if (!dev)
 		return -EINVAL;
 
-	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+	pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
 	return sprintf(buf, "0x%08x\n", reg);
 }
 
-#define SHOW_CACHE_DISABLE(index)					\
+#define SHOW_CACHE_DISABLE(slot)					\
 static ssize_t								\
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
 {									\
-	return show_cache_disable(this_leaf, buf, index);		\
+	return show_cache_disable(this_leaf, buf, slot);		\
 }
 SHOW_CACHE_DISABLE(0)
 SHOW_CACHE_DISABLE(1)
 
+static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+				 unsigned slot, unsigned long idx)
+{
+	int i;
+
+	idx |= BIT(30);
+
+	/*
+	 * disable index in all 4 subcaches
+	 */
+	for (i = 0; i < 4; i++) {
+		u32 reg = idx | (i << 20);
+
+		if (!l3->subcaches[i])
+			continue;
+
+		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+
+		/*
+		 * We need to WBINVD on a core on the node containing the L3
+		 * cache which indices we disable therefore a simple wbinvd()
+		 * is not sufficient.
+		 */
+		wbinvd_on_cpu(cpu);
+
+		reg |= BIT(31);
+		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+	}
+}
+
+
 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-	const char *buf, size_t count, unsigned int index)
+				   const char *buf, size_t count,
+				   unsigned int slot)
 {
 	struct pci_dev *dev = this_leaf->l3->dev;
 	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -448,23 +480,17 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
 		return -EINVAL;
 
-	val |= BIT(30);
-	pci_write_config_dword(dev, 0x1BC + index * 4, val);
-	/*
-	 * We need to WBINVD on a core on the node containing the L3 cache which
-	 * indices we disable therefore a simple wbinvd() is not sufficient.
-	 */
-	wbinvd_on_cpu(cpu);
-	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+	amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
+
 	return count;
 }
 
-#define STORE_CACHE_DISABLE(index)					\
+#define STORE_CACHE_DISABLE(slot)					\
 static ssize_t								\
-store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
+store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
 			    const char *buf, size_t count)		\
 {									\
-	return store_cache_disable(this_leaf, buf, count, index);	\
+	return store_cache_disable(this_leaf, buf, count, slot);	\
 }
 STORE_CACHE_DISABLE(0)
 STORE_CACHE_DISABLE(1)
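As a usage note rather than part of the commit: the two disable slots are exposed through the cache_disable_0 and cache_disable_1 sysfs attributes generated in this file. The path below assumes a typical layout where index3 is the shared L3, and the decimal value is just an example index; both are assumptions for illustration. A minimal user-space sketch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; index3 is taken to be the shared L3 cache. */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	const char *idx = "2047\n";	/* example cache index, decimal */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, idx, strlen(idx)) < 0)
		perror("write");
	close(fd);
	return 0;
}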