diff options
author | Frank Arnold <frank.arnold@amd.com> | 2011-05-16 09:39:47 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-05-16 14:24:27 -0400 |
commit | 42be450565b0fc4607fae3e3a7da038d367a23ed (patch) | |
tree | b6b42606ee23185c14414c459ddcd43e9d0777a0 /arch/x86/kernel/cpu/intel_cacheinfo.c | |
parent | 50e7534427283afd997d58481778c07bea79eb63 (diff) |
x86, AMD, cacheinfo: Fix L3 cache index disable checks
We provide two slots to disable cache indices, and have a check to
prevent both slots from being used for the same index.
If the user disables the same index on different subcaches, both slots
will hold the same index, e.g.
$ echo 2047 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
$ cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
2047
$ echo 1050623 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_1
$ cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_1
2047
due to the fact that the check was looking only at index bits [11:0]
and was ignoring writes to bits outside that range. The more correct
fix is to simply check whether the index is within the bounds of
[0..l3->indices].
While at it, cleanup comments and drop now-unused local macros.
Signed-off-by: Frank Arnold <frank.arnold@amd.com>
Link: http://lkml.kernel.org/r/1305553188-21061-3-git-send-email-bp@amd64.org
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c | 19 |
1 files changed, 4 insertions, 15 deletions
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 31590a001e2a..c105c533ed94 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -453,27 +453,16 @@ int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot, | |||
453 | { | 453 | { |
454 | int ret = 0; | 454 | int ret = 0; |
455 | 455 | ||
456 | #define SUBCACHE_MASK (3UL << 20) | 456 | /* check if @slot is already used or the index is already disabled */ |
457 | #define SUBCACHE_INDEX 0xfff | ||
458 | |||
459 | /* | ||
460 | * check whether this slot is already used or | ||
461 | * the index is already disabled | ||
462 | */ | ||
463 | ret = amd_get_l3_disable_slot(l3, slot); | 457 | ret = amd_get_l3_disable_slot(l3, slot); |
464 | if (ret >= 0) | 458 | if (ret >= 0) |
465 | return -EINVAL; | 459 | return -EINVAL; |
466 | 460 | ||
467 | /* | 461 | if (index > l3->indices) |
468 | * check whether the other slot has disabled the | ||
469 | * same index already | ||
470 | */ | ||
471 | if (index == amd_get_l3_disable_slot(l3, !slot)) | ||
472 | return -EINVAL; | 462 | return -EINVAL; |
473 | 463 | ||
474 | /* do not allow writes outside of allowed bits */ | 464 | /* check whether the other slot has disabled the same index already */ |
475 | if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) || | 465 | if (index == amd_get_l3_disable_slot(l3, !slot)) |
476 | ((index & SUBCACHE_INDEX) > l3->indices)) | ||
477 | return -EINVAL; | 466 | return -EINVAL; |
478 | 467 | ||
479 | amd_l3_disable_index(l3, cpu, slot, index); | 468 | amd_l3_disable_index(l3, cpu, slot, index); |