author     Borislav Petkov <borislav.petkov@amd.com>    2010-04-22 10:07:00 -0400
committer  H. Peter Anvin <hpa@zytor.com>               2010-04-22 20:17:23 -0400
commit     9350f982e4fe539e83a2d4a13e9b53ad8253c4a8 (patch)
tree       4ee595d0ba964446b2b06c026cbc6964e3ce2cdc /arch
parent     f2b20e41407fccfcfacf927ff91ec888832a37af (diff)
x86, cacheinfo: Reorganize AMD L3 cache structure
Add a struct representing L3 cache attributes (subcache sizes and
indices count) and move the respective members out of _cpuid4_info.
Also, stash the struct pci_dev pointer into the struct, simplifying the code
even more.
There should be no functionality change resulting from this patch except
slightly slimming the _cpuid4_info per-cpu vars.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <1271945222-5283-4-git-send-email-bp@amd64.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
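At a glance, the reorganization looks like this (a condensed sketch; the _old/_new suffixes are illustrative, and the fields shown are the ones touched by the diff below):

```c
/* Before: every per-cpu _cpuid4_info instance carried the L3 fields,
 * whether or not the CPU had a disable-capable L3. */
struct _cpuid4_info_old {
	/* ... cpuid leaf 4 registers, size, shared_cpu_map ... */
	bool can_disable;
	unsigned int l3_indices;
};

/* After: all L3 attributes live in one separately allocated struct,
 * which also stashes the northbridge PCI device. */
struct amd_l3_cache {
	struct pci_dev *dev;
	bool can_disable;
	unsigned indices;
	u8 subcaches[4];
};

struct _cpuid4_info_new {
	/* ... cpuid leaf 4 registers, size, shared_cpu_map ... */
	struct amd_l3_cache *l3;	/* NULL when not applicable */
};
```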
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	53
1 file changed, 32 insertions(+), 21 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 5ab14c86c6ec..ff663ca63fdc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -148,13 +148,19 @@ union _cpuid4_leaf_ecx {
 	u32 full;
 };
 
+struct amd_l3_cache {
+	struct pci_dev *dev;
+	bool can_disable;
+	unsigned indices;
+	u8 subcaches[4];
+};
+
 struct _cpuid4_info {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	bool can_disable;
-	unsigned int l3_indices;
+	struct amd_l3_cache *l3;
 	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
@@ -164,8 +170,7 @@ struct _cpuid4_info_regs {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	bool can_disable;
-	unsigned int l3_indices;
+	struct amd_l3_cache *l3;
 };
 
 unsigned short num_cache_leaves;
@@ -302,7 +307,7 @@ struct _cache_attr {
 };
 
 #ifdef CONFIG_CPU_SUP_AMD
-static unsigned int __cpuinit amd_calc_l3_indices(void)
+static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
 	/*
 	 * We're called over smp_call_function_single() and therefore
@@ -317,12 +322,14 @@ static unsigned int __cpuinit amd_calc_l3_indices(void)
 	pci_read_config_dword(dev, 0x1C4, &val);
 
 	/* calculate subcache sizes */
-	sc0 = !(val & BIT(0));
-	sc1 = !(val & BIT(4));
-	sc2 = !(val & BIT(8))  + !(val & BIT(9));
-	sc3 = !(val & BIT(12)) + !(val & BIT(13));
+	l3->subcaches[0] = sc0 = !(val & BIT(0));
+	l3->subcaches[1] = sc1 = !(val & BIT(4));
+	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
+	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
+
+	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
 
-	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+	l3->dev = dev;
 }
 
 static void __cpuinit
```
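To make the arithmetic above concrete: a cleared bit in the register read from PCI config offset 0x1C4 counts toward the corresponding subcache's size (subcaches 2 and 3 are encoded with two bits each), and the largest subcache size determines the number of valid disable indices. Below is a minimal user-space sketch of the same decoding; the register value is hardcoded for illustration, whereas in the kernel it comes from pci_read_config_dword() on the northbridge:

```c
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Mirror of amd_calc_l3_indices(): decode the subcache bits of the
 * L3 register at PCI config offset 0x1C4 and derive the highest
 * valid cache index. A cleared bit counts toward the subcache size;
 * sc2 and sc3 are encoded with two bits each. */
static unsigned calc_l3_indices(unsigned val, unsigned char sc[4])
{
	unsigned sc0, sc1, sc2, sc3, m;

	sc[0] = sc0 = !(val & BIT(0));
	sc[1] = sc1 = !(val & BIT(4));
	sc[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	/* open-coded max(max(max(sc0, sc1), sc2), sc3) */
	m = sc0;
	if (sc1 > m) m = sc1;
	if (sc2 > m) m = sc2;
	if (sc3 > m) m = sc3;

	/* one size unit corresponds to 1024 indices; callers compare
	 * against the highest valid index, hence the final -1 */
	return (m << 10) - 1;
}

int main(void)
{
	unsigned char sc[4];
	unsigned idx = calc_l3_indices(0x0, sc); /* no subcache bits set */

	printf("indices=%u sc=%u/%u/%u/%u\n", idx,
	       (unsigned)sc[0], (unsigned)sc[1],
	       (unsigned)sc[2], (unsigned)sc[3]);
	return 0;
}
```

With val = 0 this yields sc = {1, 1, 2, 2} and 2047 indices, which is exactly the upper bound store_cache_disable() enforces further down.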
```diff
@@ -348,19 +355,23 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 	if (num_k8_northbridges == 0)
 		return;
 
-	this_leaf->can_disable = true;
-	this_leaf->l3_indices = amd_calc_l3_indices();
+	this_leaf->l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+	if (!this_leaf->l3) {
+		printk(KERN_WARNING "Error allocating L3 struct\n");
+		return;
+	}
+
+	this_leaf->l3->can_disable = true;
+	amd_calc_l3_indices(this_leaf->l3);
 }
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 				  unsigned int index)
 {
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = amd_get_nb_id(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	struct pci_dev *dev = this_leaf->l3->dev;
 	unsigned int reg = 0;
 
-	if (!this_leaf->can_disable)
+	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;
 
 	if (!dev)
```
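Two things stand out in this hunk. The allocation uses GFP_ATOMIC, presumably because this path can execute under smp_call_function_single() (per the comment in amd_calc_l3_indices() above), where a sleeping allocation is not allowed. Also note that show_cache_disable() now initializes dev from this_leaf->l3->dev before the !this_leaf->l3 guard runs; if l3 can actually be NULL at that point, the initializer dereferences a NULL pointer first, so the new guard effectively protects only the !can_disable case.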
```diff
@@ -382,15 +393,14 @@ SHOW_CACHE_DISABLE(1)
 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	const char *buf, size_t count, unsigned int index)
 {
+	struct pci_dev *dev = this_leaf->l3->dev;
 	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = amd_get_nb_id(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
 	unsigned long val = 0;
 
 #define SUBCACHE_MASK	(3UL << 20)
 #define SUBCACHE_INDEX	0xfff
 
-	if (!this_leaf->can_disable)
+	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -404,7 +414,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 
 	/* do not allow writes outside of allowed bits */
 	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
+	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
 		return -EINVAL;
 
 	val |= BIT(30);
```
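In other words, a value written to one of the cache_disable_<n> attributes (cf. the SHOW_CACHE_DISABLE(1) macro above) may carry exactly two fields: a subcache selector in bits 21:20 (SUBCACHE_MASK) and a cache index in bits 11:0 (SUBCACHE_INDEX), the latter bounded by l3->indices. A small self-contained sketch of that check; the example values are hypothetical:

```c
#include <stdio.h>

#define SUBCACHE_MASK	(3UL << 20)	/* bits 21:20: subcache selector */
#define SUBCACHE_INDEX	0xfffUL		/* bits 11:0:  cache index */

/* Mirror of the bounds check above: reject any value that sets bits
 * outside the two allowed fields, or an index beyond the maximum. */
static int valid_disable_val(unsigned long val, unsigned long max_index)
{
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > max_index))
		return 0;	/* the kernel returns -EINVAL here */
	return 1;
}

int main(void)
{
	unsigned long ok  = (3UL << 20) | 0x400; /* selector bits + index 0x400 */
	unsigned long bad = 1UL << 22;           /* stray bit outside both fields */

	printf("%d %d\n", valid_disable_val(ok, 2047),
	       valid_disable_val(bad, 2047));    /* prints: 1 0 */
	return 0;
}
```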
```diff
@@ -708,6 +718,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
+	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
 	kfree(per_cpu(ici_cpuid4_info, cpu));
 	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
```
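The new kfree() has to come first, while the containing _cpuid4_info is still alive, and it is safe for leaves that never got an amd_l3_cache: kfree(NULL) is a no-op, and ->l3 starts out NULL as long as the per-cpu structures are zero-initialized on allocation. One caveat: per_cpu(ici_cpuid4_info, cpu) points at the first leaf of the per-cpu array (cf. CPUID4_INFO_IDX() below), so only leaf 0's l3 pointer is freed here; if the struct is in fact attached to a higher-index leaf (the L3 leaf), that allocation would leak.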
```diff
@@ -992,7 +1003,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
 		this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-		if (this_leaf->can_disable)
+		if (this_leaf->l3 && this_leaf->l3->can_disable)
 			ktype_cache.default_attrs = default_l3_attrs;
 		else
 			ktype_cache.default_attrs = default_attrs;
```