author:    Borislav Petkov <borislav.petkov@amd.com>  2010-04-22 10:07:01 -0400
committer: H. Peter Anvin <hpa@zytor.com>             2010-04-22 20:17:23 -0400
commit:    ba06edb63f5ef2913aad37070eaec3c9d8ac73b8 (patch)
tree:      79605f9dfc645a6be5045e1fb18201befe27fef3 /arch/x86/kernel/cpu
parent:    9350f982e4fe539e83a2d4a13e9b53ad8253c4a8 (diff)
x86, cacheinfo: Make L3 cache info per node
Currently, we're allocating L3 cache info and calculating indices for
each online cpu, which is clearly superfluous: there is exactly one L3
cache per node, so do the allocation and index calculation once per
node instead.

No functional change, only per-cpu memory savings.
-v2: Allocate L3 cache descriptors array dynamically.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <1271945222-5283-5-git-send-email-bp@amd64.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
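The pattern the patch introduces is a lazily allocated array of per-node L3 descriptors: the array itself is created on first use, each node's descriptor is allocated once, and every CPU on that node then points at the shared descriptor. Below is a minimal standalone C sketch of that pattern; the names (`l3_desc`, `nr_nodes`, `get_l3_desc()`) and the stubbed index value are illustrative stand-ins, not the kernel's code.

```c
#include <stdio.h>
#include <stdlib.h>

struct l3_desc {
        int node;
        unsigned int indices;
};

static struct l3_desc **l3_descs;       /* one slot per node, filled lazily */
static int nr_nodes = 4;                /* stand-in for num_k8_northbridges */

static struct l3_desc *init_l3_desc(int node)
{
        struct l3_desc *l3 = calloc(1, sizeof(*l3));

        if (!l3)
                return NULL;

        l3->node = node;
        l3->indices = (1u << 10) - 1;   /* placeholder; see the real calculation in the diff */
        return l3;
}

/* Called for every CPU, but allocates at most one descriptor per node. */
static struct l3_desc *get_l3_desc(int node)
{
        if (!l3_descs) {
                l3_descs = calloc(nr_nodes, sizeof(*l3_descs));
                if (!l3_descs)
                        return NULL;
        }

        if (!l3_descs[node])
                l3_descs[node] = init_l3_desc(node);

        return l3_descs[node];
}

int main(void)
{
        /* two "CPUs" on node 0 end up sharing one descriptor */
        struct l3_desc *a = get_l3_desc(0);
        struct l3_desc *b = get_l3_desc(0);

        printf("same descriptor: %s\n", (a && a == b) ? "yes" : "no");
        return 0;
}
```

This is why the saving is per-cpu: the old code kept one amd_l3_cache per online CPU, while the new code keeps one per node, however many CPUs share it.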
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--    arch/x86/kernel/cpu/intel_cacheinfo.c    59
1 file changed, 45 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff663ca63fdc..1346e9c23fc4 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -307,19 +307,18 @@ struct _cache_attr {
 };
 
 #ifdef CONFIG_CPU_SUP_AMD
+
+/*
+ * L3 cache descriptors
+ */
+static struct amd_l3_cache **__cpuinitdata l3_caches;
+
 static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
-        /*
-         * We're called over smp_call_function_single() and therefore
-         * are on the correct cpu.
-         */
-        int cpu = smp_processor_id();
-        int node = cpu_to_node(cpu);
-        struct pci_dev *dev = node_to_k8_nb_misc(node);
         unsigned int sc0, sc1, sc2, sc3;
         u32 val = 0;
 
-        pci_read_config_dword(dev, 0x1C4, &val);
+        pci_read_config_dword(l3->dev, 0x1C4, &val);
 
         /* calculate subcache sizes */
         l3->subcaches[0] = sc0 = !(val & BIT(0));
@@ -328,13 +327,31 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
         l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
         l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
+static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+{
+        struct amd_l3_cache *l3;
+        struct pci_dev *dev = node_to_k8_nb_misc(node);
+
+        l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+        if (!l3) {
+                printk(KERN_WARNING "Error allocating L3 struct\n");
+                return NULL;
+        }
 
         l3->dev = dev;
+
+        amd_calc_l3_indices(l3);
+
+        return l3;
 }
 
 static void __cpuinit
 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
+        int node;
+
         if (boot_cpu_data.x86 != 0x10)
                 return;
 
@@ -355,14 +372,28 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
         if (num_k8_northbridges == 0)
                 return;
 
-        this_leaf->l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
-        if (!this_leaf->l3) {
-                printk(KERN_WARNING "Error allocating L3 struct\n");
-                return;
+        /*
+         * Strictly speaking, the amount in @size below is leaked since it is
+         * never freed but this is done only on shutdown so it doesn't matter.
+         */
+        if (!l3_caches) {
+                int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+
+                l3_caches = kzalloc(size, GFP_ATOMIC);
+                if (!l3_caches)
+                        return;
         }
 
-        this_leaf->l3->can_disable = true;
-        amd_calc_l3_indices(this_leaf->l3);
+        node = amd_get_nb_id(smp_processor_id());
+
+        if (!l3_caches[node]) {
+                l3_caches[node] = amd_init_l3_cache(node);
+                l3_caches[node]->can_disable = true;
+        }
+
+        WARN_ON(!l3_caches[node]);
+
+        this_leaf->l3 = l3_caches[node];
 }
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
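For reference, the index calculation kept in amd_calc_l3_indices() derives each subcache's size from the disable bits in northbridge config register 0x1C4 and then takes (largest subcache << 10) - 1 as the number of usable indices. A standalone sketch of just that arithmetic, with a made-up register value in place of the pci_read_config_dword() call (the helper name calc_l3_indices and the explicit max chain are illustrative):

```c
#include <stdio.h>

#define BIT(n) (1u << (n))

/* bit layout as used by the patch: bits 0, 4, 8/9, 12/13 disable subcaches */
static unsigned int calc_l3_indices(unsigned int val)
{
        unsigned int sc0, sc1, sc2, sc3, m;

        sc0 = !(val & BIT(0));
        sc1 = !(val & BIT(4));
        sc2 = !(val & BIT(8))  + !(val & BIT(9));
        sc3 = !(val & BIT(12)) + !(val & BIT(13));

        /* the largest subcache determines the index range */
        m = sc0;
        if (sc1 > m) m = sc1;
        if (sc2 > m) m = sc2;
        if (sc3 > m) m = sc3;

        return (m << 10) - 1;
}

int main(void)
{
        /* val == 0: nothing disabled, so sc2/sc3 == 2 */
        printf("indices = %u\n", calc_l3_indices(0));
        return 0;
}
```

With nothing disabled the result is (2 << 10) - 1 = 2047, matching what the kernel computes for a fully enabled L3.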