Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	74
1 file changed, 26 insertions(+), 48 deletions(-)
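In short: the separately allocated struct amd_l3_cache objects go away. The L3 bookkeeping (indices, subcaches[]) is folded into struct amd_northbridge itself, struct _cpuid4_info_regs now carries a struct amd_northbridge * directly, the lazy kzalloc of the per-node array is dropped, and every PCI config access loses one level of indirection (l3->nb->misc becomes nb->misc).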
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 311322bb712a..951820f4e02b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -151,18 +151,12 @@ union _cpuid4_leaf_ecx {
 	u32 full;
 };
 
-struct amd_l3_cache {
-	struct amd_northbridge *nb;
-	unsigned indices;
-	u8 subcaches[4];
-};
-
 struct _cpuid4_info_regs {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	struct amd_l3_cache *l3;
+	struct amd_northbridge *nb;
 };
 
 struct _cpuid4_info {
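For orientation, this is the shape the two structures end up in. The amd_nb side of the change is outside this file's diffstat, so the sketch below is inferred from the nb->l3_cache and nb->misc references in the hunks, not quoted from the header:

	/* sketch, assuming the companion <asm/amd_nb.h> change -- inferred, not verbatim */
	struct amd_l3_cache {
		unsigned indices;
		u8	 subcaches[4];
	};

	struct amd_northbridge {
		struct pci_dev		*misc;
		struct amd_l3_cache	l3_cache;	/* embedded, no longer kzalloc'd */
	};

The back-pointer the old struct carried (struct amd_northbridge *nb) becomes redundant once the descriptor is embedded in the northbridge itself.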
@@ -309,12 +303,13 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
+static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 {
+	struct amd_l3_cache *l3 = &nb->l3_cache;
 	unsigned int sc0, sc1, sc2, sc3;
 	u32 val = 0;
 
-	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
+	pci_read_config_dword(nb->misc, 0x1C4, &val);
 
 	/* calculate subcache sizes */
 	l3->subcaches[0] = sc0 = !(val & BIT(0));
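Only the first of the four subcache probes survives the hunk cut. Following the pattern of the visible line, the function presumably goes on to test BIT(4), BIT(8) and BIT(12) for the remaining subcaches and then derives indices, which amd_init_l3_cache() below relies on as its "already initialized" marker; an inferred sketch:

	/* inferred continuation of amd_calc_l3_indices() -- beyond the hunk, unverified */
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8));
	l3->subcaches[3] = sc3 = !(val & BIT(12));

	/* something like: the largest subcache determines the usable index range */
	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;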
@@ -328,33 +323,16 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
 					int index)
 {
-	static struct amd_l3_cache *__cpuinitdata l3_caches;
 	int node;
 
 	/* only for L3, and not in virtualized environments */
-	if (index < 3 || amd_nb_num() == 0)
+	if (index < 3)
 		return;
 
-	/*
-	 * Strictly speaking, the amount in @size below is leaked since it is
-	 * never freed but this is done only on shutdown so it doesn't matter.
-	 */
-	if (!l3_caches) {
-		int size = amd_nb_num() * sizeof(struct amd_l3_cache);
-
-		l3_caches = kzalloc(size, GFP_ATOMIC);
-		if (!l3_caches)
-			return;
-	}
-
 	node = amd_get_nb_id(smp_processor_id());
-
-	if (!l3_caches[node].nb) {
-		l3_caches[node].nb = node_to_amd_nb(node);
-		amd_calc_l3_indices(&l3_caches[node]);
-	}
-
-	this_leaf->l3 = &l3_caches[node];
+	this_leaf->nb = node_to_amd_nb(node);
+	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
+		amd_calc_l3_indices(this_leaf->nb);
 }
 
 /*
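Two guards collapse here: the explicit amd_nb_num() == 0 check goes away because node_to_amd_nb() presumably returns NULL when there are no northbridges (which is what makes the this_leaf->nb NULL test sufficient), and a nonzero l3_cache.indices now serves as the "already computed" marker that the l3_caches[node].nb pointer used to provide. A minimal sketch of the accessor this relies on, assuming the usual array-bounds form:

	/* assumed shape of node_to_amd_nb() -- lives in <asm/amd_nb.h>, not in this diff */
	static inline struct amd_northbridge *node_to_amd_nb(int node)
	{
		return (node < amd_nb_num()) ? &amd_northbridges.nb[node] : NULL;
	}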
@@ -364,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
 	unsigned int reg = 0;
 
-	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
+	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
 
 	/* check whether this slot is activated already */
 	if (reg & (3UL << 30))
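The hunk stops right at the interesting test. Reading the visible code together with the @returns line: each slot lives in a 32-bit register at 0x1BC + slot * 4 behind nb->misc, and the top two bits flag a slot that is already in use. The tail that completes the visible if is inferred:

	/* inferred tail of amd_get_l3_disable_slot() -- beyond the hunk, unverified */
	if (reg & (3UL << 30))
		return reg & 0xfff;	/* slot used: return the disabled index */

	return -1;			/* slot free */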
@@ -382,10 +360,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
 	int index;
 
-	if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
-	index = amd_get_l3_disable_slot(this_leaf->base.l3, slot);
+	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
 	if (index >= 0)
 		return sprintf(buf, "%d\n", index);
 
@@ -402,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
 SHOW_CACHE_DISABLE(0)
 SHOW_CACHE_DISABLE(1)
 
-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
 				 unsigned slot, unsigned long idx)
 {
 	int i;
@@ -415,10 +393,10 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 	for (i = 0; i < 4; i++) {
 		u32 reg = idx | (i << 20);
 
-		if (!l3->subcaches[i])
+		if (!nb->l3_cache.subcaches[i])
 			continue;
 
-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
 
 		/*
 		 * We need to WBINVD on a core on the node containing the L3
@@ -428,7 +406,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		wbinvd_on_cpu(cpu);
 
 		reg |= BIT(31);
-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
 	}
 }
 
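Worth spelling out what the rewritten loop does, since only the names changed. A condensed restatement of the code above (same calls, nothing new):

	/* per enabled subcache i: two-phase disable, restated from the loop above */
	reg = idx | (i << 20);			/* index plus subcache selector */
	pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	wbinvd_on_cpu(cpu);			/* flush a core on the node owning this L3 */
	pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg | BIT(31));	/* arm it */

Subcaches that amd_calc_l3_indices() found absent are skipped, and per the in-line comment a plain wbinvd() on the current CPU would not be enough, since the flush has to happen on the node that owns the cache.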
@@ -442,24 +420,24 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
 			    unsigned long index)
 {
 	int ret = 0;
 
 	/* check if @slot is already used or the index is already disabled */
-	ret = amd_get_l3_disable_slot(l3, slot);
+	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
 		return -EINVAL;
 
-	if (index > l3->indices)
+	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
-	if (index == amd_get_l3_disable_slot(l3, !slot))
+	if (index == amd_get_l3_disable_slot(nb, !slot))
 		return -EINVAL;
 
-	amd_l3_disable_index(l3, cpu, slot, index);
+	amd_l3_disable_index(nb, cpu, slot, index);
 
 	return 0;
 }
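This is the exported entry point; the sysfs store path below is its only caller visible in this diff. A hedged sketch of what a direct in-kernel caller would look like after this change, using only names that appear in these hunks (the wrapper itself is hypothetical):

	/* hypothetical caller -- names from this diff, glue code is assumed */
	static int disable_l3_index(int cpu, unsigned slot, unsigned long idx)
	{
		struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));

		if (!nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
			return -EINVAL;

		return amd_set_l3_disable_slot(nb, cpu, slot, idx);
	}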
@@ -474,7 +452,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -482,7 +460,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (strict_strtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
-	err = amd_set_l3_disable_slot(this_leaf->base.l3, cpu, slot, val);
+	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
 			printk(KERN_WARNING "L3 disable slot %d in use!\n",
@@ -511,7 +489,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 static ssize_t
 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
 {
-	if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return -EINVAL;
 
 	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
@@ -526,7 +504,7 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return -EINVAL;
 
 	if (strict_strtoul(buf, 16, &val) < 0)
@@ -1118,7 +1096,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
 	ktype_cache.default_attrs = default_attrs;
 #ifdef CONFIG_AMD_NB
-	if (this_leaf->base.l3)
+	if (this_leaf->base.nb)
 		ktype_cache.default_attrs = amd_l3_attrs();
 #endif
 	retval = kobject_init_and_add(&(this_object->kobj),
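Net effect at the sysfs level: nothing user-visible changes. The cache_disable_{0,1} and subcaches attributes are still registered for the L3 leaf (index < 3 is rejected in amd_init_l3_cache() above, so this is the index3 cacheinfo directory in the usual /sys/devices/system/cpu/cpu*/cache/ layout, a path this diff itself does not show); they just reach the hardware through this_leaf->base.nb instead of the old base.l3 pointer.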