author     Borislav Petkov <bp@suse.de>      2013-06-08 12:48:15 -0400
committer  Ingo Molnar <mingo@kernel.org>    2013-06-20 06:27:41 -0400
commit     719038de98bc8479b771c582a1e4a1e86079da22 (patch)
tree       92eac51c7a8d0e89cf683fc195f88278ef5155cf /arch/x86
parent     4d067d8e056d76a3327f0517c7722db55e7888fc (diff)
x86/intel/cacheinfo: Shut up last long-standing warning
arch/x86/kernel/cpu/intel_cacheinfo.c: In function ‘init_intel_cacheinfo’:
arch/x86/kernel/cpu/intel_cacheinfo.c:642:28: warning: ‘this_leaf.size’ may be used uninitialized in this function [-Wmaybe-uninitialized]
arch/x86/kernel/cpu/intel_cacheinfo.c:643:29: warning: ‘this_leaf.eax.split.num_threads_sharing’ may be used uninitialized in this function [-Wmaybe-uninitialized]

This keeps on happening during randbuilds and the compiler is wrong here:

In the case where cpuid4_cache_lookup_regs() returns 0, both this_leaf.size and this_leaf.eax get initialized. In the case where the CPUID leaf doesn't contain valid cache info, we error out, which init_intel_cacheinfo() handles correctly without touching the abovementioned fields.

So shut up the warning by clearing out the struct which we hand down.

While at it, reverse error handling and gain one indentation level.

Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1370710095-20547-1-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
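As a side note, here is a minimal user-space sketch of the two patterns the patch combines; the struct name and the lookup_leaf() helper are made up for illustration and are not the kernel's code. The '= {}' initializer zeroes every field of the on-stack struct, which is what silences -Wmaybe-uninitialized, and the inverted 'if (retval < 0) continue;' check drops one level of nesting around the switch.

#include <stdio.h>

/* Made-up stand-ins for the kernel's struct and lookup function. */
struct leaf_regs {
	unsigned int level;
	unsigned int size;
};

/* Fills *leaf and returns 0 on success, -1 when the leaf carries no
 * valid cache information (mirrors the return convention described in
 * the commit message). */
static int lookup_leaf(int index, struct leaf_regs *leaf)
{
	if (index > 2)
		return -1;
	leaf->level = (unsigned int)index + 1;
	leaf->size  = 32768u << index;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		/* '= {}' zero-initializes all fields, so nothing read
		 * afterwards can be uninitialized. */
		struct leaf_regs this_leaf = {};

		/* Error path first: 'continue' instead of wrapping the rest
		 * of the loop body in 'if (retval >= 0) { ... }'. */
		if (lookup_leaf(i, &this_leaf) < 0)
			continue;

		printf("leaf %d: level %u, %u KB\n",
		       i, this_leaf.level, this_leaf.size / 1024);
	}
	return 0;
}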
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kernel/cpu/intel_cacheinfo.c   52
1 file changed, 25 insertions, 27 deletions
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7c6f7d548c0f..8dc72dda66fe 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -618,36 +618,34 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info_regs this_leaf;
+			struct _cpuid4_info_regs this_leaf = {};
 			int retval;
 
 			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
-			if (retval >= 0) {
-				switch (this_leaf.eax.split.level) {
-				case 1:
-					if (this_leaf.eax.split.type ==
-							CACHE_TYPE_DATA)
-						new_l1d = this_leaf.size/1024;
-					else if (this_leaf.eax.split.type ==
-							CACHE_TYPE_INST)
-						new_l1i = this_leaf.size/1024;
-					break;
-				case 2:
-					new_l2 = this_leaf.size/1024;
-					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-					index_msb = get_count_order(num_threads_sharing);
-					l2_id = c->apicid & ~((1 << index_msb) - 1);
-					break;
-				case 3:
-					new_l3 = this_leaf.size/1024;
-					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-					index_msb = get_count_order(
-							num_threads_sharing);
-					l3_id = c->apicid & ~((1 << index_msb) - 1);
-					break;
-				default:
-					break;
-				}
+			if (retval < 0)
+				continue;
+
+			switch (this_leaf.eax.split.level) {
+			case 1:
+				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
+					new_l1d = this_leaf.size/1024;
+				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
+					new_l1i = this_leaf.size/1024;
+				break;
+			case 2:
+				new_l2 = this_leaf.size/1024;
+				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+				index_msb = get_count_order(num_threads_sharing);
+				l2_id = c->apicid & ~((1 << index_msb) - 1);
+				break;
+			case 3:
+				new_l3 = this_leaf.size/1024;
+				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+				index_msb = get_count_order(num_threads_sharing);
+				l3_id = c->apicid & ~((1 << index_msb) - 1);
+				break;
+			default:
+				break;
 			}
 		}
 	}
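For readers wondering about the arithmetic that stays unchanged in the hunk: the L2/L3 cache ID is the APIC ID with the low bits that distinguish the threads sharing that cache masked off. A rough user-space sketch of that computation follows; count_order() here approximates the kernel's get_count_order() with a compiler builtin, and the example APIC ID and sharing count are made up.

#include <stdio.h>

/* Approximation of the kernel's get_count_order(): smallest 'order' such
 * that (1u << order) >= count; assumes count >= 1. */
static int count_order(unsigned int count)
{
	int order = 31 - __builtin_clz(count);	/* floor(log2(count)) */

	if (count & (count - 1))		/* not a power of two */
		order++;			/* round up */
	return order;
}

int main(void)
{
	unsigned int apicid = 0x1b;		/* example APIC ID (made up) */
	unsigned int num_threads_sharing = 4;	/* threads sharing the cache */
	int index_msb = count_order(num_threads_sharing);

	/* Clear the low bits that differ between the sharing threads: every
	 * thread on the same cache computes the same ID. */
	unsigned int cache_id = apicid & ~((1u << index_msb) - 1);

	printf("apicid=0x%x index_msb=%d cache_id=0x%x\n",
	       apicid, index_msb, cache_id);
	return 0;
}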