author     Borislav Petkov <bp@suse.de>    2015-02-09 15:42:34 -0500
committer  Borislav Petkov <bp@suse.de>    2015-03-02 14:32:07 -0500
commit     58ce8d6d3a7616014dc70fd8d8f945176d74957c
tree       0f627b72ce176a890460c7a28a2d57d4423b0697 /arch/x86
parent     4f5e5f2b574804ed330e20456d5b86a4259544ad
x86/microcode: Consolidate family,model, ... code
... to the header. Split the family-acquiring function into a main one, which does the CPUID, and a helper which computes the extended family and is used in multiple places. Get rid of the locally grown get_x86_{family,model}().

While at it, rename local variables to something more descriptive and vertically align assignments for better readability.

There should be no functionality change resulting from this patch.

Signed-off-by: Borislav Petkov <bp@suse.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/microcode.h              73
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c    75
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_early.c   58
3 files changed, 101 insertions(+), 105 deletions(-)
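For reference, the signature layout the consolidated helpers decode is: bits 8-11 hold the base family, bits 20-27 the extended family (added only when the base family is 0xf), bits 4-7 the base model, and bits 16-19 the extended model (folded in for family 0x6 and 0xf). Below is a minimal userspace sketch of that decoding, independent of the kernel code; the helper names sig_family()/sig_model() and the example signature value are illustrative only.

/*
 * Standalone sketch of the family/model decoding consolidated by this patch.
 * Mirrors the bit layout of __x86_family()/x86_model(); not kernel code.
 */
#include <stdio.h>

static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;		/* base family */

	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;		/* add extended family */

	return fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int fam   = sig_family(sig);
	unsigned int model = (sig >> 4) & 0xf;		/* base model */

	if (fam == 0x6 || fam == 0xf)
		model += ((sig >> 16) & 0xf) << 4;	/* add extended model */

	return model;
}

int main(void)
{
	unsigned int sig = 0x000306c3;	/* example CPUID(1).EAX value */

	/* prints "family 0x6, model 0x3c" for the example signature */
	printf("family 0x%x, model 0x%x\n", sig_family(sig), sig_model(sig));
	return 0;
}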
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 201b520521ed..2fb20d6f7e23 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -75,6 +75,79 @@ static inline void __exit exit_amd_microcode(void) {}
 
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
+
+#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
+#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
+#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
+#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
+#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
+#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
+#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+
+#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
+		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
+
+/*
+ * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
+ * x86_vendor() gets vendor id for BSP.
+ *
+ * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
+ * coding, we still use x86_vendor() to get vendor id for AP.
+ *
+ * x86_vendor() gets vendor information directly from CPUID.
+ */
+static inline int x86_vendor(void)
+{
+	u32 eax = 0x00000000;
+	u32 ebx, ecx = 0, edx;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
+		return X86_VENDOR_INTEL;
+
+	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
+		return X86_VENDOR_AMD;
+
+	return X86_VENDOR_UNKNOWN;
+}
+
+static inline unsigned int __x86_family(unsigned int sig)
+{
+	unsigned int x86;
+
+	x86 = (sig >> 8) & 0xf;
+
+	if (x86 == 0xf)
+		x86 += (sig >> 20) & 0xff;
+
+	return x86;
+}
+
+static inline unsigned int x86_family(void)
+{
+	u32 eax = 0x00000001;
+	u32 ebx, ecx = 0, edx;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	return __x86_family(eax);
+}
+
+static inline unsigned int x86_model(unsigned int sig)
+{
+	unsigned int x86, model;
+
+	x86 = __x86_family(sig);
+
+	model = (sig >> 4) & 0xf;
+
+	if (x86 == 0x6 || x86 == 0xf)
+		model += ((sig >> 16) & 0xf) << 4;
+
+	return model;
+}
+
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
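The QCHAR trick above packs four ASCII characters into a little-endian u32, so the vendor string returned by CPUID leaf 0 (spread across EBX, EDX, ECX in that order) can be checked with three integer compares instead of a string comparison. A rough userspace equivalent is sketched below; it assumes GCC/Clang's <cpuid.h> and __get_cpuid() in place of the kernel's native_cpuid(), and is not part of the patch.

/*
 * Userspace sketch of the QCHAR/CPUID_IS vendor check.
 * Assumes GCC/Clang <cpuid.h>; the kernel uses native_cpuid() instead.
 */
#include <stdio.h>
#include <cpuid.h>

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* CPUID(0) returns the 12-byte vendor string in EBX, EDX, ECX. */
	if (ebx == QCHAR('G', 'e', 'n', 'u') &&
	    edx == QCHAR('i', 'n', 'e', 'I') &&
	    ecx == QCHAR('n', 't', 'e', 'l'))
		printf("GenuineIntel\n");
	else if (ebx == QCHAR('A', 'u', 't', 'h') &&
		 edx == QCHAR('e', 'n', 't', 'i') &&
		 ecx == QCHAR('c', 'A', 'M', 'D'))
		printf("AuthenticAMD\n");
	else
		printf("unknown vendor\n");

	return 0;
}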
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index d45df4bd16ab..a413a69cbd74 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -23,57 +23,6 @@
 #include <asm/processor.h>
 #include <asm/cmdline.h>
 
-#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
-#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
-#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
-#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
-#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
-#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
-#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
-
-#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
-		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
-
-/*
- * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_vendor() gets vendor id for BSP.
- *
- * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_vendor() to get vendor id for AP.
- *
- * x86_vendor() gets vendor information directly through cpuid.
- */
-static int x86_vendor(void)
-{
-	u32 eax = 0x00000000;
-	u32 ebx, ecx = 0, edx;
-
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-
-	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
-		return X86_VENDOR_INTEL;
-
-	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
-		return X86_VENDOR_AMD;
-
-	return X86_VENDOR_UNKNOWN;
-}
-
-static int x86_family(void)
-{
-	u32 eax = 0x00000001;
-	u32 ebx, ecx = 0, edx;
-	int x86;
-
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-
-	x86 = (eax >> 8) & 0xf;
-	if (x86 == 15)
-		x86 += (eax >> 20) & 0xff;
-
-	return x86;
-}
-
 static bool __init check_loader_disabled_bsp(void)
 {
 #ifdef CONFIG_X86_32
@@ -96,7 +45,7 @@ static bool __init check_loader_disabled_bsp(void)
 
 void __init load_ucode_bsp(void)
 {
-	int vendor, x86;
+	int vendor, family;
 
 	if (check_loader_disabled_bsp())
 		return;
@@ -105,15 +54,15 @@ void __init load_ucode_bsp(void)
 		return;
 
 	vendor = x86_vendor();
-	x86 = x86_family();
+	family = x86_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
-		if (x86 >= 6)
+		if (family >= 6)
 			load_ucode_intel_bsp();
 		break;
 	case X86_VENDOR_AMD:
-		if (x86 >= 0x10)
+		if (family >= 0x10)
 			load_ucode_amd_bsp();
 		break;
 	default:
@@ -132,7 +81,7 @@ static bool check_loader_disabled_ap(void)
 
 void load_ucode_ap(void)
 {
-	int vendor, x86;
+	int vendor, family;
 
 	if (check_loader_disabled_ap())
 		return;
@@ -141,15 +90,15 @@ void load_ucode_ap(void)
 		return;
 
 	vendor = x86_vendor();
-	x86 = x86_family();
+	family = x86_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
-		if (x86 >= 6)
+		if (family >= 6)
 			load_ucode_intel_ap();
 		break;
 	case X86_VENDOR_AMD:
-		if (x86 >= 0x10)
+		if (family >= 0x10)
 			load_ucode_amd_ap();
 		break;
 	default:
@@ -179,18 +128,18 @@ int __init save_microcode_in_initrd(void)
 
 void reload_early_microcode(void)
 {
-	int vendor, x86;
+	int vendor, family;
 
 	vendor = x86_vendor();
-	x86 = x86_family();
+	family = x86_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
-		if (x86 >= 6)
+		if (family >= 6)
 			reload_ucode_intel();
 		break;
 	case X86_VENDOR_AMD:
-		if (x86 >= 0x10)
+		if (family >= 0x10)
 			reload_ucode_amd();
 		break;
 	default:
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index cd42b3a55897..5c7896bf0e4d 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -126,31 +126,6 @@ load_microcode(struct mc_saved_data *mc_saved_data,
 	}
 }
 
-static u8 get_x86_family(unsigned long sig)
-{
-	u8 x86;
-
-	x86 = (sig >> 8) & 0xf;
-
-	if (x86 == 0xf)
-		x86 += (sig >> 20) & 0xff;
-
-	return x86;
-}
-
-static u8 get_x86_model(unsigned long sig)
-{
-	u8 x86, x86_model;
-
-	x86 = get_x86_family(sig);
-	x86_model = (sig >> 4) & 0xf;
-
-	if (x86 == 0x6 || x86 == 0xf)
-		x86_model += ((sig >> 16) & 0xf) << 4;
-
-	return x86_model;
-}
-
 /*
  * Given CPU signature and a microcode patch, this function finds if the
  * microcode patch has matching family and model with the CPU.
@@ -159,41 +134,40 @@ static enum ucode_state
 matching_model_microcode(struct microcode_header_intel *mc_header,
 			 unsigned long sig)
 {
-	u8 x86, x86_model;
-	u8 x86_ucode, x86_model_ucode;
+	unsigned int fam, model;
+	unsigned int fam_ucode, model_ucode;
 	struct extended_sigtable *ext_header;
 	unsigned long total_size = get_totalsize(mc_header);
 	unsigned long data_size = get_datasize(mc_header);
 	int ext_sigcount, i;
 	struct extended_signature *ext_sig;
 
-	x86 = get_x86_family(sig);
-	x86_model = get_x86_model(sig);
+	fam = __x86_family(sig);
+	model = x86_model(sig);
 
-	x86_ucode = get_x86_family(mc_header->sig);
-	x86_model_ucode = get_x86_model(mc_header->sig);
+	fam_ucode = __x86_family(mc_header->sig);
+	model_ucode = x86_model(mc_header->sig);
 
-	if (x86 == x86_ucode && x86_model == x86_model_ucode)
+	if (fam == fam_ucode && model == model_ucode)
 		return UCODE_OK;
 
 	/* Look for ext. headers: */
 	if (total_size <= data_size + MC_HEADER_SIZE)
 		return UCODE_NFOUND;
 
 	ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
+	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
 	ext_sigcount = ext_header->count;
-	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
 
 	for (i = 0; i < ext_sigcount; i++) {
-		x86_ucode = get_x86_family(ext_sig->sig);
-		x86_model_ucode = get_x86_model(ext_sig->sig);
+		fam_ucode = __x86_family(ext_sig->sig);
+		model_ucode = x86_model(ext_sig->sig);
 
-		if (x86 == x86_ucode && x86_model == x86_model_ucode)
+		if (fam == fam_ucode && model == model_ucode)
 			return UCODE_OK;
 
 		ext_sig++;
 	}
-
 	return UCODE_NFOUND;
 }
 
@@ -374,7 +348,7 @@ out:
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
-	u8 x86, x86_model;
+	unsigned int family, model;
 	struct cpu_signature csig;
 	unsigned int eax, ebx, ecx, edx;
 
@@ -389,10 +363,10 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 	csig.sig = eax;
 
-	x86 = get_x86_family(csig.sig);
-	x86_model = get_x86_model(csig.sig);
+	family = __x86_family(csig.sig);
+	model = x86_model(csig.sig);
 
-	if ((x86_model >= 5) || (x86 > 6)) {
+	if ((model >= 5) || (family > 6)) {
 		/* get processor flags from MSR 0x17 */
 		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 		csig.pf = 1 << ((val[1] >> 18) & 7);
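The intel_early.c hunk above now matches a CPU signature against the main microcode header and, failing that, walks the extended signature table, comparing only family and model via the shared helpers. The sketch below shows just that matching loop over a made-up, in-memory table; the struct and helper names are simplified stand-ins, not the kernel's microcode_intel.h definitions, and the signature values are examples only.

/*
 * Sketch of the extended-signature matching loop, outside the kernel.
 * Struct layout and names are illustrative, not the real microcode format.
 */
#include <stdio.h>

struct ext_signature { unsigned int sig, pf, cksum; };

static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;
	return fam == 0xf ? fam + ((sig >> 20) & 0xff) : fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int fam = sig_family(sig), model = (sig >> 4) & 0xf;
	if (fam == 0x6 || fam == 0xf)
		model += ((sig >> 16) & 0xf) << 4;
	return model;
}

/* Return 1 if any table entry has the same family/model as the CPU. */
static int ext_table_matches(unsigned int cpu_sig,
			     const struct ext_signature *sigs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (sig_family(sigs[i].sig) == sig_family(cpu_sig) &&
		    sig_model(sigs[i].sig) == sig_model(cpu_sig))
			return 1;
	return 0;
}

int main(void)
{
	/* Made-up table: last entry shares family 0x6 / model 0x3c with the CPU. */
	struct ext_signature table[] = {
		{ 0x000206a7, 0x02, 0 },
		{ 0x000306a9, 0x12, 0 },
		{ 0x000306c3, 0x32, 0 },
	};
	unsigned int cpu_sig = 0x000306c1;	/* same family/model, other stepping */

	printf("match: %d\n", ext_table_matches(cpu_sig, table, 3));	/* prints 1 */
	return 0;
}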