-rw-r--r--  arch/x86/events/intel/core.c           |  2
-rw-r--r--  arch/x86/events/intel/lbr.c            |  2
-rw-r--r--  arch/x86/events/intel/p6.c             |  2
-rw-r--r--  arch/x86/include/asm/acpi.h            |  2
-rw-r--r--  arch/x86/include/asm/processor.h       |  2
-rw-r--r--  arch/x86/kernel/amd_nb.c               |  2
-rw-r--r--  arch/x86/kernel/apic/apic.c            |  6
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c       |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c              | 28
-rw-r--r--  arch/x86/kernel/cpu/centaur.c          |  4
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  8
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c            |  2
-rw-r--r--  arch/x86/kernel/cpu/intel.c            | 18
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.c        |  2
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c  |  4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c        |  4
-rw-r--r--  arch/x86/kernel/cpu/proc.c             |  4
-rw-r--r--  arch/x86/kernel/head_32.S              |  4
-rw-r--r--  arch/x86/kernel/mpparse.c              |  2
-rw-r--r--  arch/x86/lib/cpu.c                     |  2
-rw-r--r--  drivers/char/hw_random/via-rng.c       |  2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c         |  2
-rw-r--r--  drivers/cpufreq/longhaul.c             |  6
-rw-r--r--  drivers/cpufreq/p4-clockmod.c          |  2
-rw-r--r--  drivers/cpufreq/powernow-k7.c          |  2
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c   |  4
-rw-r--r--  drivers/cpufreq/speedstep-lib.c        |  6
-rw-r--r--  drivers/crypto/padlock-aes.c           |  2
-rw-r--r--  drivers/edac/amd64_edac.c              |  2
-rw-r--r--  drivers/hwmon/coretemp.c               |  6
-rw-r--r--  drivers/hwmon/hwmon-vid.c              |  2
-rw-r--r--  drivers/hwmon/k10temp.c                |  2
-rw-r--r--  drivers/hwmon/k8temp.c                 |  2
-rw-r--r--  drivers/video/fbdev/geode/video_gx.c   |  2
35 files changed, 73 insertions, 73 deletions
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 731153a4681e..56457cb73448 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
                 break;
 
         case INTEL_FAM6_SANDYBRIDGE_X:
-                switch (cpu_data(cpu).x86_mask) {
+                switch (cpu_data(cpu).x86_stepping) {
                 case 6: rev = 0x618; break;
                 case 7: rev = 0x70c; break;
                 }
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ae64d0b69729..cf372b90557e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
         * on PMU interrupt
         */
        if (boot_cpu_data.x86_model == 28
-           && boot_cpu_data.x86_mask < 10) {
+           && boot_cpu_data.x86_stepping < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index a5604c352930..408879b0c0d4 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
 
 static __init void p6_pmu_rdpmc_quirk(void)
 {
-       if (boot_cpu_data.x86_mask < 9) {
+       if (boot_cpu_data.x86_stepping < 9) {
                /*
                 * PPro erratum 26; fixed in stepping 9 and above.
                 */
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d0ec9df1cbe..f077401869ee 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
        if (boot_cpu_data.x86 == 0x0F &&
            boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
            boot_cpu_data.x86_model <= 0x05 &&
-           boot_cpu_data.x86_mask < 0x0A)
+           boot_cpu_data.x86_stepping < 0x0A)
                return 1;
        else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                return 1;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 99799fbd0f7e..b7c8583328c7 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
        __u8            x86;            /* CPU family */
        __u8            x86_vendor;     /* CPU vendor */
        __u8            x86_model;
-       __u8            x86_mask;
+       __u8            x86_stepping;
 #ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
        int             x86_tlbsize;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 6db28f17ff28..c88e0b127810 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
-            boot_cpu_data.x86_mask >= 0x1))
+            boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
        if (boot_cpu_data.x86 == 0x15)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 25ddf02598d2..b203af0855b5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static u32 hsx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x3a; /* EP */
        case 0x04: return 0x0f; /* EX */
        }
@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
 
 static u32 bdx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x00000011;
        case 0x03: return 0x0700000e;
        case 0x04: return 0x0f00000c;
@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
 
 static u32 skx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x03: return 0x01000136;
        case 0x04: return 0x02000014;
        }
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fa1261eefa16..f91ba53e06c8 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,7 +18,7 @@ void foo(void)
        OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
        OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
        OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
-       OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
+       OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
        OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
        OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ea831c858195..e7d5a7883632 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                return;
        }
 
-       if (c->x86_model == 6 && c->x86_mask == 1) {
+       if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
-          (c->x86_model == 8 && c->x86_mask < 8)) {
+          (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                return;
        }
 
-       if ((c->x86_model == 8 && c->x86_mask > 7) ||
+       if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */
 
@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
         * As per AMD technical note 27212 0.2
         */
-       if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+       if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
-       if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-           (c->x86_mask == 1)))
+       if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+           (c->x86_stepping == 1)))
                return;
 
        /* Duron 670 is valid */
-       if ((c->x86_model == 7) && (c->x86_mask == 0))
+       if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;
 
        /*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
-       if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-           ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+       if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+           ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
            (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;
@@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
-                   (c->x86_model == 8 && c->x86_mask >= 8))
+                   (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
         * all up to and including B1.
         */
-       if (c->x86_model <= 1 && c->x86_mask <= 1)
+       if (c->x86_model <= 1 && c->x86_stepping <= 1)
                set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
        /* AMD errata T13 (order #21922) */
        if ((c->x86 == 6)) {
                /* Duron Rev A0 */
-               if (c->x86_model == 3 && c->x86_mask == 0)
+               if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
-                       (c->x86_mask == 0 || c->x86_mask == 1))
+                       (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
@@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
        }
 
        /* OSVW unavailable or ID unknown, match family-model-stepping range */
-       ms = (cpu->x86_model << 4) | cpu->x86_mask;
+       ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 68bc6d9b3132..595be776727d 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
-                       switch (c->x86_mask) {
+                       switch (c->x86_stepping) {
                        default:
                                name = "2";
                                break;
@@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
         * - Note, it seems this may only be in engineering samples.
         */
        if ((c->x86 == 6) && (c->x86_model == 9) &&
-                               (c->x86_mask == 1) && (size == 65))
+                               (c->x86_stepping == 1) && (size == 65))
                size -= 1;
        return size;
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d63f4b5706e4..a7d8df641a4c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86          = x86_family(tfms);
                c->x86_model    = x86_model(tfms);
-               c->x86_mask     = x86_stepping(tfms);
+               c->x86_stepping = x86_stepping(tfms);
 
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1186,7 +1186,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
-       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 
        pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
-       if (c->x86_mask || c->cpuid_level >= 0)
-               pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+       if (c->x86_stepping || c->cpuid_level >= 0)
+               pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
        else
                pr_cont(")\n");
 }
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 6b4bb335641f..8949b7ae6d92 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 
        /* common case step number/rev -- exceptions handled below */
        c->x86_model = (dir1 >> 4) + 1;
-       c->x86_mask = dir1 & 0xf;
+       c->x86_stepping = dir1 & 0xf;
 
        /* Now cook; the original recipe is by Channing Corn, from Cyrix.
         * We do the same thing for each generation: we work out
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ef796f14f7ae..d19e903214b4 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -146,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 
        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
-                   c->x86_mask == spectre_bad_microcodes[i].stepping)
+                   c->x86_stepping == spectre_bad_microcodes[i].stepping)
                        return (c->microcode <= spectre_bad_microcodes[i].microcode);
        }
        return false;
@@ -193,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
-       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
            c->microcode < 0x20e) {
                pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -209,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
-           && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+           && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
                c->x86_phys_bits = 36;
 
        /*
@@ -307,7 +307,7 @@ int ppro_with_ram_bug(void)
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
-           boot_cpu_data.x86_mask < 8) {
+           boot_cpu_data.x86_stepping < 8) {
                pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
@@ -324,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
+           c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
@@ -367,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
-       if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+       if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
 
        /*
@@ -385,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
-       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -400,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * Specification Update").
         */
        if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-           (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+           (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);
 
 
@@ -647,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
-                       else if (c->x86_mask == 0 || c->x86_mask == 5)
+                       else if (c->x86_stepping == 0 || c->x86_stepping == 5)
                                p = "Celeron-A";
                        break;
 
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 99442370de40..18dd8f22e353 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
                cache_alloc_hsw_probe();
                break;
        case INTEL_FAM6_SKYLAKE_X:
-               if (boot_cpu_data.x86_mask <= 4)
+               if (boot_cpu_data.x86_stepping <= 4)
                        set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
        }
 }
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f7c55b0e753a..b94279bb5c04 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
         */
        if (c->x86 == 6 &&
            c->x86_model == INTEL_FAM6_BROADWELL_X &&
-           c->x86_mask == 0x01 &&
+           c->x86_stepping == 0x01 &&
            llc_size_per_core > 2621440 &&
            c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
                return UCODE_NFOUND;
 
        sprintf(name, "intel-ucode/%02x-%02x-%02x",
-               c->x86, c->x86_model, c->x86_mask);
+               c->x86, c->x86_model, c->x86_stepping);
 
        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fdc55215d44d..e12ee86906c6 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
         */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
-           boot_cpu_data.x86_mask <= 7) {
+           boot_cpu_data.x86_stepping <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 40d5a8a75212..7468de429087 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
                if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                    boot_cpu_data.x86 == 0xF &&
                    boot_cpu_data.x86_model == 0x3 &&
-                   (boot_cpu_data.x86_mask == 0x3 ||
-                    boot_cpu_data.x86_mask == 0x4))
+                   (boot_cpu_data.x86_stepping == 0x3 ||
+                    boot_cpu_data.x86_stepping == 0x4))
                        phys_addr = 36;
 
                size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e7ecedafa1c8..ee4cc388e8d3 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
-       if (c->x86_mask || c->cpuid_level >= 0)
-               seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+       if (c->x86_stepping || c->cpuid_level >= 0)
+               seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
        else
                seq_puts(m, "stepping\t: unknown\n");
        if (c->microcode)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c29020907886..b59e4fb40fd9 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -37,7 +37,7 @@
 #define X86             new_cpu_data+CPUINFO_x86
 #define X86_VENDOR      new_cpu_data+CPUINFO_x86_vendor
 #define X86_MODEL       new_cpu_data+CPUINFO_x86_model
-#define X86_MASK        new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING    new_cpu_data+CPUINFO_x86_stepping
 #define X86_HARD_MATH   new_cpu_data+CPUINFO_hard_math
 #define X86_CPUID       new_cpu_data+CPUINFO_cpuid_level
 #define X86_CAPABILITY  new_cpu_data+CPUINFO_x86_capability
@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
        shrb $4,%al
        movb %al,X86_MODEL
        andb $0x0f,%cl          # mask mask revision
-       movb %cl,X86_MASK
+       movb %cl,X86_STEPPING
        movl %edx,X86_CAPABILITY
 
 .Lis486:
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3a4b12809ab5..bc6bc6689e68 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        processor.cpuflag = CPU_ENABLED;
        processor.cpufeature = (boot_cpu_data.x86 << 8) |
-           (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+           (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
        processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
        processor.reserved[0] = 0;
        processor.reserved[1] = 0;
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index d6f848d1211d..2dd1fe13a37b 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
 {
        unsigned int fam, model;
 
-        fam = x86_family(sig);
+       fam = x86_family(sig);
 
        model = (sig >> 4) & 0xf;
 
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index d1f5bb534e0e..6e9df558325b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
        /* Enable secondary noise source on CPUs where it is present. */
 
        /* Nehemiah stepping 8 and higher */
-       if ((c->x86_model == 9) && (c->x86_mask > 7))
+       if ((c->x86_model == 9) && (c->x86_stepping > 7))
                lo |= VIA_NOISESRC2;
 
        /* Esther */
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a2ca0f79daf..d0c34df0529c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
-                   (c->x86_mask == 8)) {
+                   (c->x86_stepping == 8)) {
                        pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
                        return -ENODEV;
                }
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12df40dd..d5e27bc7585a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                break;
 
        case 7:
-               switch (c->x86_mask) {
+               switch (c->x86_stepping) {
                case 0:
                        longhaul_version = TYPE_LONGHAUL_V1;
                        cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                        break;
                case 1 ... 15:
                        longhaul_version = TYPE_LONGHAUL_V2;
-                       if (c->x86_mask < 8) {
+                       if (c->x86_stepping < 8) {
                                cpu_model = CPU_SAMUEL2;
                                cpuname = "C3 'Samuel 2' [C5B]";
                        } else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                numscales = 32;
                memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
                memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
-               switch (c->x86_mask) {
+               switch (c->x86_stepping) {
                case 0 ... 1:
                        cpu_model = CPU_NEHEMIAH;
                        cpuname = "C3 'Nehemiah A' [C5XLOE]";
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index fd77812313f3..a25741b1281b 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 #endif
 
        /* Errata workaround */
-       cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+       cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
        switch (cpuid) {
        case 0x0f07:
        case 0x0f0a:
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 80ac313e6c59..302e9ce793a0 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -131,7 +131,7 @@ static int check_powernow(void)
                return 0;
        }
 
-       if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+       if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
                pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
                have_a0 = 1;
        }
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 41bc5397f4bb..4fa5adf16c70 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -37,7 +37,7 @@ struct cpu_id
 {
        __u8    x86;            /* CPU family */
        __u8    x86_model;      /* model */
-       __u8    x86_mask;       /* stepping */
+       __u8    x86_stepping;   /* stepping */
 };
 
 enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
 {
        if ((c->x86 == x->x86) &&
            (c->x86_model == x->x86_model) &&
-           (c->x86_mask == x->x86_mask))
+           (c->x86_stepping == x->x86_stepping))
                return 1;
        return 0;
 }
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 8085ec9000d1..e3a9962ee410 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
        ebx = cpuid_ebx(0x00000001);
        ebx &= 0x000000FF;
 
-       pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+       pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
 
-       switch (c->x86_mask) {
+       switch (c->x86_stepping) {
        case 4:
                /*
                 * B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
                        msr_lo, msr_hi);
                if ((msr_hi & (1<<18)) &&
                    (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
-                       if (c->x86_mask == 0x01) {
+                       if (c->x86_stepping == 0x01) {
                                pr_debug("early PIII version\n");
                                return SPEEDSTEP_CPU_PIII_C_EARLY;
                        } else
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 4b6642a25df5..1c6cbda56afe 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
 
        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
-       if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+       if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
                ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
                cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
                printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b16ec595fa7..329cb96f886f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
        struct amd64_family_type *fam_type = NULL;
 
        pvt->ext_model  = boot_cpu_data.x86_model >> 4;
-       pvt->stepping   = boot_cpu_data.x86_mask;
+       pvt->stepping   = boot_cpu_data.x86_stepping;
        pvt->model      = boot_cpu_data.x86_model;
        pvt->fam        = boot_cpu_data.x86;
 
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd86b3c..a42744c7665b 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
        for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                const struct tjmax_model *tm = &tjmax_model_table[i];
                if (c->x86_model == tm->model &&
-                   (tm->mask == ANY || c->x86_mask == tm->mask))
+                   (tm->mask == ANY || c->x86_stepping == tm->mask))
                        return tm->tjmax;
        }
 
        /* Early chips have no MSR for TjMax */
 
-       if (c->x86_model == 0xf && c->x86_mask < 4)
+       if (c->x86_model == 0xf && c->x86_stepping < 4)
                usemsr_ee = 0;
 
        if (c->x86_model > 0xe && usemsr_ee) {
@@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu)
         * Readings might stop update when processor visited too deep sleep,
         * fixed for stepping D0 (6EC).
         */
-       if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+       if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
                pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                return -ENODEV;
        }
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index ef91b8a67549..84e91286fc4f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
        if (c->x86 < 6)         /* Any CPU with family lower than 6 */
                return 0;       /* doesn't have VID */
 
-       vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+       vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
        if (vrm_ret == 134)
                vrm_ret = get_via_model_d_vrm();
        if (vrm_ret == 0)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e175664a..b960015cb073 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -226,7 +226,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
         * and AM3 formats, but that's the best we can do.
         */
        return boot_cpu_data.x86_model < 4 ||
-              (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+              (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 
 static int k10temp_probe(struct pci_dev *pdev,
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5a632bcf869b..e59f9113fb93 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        model = boot_cpu_data.x86_model;
-       stepping = boot_cpu_data.x86_mask;
+       stepping = boot_cpu_data.x86_stepping;
 
        /* feature available since SH-C0, exclude older revisions */
        if ((model == 4 && stepping == 0) ||
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 6082f653c68a..67773e8bbb95 100644
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
        int timeout = 1000;
 
        /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
-       if (cpu_data(0).x86_mask == 1) {
+       if (cpu_data(0).x86_stepping == 1) {
                pll_table = gx_pll_table_14MHz;
                pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
        } else {
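
The whole series above is a mechanical rename: the byte formerly called x86_mask has always held the CPUID stepping, hence the new name x86_stepping. As a standalone illustration (not kernel code), the sketch below mirrors the x86_family()/x86_model()/x86_stepping() helpers in arch/x86/lib/cpu.c that populate these fields from the raw CPUID leaf 1 EAX signature; the sig_* function names and the sample signature value are assumptions made only for this example.

/* Standalone sketch: decode family/model/stepping from a CPUID leaf 1 signature. */
#include <stdio.h>

static unsigned int sig_family(unsigned int sig)
{
        unsigned int fam = (sig >> 8) & 0xf;            /* base family, bits 11:8 */

        if (fam == 0xf)
                fam += (sig >> 20) & 0xff;              /* add extended family, bits 27:20 */
        return fam;
}

static unsigned int sig_model(unsigned int sig)
{
        unsigned int fam = sig_family(sig);
        unsigned int model = (sig >> 4) & 0xf;          /* base model, bits 7:4 */

        if (fam >= 0x6)
                model += ((sig >> 16) & 0xf) << 4;      /* extended model, bits 19:16 */
        return model;
}

static unsigned int sig_stepping(unsigned int sig)
{
        return sig & 0xf;                               /* stepping lives in bits 3:0 */
}

int main(void)
{
        unsigned int sig = 0x000506e3;                  /* illustrative sample signature */

        printf("family: 0x%x, model: 0x%x, stepping: 0x%x\n",
               sig_family(sig), sig_model(sig), sig_stepping(sig));
        return 0;
}

Built with any C compiler, the example prints "family: 0x6, model: 0x5e, stepping: 0x3" for the sample signature, i.e. the stepping is exactly the low nibble that the renamed x86_stepping field stores.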