author     Jeremy Fitzhardinge <jeremy@goop.org>   2008-01-30 07:30:55 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-30 07:30:55 -0500
commit     53756d3722172815f52272b28c6d5d5e9639adde
tree       f376d3378a47c9d442d8444ce7fa78b557bcd8b5
parent     5548fecdff5617ba3a2f09f0e585e1ac6e1bd25c
x86: add set/clear_cpu_cap operations
The patch to suppress bitops-related warnings added a pile of ugly
casts. Many of these were related to the management of x86 CPU
capabilities. Clean these up by adding specific set/clear_cpu_cap
macros, and use them consistently.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
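
For readers skimming the diff below, here is a minimal usage sketch of the new helpers. It is illustrative only and not part of the patch; the function name and feature bits are arbitrary examples, but the macros, struct cpuinfo_x86, and the query macros are the ones touched by this change.

/*
 * Illustrative sketch, not part of this patch: how set_cpu_cap() and
 * clear_cpu_cap() are meant to be used on a struct cpuinfo_x86, replacing
 * open-coded set_bit()/clear_bit() calls with casts. Function name and
 * feature choices here are arbitrary examples.
 */
#include <asm/cpufeature.h>
#include <asm/processor.h>

static void __cpuinit example_cap_fixup(struct cpuinfo_x86 *c)
{
	/* Mark a capability as present for this CPU... */
	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* ...or force one off, e.g. when a quirk makes it unusable. */
	clear_cpu_cap(c, X86_FEATURE_MWAIT);
}

/* Queries keep going through cpu_has(c, bit) and boot_cpu_has(bit). */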
-rw-r--r--  arch/x86/kernel/alternative.c                13
-rw-r--r--  arch/x86/kernel/apic_32.c                     8
-rw-r--r--  arch/x86/kernel/apic_64.c                     2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c    2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c           2
-rw-r--r--  arch/x86/kernel/setup_32.c                    2
-rw-r--r--  arch/x86/kernel/setup_64.c                   29
-rw-r--r--  arch/x86/kernel/vmi_32.c                     10
-rw-r--r--  include/asm-x86/cpufeature.h                  5
9 files changed, 38 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index d6405e0842b5..cdc43242da92 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -356,15 +356,15 @@ void alternatives_smp_switch(int smp)
 	spin_lock_irqsave(&smp_alt, flags);
 	if (smp) {
 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
-		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
+		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
 	} else {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
-		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
+		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
+		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_unlock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
@@ -431,8 +431,9 @@ void __init alternative_instructions(void)
 	if (smp_alt_once) {
 		if (1 == num_possible_cpus()) {
 			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
-			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-			set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
+			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
+			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
+
 			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
 						_text, _etext);
 		}
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 56352c11a896..1ee443a8e61b 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1078,7 +1078,7 @@ static int __init detect_init_APIC (void)
 		printk(KERN_WARNING "Could not enable APIC!\n");
 		return -1;
 	}
-	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
@@ -1168,7 +1168,7 @@ fake_ioapic_page:
 int __init APIC_init_uniprocessor (void)
 {
 	if (enable_local_apic < 0)
-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 
 	if (!smp_found_config && !cpu_has_apic)
 		return -1;
@@ -1180,7 +1180,7 @@ int __init APIC_init_uniprocessor (void)
 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
 		       boot_cpu_physical_apicid);
-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 		return -1;
 	}
 
@@ -1536,7 +1536,7 @@ early_param("lapic", parse_lapic);
 static int __init parse_nolapic(char *arg)
 {
 	enable_local_apic = -1;
-	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 	return 0;
 }
 early_param("nolapic", parse_nolapic);
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 027004262105..ab4ae50399fd 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -1211,7 +1211,7 @@ early_param("apic", apic_set_verbosity);
 static __init int setup_disableapic(char *str)
 {
 	disable_apic = 1;
-	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 	return 0;
 }
 early_param("disableapic", setup_disableapic);
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 3e91d3ee26ec..238468ae1993 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 			&regs[CR_ECX], &regs[CR_EDX]);
 
 		if (regs[cb->reg] & (1 << cb->bit))
-			set_bit(cb->feature, c->x86_capability);
+			set_cpu_cap(c, cb->feature);
 	}
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 8ef6a6bfd112..3c7672c40cf4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -80,7 +80,7 @@ void mce_log(struct mce *mce)
 	/* When the buffer fills up discard new entries. Assume
 	   that the earlier errors are the more interesting. */
 	if (entry >= MCE_LOG_LEN) {
-		set_bit(MCE_OVERFLOW, &mcelog.flags);
+		set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
 		return;
 	}
 	/* Old left over entry. Skip. */
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 32edf70d6b0d..e9ede0fc585a 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -238,7 +238,7 @@ static int __init parse_mem(char *arg)
 		return -EINVAL;
 
 	if (strcmp(arg, "nopentium") == 0) {
-		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
 		disable_pse = 1;
 	} else {
 		/* If the user specifies memory size, we
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 63dd39b843b5..ce4d6b52ce36 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -667,13 +667,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	level = cpuid_eax(1);
 	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
 			     level >= 0x0f58))
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	level = get_model_name(c);
 	if (!level) {
@@ -689,7 +689,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
 	if (c->x86_power & (1<<8))
-		set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
 	/* Multi core CPU? */
 	if (c->extended_cpuid_level >= 0x80000008)
@@ -702,14 +702,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 
 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_K8, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 
 	/* RDTSC can be speculated around */
-	clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+	clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 
 	/* Family 10 doesn't support C states in MWAIT so don't use it */
 	if (c->x86 == 0x10 && !force_mwait)
-		clear_bit(X86_FEATURE_MWAIT, (unsigned long *)&c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_MWAIT);
 
 	if (amd_apic_timer_broken())
 		disable_apic_timer = 1;
@@ -811,17 +811,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON,
-				(unsigned long *)&c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	if (cpu_has_ds) {
 		unsigned int l1, l2;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, (unsigned long *)c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, (unsigned long *)c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
 	n = c->extended_cpuid_level;
@@ -840,13 +839,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	if (c->x86 == 6)
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 15)
-		set_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 	else
-		clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 	c->x86_max_cores = intel_num_cpu_cores(c);
 
 	srat_detect_node();
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index aacce426cbd0..87e5633805a9 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -963,19 +963,19 @@ static int __init parse_vmi(char *arg)
 		return -EINVAL;
 
 	if (!strcmp(arg, "disable_pge")) {
-		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
 		disable_pge = 1;
 	} else if (!strcmp(arg, "disable_pse")) {
-		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
 		disable_pse = 1;
 	} else if (!strcmp(arg, "disable_sep")) {
-		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
 		disable_sep = 1;
 	} else if (!strcmp(arg, "disable_tsc")) {
-		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
 		disable_tsc = 1;
 	} else if (!strcmp(arg, "disable_mtrr")) {
-		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
 		disable_mtrr = 1;
 	} else if (!strcmp(arg, "disable_timer")) {
 		disable_vmi_timer = 1;
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 761922972f6f..87dd900154d1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -124,9 +124,12 @@
 	 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
 	 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
 	  ? 1 :								\
-	  test_bit(bit, (unsigned long *)(c)->x86_capability))
+	  test_bit(bit, (unsigned long *)((c)->x86_capability)))
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
+#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
+#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))
+
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
 #define cpu_has_vme		boot_cpu_has(X86_FEATURE_VME)
 #define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)