Diffstat (limited to 'arch/x86'):
 28 files changed, 149 insertions(+), 80 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index df9e885eee14..a6efe0a2e9ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -498,6 +498,19 @@ config PARAVIRT
 	  over full virtualization.  However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on PARAVIRT && SMP && EXPERIMENTAL
+	---help---
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  Unfortunately the downside is an up to 5% performance hit on
+	  native kernels, with various workloads.
+
+	  If you are unsure how to answer this question, answer N.
+
 config PARAVIRT_CLOCK
 	bool
 	default n
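
What the new option gates: with PARAVIRT_SPINLOCKS=y, the ticket-lock fast paths are routed through a pv_lock_ops table that a hypervisor backend (Xen, later in this series) can fill in. A sketch of that table as it stands at this point (field list assumed from the surrounding patches, not quoted verbatim):

	struct pv_lock_ops {
		int (*spin_is_locked)(struct raw_spinlock *lock);
		int (*spin_is_contended)(struct raw_spinlock *lock);
		void (*spin_lock)(struct raw_spinlock *lock);
		void (*spin_lock_flags)(struct raw_spinlock *lock,
					unsigned long flags);
		int (*spin_trylock)(struct raw_spinlock *lock);
		void (*spin_unlock)(struct raw_spinlock *lock);
	};
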
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 857e492c571e..bbeb0c3fbd90 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
 		if (sym->st_shndx == SHN_ABS) {
 			continue;
 		}
-		if (r_type == R_386_PC32) {
-			/* PC relative relocations don't need to be adjusted */
+		if (r_type == R_386_NONE || r_type == R_386_PC32) {
+			/*
+			 * NONE can be ignored and PC relative
+			 * relocations don't need to be adjusted.
+			 */
 		}
 		else if (r_type == R_386_32) {
 			/* Visit relocations that need to be adjusted */
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 5054c2ddd1a0..74b3d2ba84e9 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -17,11 +17,6 @@
 
 #define SMAP	0x534d4150	/* ASCII "SMAP" */
 
-struct e820_ext_entry {
-	struct e820entry std;
-	u32 ext_flags;
-} __attribute__((packed));
-
 static int detect_memory_e820(void)
 {
 	int count = 0;
@@ -29,13 +24,21 @@ static int detect_memory_e820(void)
 	u32 size, id, edi;
 	u8 err;
 	struct e820entry *desc = boot_params.e820_map;
-	static struct e820_ext_entry buf; /* static so it is zeroed */
+	static struct e820entry buf; /* static so it is zeroed */
 
 	/*
-	 * Set this here so that if the BIOS doesn't change this field
-	 * but still doesn't change %ecx, we're still okay...
+	 * Note: at least one BIOS is known which assumes that the
+	 * buffer pointed to by one e820 call is the same one as
+	 * the previous call, and only changes modified fields.  Therefore,
+	 * we use a temporary buffer and copy the results entry by entry.
+	 *
+	 * This routine deliberately does not try to account for
+	 * ACPI 3+ extended attributes.  This is because there are
+	 * BIOSes in the field which report zero for the valid bit for
+	 * all ranges, and we don't currently make any use of the
+	 * other attribute bits.  Revisit this if we see the extended
+	 * attribute bits deployed in a meaningful way in the future.
 	 */
-	buf.ext_flags = 1;
 
 	do {
 		size = sizeof buf;
@@ -66,13 +69,7 @@ static int detect_memory_e820(void)
 			break;
 		}
 
-		/* ACPI 3.0 added the extended flags support.  If bit 0
-		   in the extended flags is zero, we're supposed to simply
-		   ignore the entry -- a backwards incompatible change! */
-		if (size > 20 && !(buf.ext_flags & 1))
-			continue;
-
-		*desc++ = buf.std;
+		*desc++ = buf;
 		count++;
 	} while (next && count < ARRAY_SIZE(boot_params.e820_map));
 
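
For context, buf reverts to the plain 20-byte e820entry; its layout (as defined in arch/x86/include/asm/e820.h at the time) is:

	struct e820entry {
		__u64 addr;	/* start of memory segment */
		__u64 size;	/* size of memory segment */
		__u32 type;	/* type of memory segment */
	} __attribute__((packed));

Copying entry by entry through the static buffer is what sidesteps the BIOS quirk described in the new comment.
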
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 378e3691c08c..a53da004e08e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..02ecb30982a3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do {							\
 	case 1:						\
 		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "qi" ((T__)(val)));		\
 		break;					\
 	case 2:						\
 		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 4:						\
 		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 8:						\
 		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "re" ((T__)val));			\
+		    : "re" ((T__)(val)));		\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
@@ -109,7 +109,7 @@ do {							\
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=q" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
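
The constraint change matters only for the byte-sized cases: on 32-bit x86, only %eax, %ebx, %ecx and %edx have byte subregisters, and "q" restricts GCC to those, while "r" may hand the asm %esi or %edi, for which no movb form exists in 32-bit mode. A standalone illustration of the same pattern (hypothetical helper, not from the patch):

	static unsigned char flag;

	static inline void set_flag(unsigned char v)
	{
		/* "qi": byte register or immediate.  With "ri", GCC on
		 * i386 could pick %esi and emit the invalid
		 * "movb %esi, flag". */
		asm("movb %1,%0" : "+m" (flag) : "qi" (v));
	}

The added parentheses around (val) are the usual macro-hygiene fix, so an argument like a + b is cast as a whole rather than as (T__)a + b.
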
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abeea..624f133943ed 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  So regs will be the current sp.
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
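
Why &regs->sp is right on 32-bit: for a same-privilege trap the CPU pushes only EFLAGS, CS and EIP (no SS/ESP), so the interrupted context's stack top sits immediately above the saved frame, exactly where the never-written sp member of struct pt_regs would live. A rough sketch, higher addresses first:

	/*
	 *	pre-trap stack data	<- &regs->sp == old stack pointer
	 *	EFLAGS
	 *	CS
	 *	EIP
	 *	orig_ax ... bx		<- regs
	 */

Hence the rename from kernel_trap_sp() to the more accurate kernel_stack_pointer().
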
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec87..b7e5db876399 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 145cce75cda7..88d1bfc847d3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)		+= kvm.o
 obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1c11b819f245..302947775575 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr)
 }
 
 #ifdef CONFIG_ACPI
-static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	struct es7000_oem_table *table;
@@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
 	return 0;
 }
 
-static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
 	if (!oem_addr)
 		return;
@@ -306,7 +306,7 @@ static int es7000_check_dsdt(void)
 static int es7000_acpi_ret;
 
 /* Hook from generic ACPI tables.c */
-static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	unsigned long oem_addr = 0;
 	int check_dsdt;
@@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = {
 	.safe_wait_icr_idle	= native_safe_apic_wait_icr_idle,
 };
 
-struct apic apic_es7000 = {
+struct apic __refdata apic_es7000 = {
 
 	.name				= "es7000",
 	.probe				= probe_es7000,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1caefc82e62..77848d9fca68 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+static int __init x86_xsave_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+	return 1;
+}
+__setup("noxsave", x86_xsave_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
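
This adds a standard early-boot switch: passing noxsave on the kernel command line clears X86_FEATURE_XSAVE before any feature-dependent setup runs. An illustrative bootloader entry (paths and root device hypothetical):

	kernel /boot/vmlinuz root=/dev/sda1 ro noxsave
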
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 6ac55bd341ae..869615193720 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 	case 0x0E: /* Core */
 	case 0x0F: /* Core Duo */
 	case 0x16: /* Celeron Core */
+	case 0x1C: /* Atom */
 		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 	case 0x0D: /* Pentium M (Dothan) */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 3c28ccd49742..a8363e5be4ef 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -168,10 +168,12 @@ static int check_powernow(void)
 	return 1;
 }
 
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 static void invalidate_entry(unsigned int entry)
 {
 	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
+#endif
 
 static int get_ranges(unsigned char *pst)
 {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4709ead2db52..f6b32d112357 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data)
 				data->batps);
 }
 
+static u32 freq_from_fid_did(u32 fid, u32 did)
+{
+	u32 mhz = 0;
+
+	if (boot_cpu_data.x86 == 0x10)
+		mhz = (100 * (fid + 0x10)) >> did;
+	else if (boot_cpu_data.x86 == 0x11)
+		mhz = (100 * (fid + 8)) >> did;
+	else
+		BUG();
+
+	return mhz * 1000;
+}
+
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
@@ -923,8 +937,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency =
-			data->acpi_data.states[i].core_frequency * 1000;
+		/* Frequency may be rounded for these */
+		if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+			powernow_table[i].frequency =
+				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
+		} else
+			powernow_table[i].frequency =
+				data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -1215,13 +1234,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
+static const char ACPI_PSS_BIOS_BUG_MSG[] =
+	KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
 	int rc;
-	static int print_once;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1244,19 +1266,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		/*
-		 * Replace this one with print_once as soon as such a
-		 * thing gets introduced
-		 */
-		if (!print_once) {
-			WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
-				"does not provide ACPI _PSS objects "
-				"in a way that Linux understands. "
-				"Please report this to the Linux ACPI"
-				" maintainers and complain to your "
-				"BIOS vendor.\n");
-			print_once++;
-		}
+		printk_once(ACPI_PSS_BIOS_BUG_MSG);
 		goto err_out;
 	}
 	if (pol->cpu != 0) {
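
The helper encodes AMD's documented frequency formula for families 0x10/0x11, where CpuFid sets the base multiplier and CpuDid is a power-of-two divisor. A worked example with hypothetical fid/did values on family 0x10:

	/*
	 *	fid = 0x0a, did = 1:
	 *	mhz = (100 * (0x0a + 0x10)) >> 1 = 2600 >> 1 = 1300
	 *	returns 1300 * 1000 = 1300000  (kHz, cpufreq's unit)
	 */

Deriving the table from fid/did instead of trusting the ACPI _PSS core_frequency avoids mismatches when the BIOS rounds the frequency it reports.
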
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0b776c09aff3..d21d4fb161f7 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -275,7 +275,11 @@ static void __init print_mtrr_state(void)
 	}
 	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
 	       mtrr_state.enabled & 2 ? "en" : "dis");
-	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+	if (size_or_mask & 0xffffffffUL)
+		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
+	else
+		high_width = ffs(size_or_mask>>32) + 32 - 1;
+	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
 	for (i = 0; i < num_var_ranges; ++i) {
 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
 			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
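
The old expression handed the full 64-bit size_or_mask to ffs(), which takes an int: with 44 or more physical address bits, the low 32 bits of the mask are all zero, so it computed ffs(0) - 1 = -1 and derived a bogus field width. Worked examples, assuming PAGE_SHIFT == 12:

	/*
	 * 36-bit phys:  low half 0xff000000, ffs() - 1 = 24
	 *	high_width = (24 - 20 + 3) / 4 = 1 hex digit
	 * 44-bit phys:  low half 0, so take the high half:
	 *	ffs(size_or_mask >> 32) + 32 - 1 = 32
	 *	high_width = (32 - 20 + 3) / 4 = 3 hex digits
	 */
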
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 18dfa30795c9..b79c5533c421 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		_ASM_EXTABLE(1b, 4b)
 		_ASM_EXTABLE(2b, 4b)
 
-		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [old] "=&r" (old), [faulted] "=r" (faulted)
 		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
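
The one-character change marks [old] as an earlyclobber output: the asm body writes it before its final read of the inputs, so GCC must not assign [old] and an input to the same register. The same hazard in a minimal standalone form (hypothetical helper, not kernel code):

	static inline unsigned long swap_in(unsigned long *p, unsigned long v)
	{
		unsigned long old;

		asm("mov (%2), %0\n\t"	/* %0 is written here ...          */
		    "mov %1, (%2)"	/* ... while %1 and %2 still live  */
		    : "=&r" (old)	/* so %0 must be "=&r", not "=r"   */
		    : "r" (v), "r" (p)
		    : "memory");
		return old;
	}
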
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index eedfaebe1063..b1f4dffb919e 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -88,6 +88,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs[GDB_SS]	= __KERNEL_DS;
 	gdb_regs[GDB_FS]	= 0xFFFF;
 	gdb_regs[GDB_GS]	= 0xFFFF;
+	gdb_regs[GDB_SP]	= (int)&regs->sp;
 #else
 	gdb_regs[GDB_R8]	= regs->r8;
 	gdb_regs[GDB_R9]	= regs->r9;
@@ -100,8 +101,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs32[GDB_PS]	= regs->flags;
 	gdb_regs32[GDB_CS]	= regs->cs;
 	gdb_regs32[GDB_SS]	= regs->ss;
-#endif
 	gdb_regs[GDB_SP]	= regs->sp;
+#endif
 }
 
 /**
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8e45f4464880..9faf43bea336 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -134,7 +134,9 @@ static void *get_call_destination(u8 type)
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 		.pv_lock_ops = pv_lock_ops,
+#endif
 	};
 	return *((void **)&tmpl + type);
 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1340dad417f4..667188e0b5a0 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
 		},
 	},
+	{	/* Handle problems with rebooting on Sony VGN-Z540N */
+		.callback = set_bios_reboot,
+		.ident = "Sony VGN-Z540N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 3a97a4cf1872..8f0e13be36b3 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -160,8 +160,10 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	/*
 	 * If large page isn't supported, there's no benefit in doing
 	 * this.  Also, on non-NUMA, embedding is better.
+	 *
+	 * NOTE: disabled for now.
 	 */
-	if (!cpu_has_pse || !pcpu_need_numa())
+	if (true || !cpu_has_pse || !pcpu_need_numa())
 		return -EINVAL;
 
 	/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b6caf1329b1b..32cf11e5728a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2897,8 +2897,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->tlb_flush(vcpu);
-	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+	kvm_set_cr3(vcpu, vcpu->arch.cr3);
 	return 1;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1821c2078199..1f8510c51d6e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -411,7 +411,6 @@ static __init int svm_hardware_setup(void)
 
 	iopm_va = page_address(iopm_pages);
 	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
-	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
 	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
 	if (boot_cpu_has(X86_FEATURE_NX))
@@ -796,6 +795,11 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
 	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
+	/* AMD's VMCB does not have an explicit unusable field, so emulate it
+	 * for cross vendor migration purposes by "not present"
+	 */
+	var->unusable = !var->present || (var->type == 0);
+
 	switch (seg) {
 	case VCPU_SREG_CS:
 		/*
@@ -827,8 +831,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		var->type |= 0x1;
 		break;
 	}
-
-	var->unusable = !var->present;
 }
 
 static int svm_get_cpl(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7c1ce5ac6131..3944e917e794 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -338,6 +338,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
@@ -351,7 +354,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
+	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
@@ -1121,9 +1125,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 static int is_efer_nx(void)
 {
-	u64 efer;
+	unsigned long long efer = 0;
 
-	rdmsrl(MSR_EFER, efer);
+	rdmsrl_safe(MSR_EFER, &efer);
 	return efer & EFER_NX;
 }
 
@@ -1259,7 +1263,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
 		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
 		bit(X86_FEATURE_SYSCALL) |
-		(bit(X86_FEATURE_NX) && is_efer_nx()) |
+		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
 #ifdef CONFIG_X86_64
 		bit(X86_FEATURE_LM) |
 #endif
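
The CR4 change is an optimization with an architectural justification: the PDPTRs are consulted only by PAE paging, so a guest CR4 write needs a PDPTR reload only if it toggles one of PGE, PSE or PAE. For example, a guest flipping just CR4.OSFXSR (bit 9) now satisfies

	((cr4 ^ old_cr4) & pdptr_bits) == 0

and skips load_pdptrs(), and with it four PDPTE reads from guest memory. The is_efer_nx() switch to rdmsrl_safe() likewise keeps a #GP on a missing MSR_EFER from becoming a host oops.
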
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8f307d914c2e..f46c340727b8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	unsigned long sbase = saddr & PUD_MASK;
 	unsigned long s_end = sbase + PUD_SIZE;
 
+	/* Allow segments to share if only one is marked locked */
+	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
+	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
+
 	/*
 	 * match the virtual addresses, permission and the alignment of the
 	 * page table page.
 	 */
 	if (pmd_index(addr) != pmd_index(saddr) ||
-	    vma->vm_flags != svma->vm_flags ||
+	    vm_flags != svm_flags ||
 	    sbase < svma->vm_start || svma->vm_end < s_end)
 		return 0;
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 797f9f107cb6..e17efed088c5 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg)
 	 */
 	__flush_tlb_all();
 
-	if (cache && boot_cpu_data.x86_model >= 4)
+	if (cache && boot_cpu_data.x86 >= 4)
 		wbinvd();
 }
 
@@ -208,20 +208,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86_model >= 4)
-			wbinvd();
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
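
Both hunks fix the same field mix-up: boot_cpu_data.x86 is the CPU family, x86_model the model within a family, and WBINVD exists from the 486 (family 4) onward. An illustrative consequence of the old test (hypothetical CPU):

	/* family 6 (P6 and later), model 3: supports wbinvd(), yet
	 * "x86_model >= 4" is false, so the cache flush was skipped */

The cpa_flush_array() rewrite also moves the 4M wbinvd threshold decision before the cross-CPU call, so large-range flushes now run wbinvd() on every CPU via __cpa_flush_all() instead of only on the local one.
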
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 04df67f8a7ba..044897be021f 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -76,9 +76,9 @@ void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
-	unsigned long stack = kernel_trap_sp(regs);
 
 	if (!user_mode_vm(regs)) {
+		unsigned long stack = kernel_stack_pointer(regs);
 		if (depth)
 			dump_trace(NULL, regs, (unsigned long *)stack, 0,
 				   &backtrace_ops, &depth);
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3b767d03fd6a..172438f86a02 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -9,5 +9,6 @@ obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o
 
-obj-$(CONFIG_SMP)		+= smp.o spinlock.o
-obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e25a78e1113a..fba55b1a4021 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/module.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 20139464943c..ca6596b05d53 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -62,15 +62,26 @@ void xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
-void __init xen_init_spinlocks(void);
-__cpuinit void xen_init_lock_cpu(int cpu);
-void xen_uninit_lock_cpu(int cpu);
-
 extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
+#else
+static inline void xen_init_spinlocks(void)
+{
+}
+static inline void xen_init_lock_cpu(int cpu)
+{
+}
+static inline void xen_uninit_lock_cpu(int cpu)
+{
+}
+#endif
 
 /* Declare an asm function, along with symbols needed to make it
    inlineable */