Diffstat (limited to 'arch/x86/xen')
 arch/x86/xen/Makefile    |   2
 arch/x86/xen/enlighten.c | 161
 arch/x86/xen/irq.c       |   5
 arch/x86/xen/mmu.c       |  16
 arch/x86/xen/mmu.h       |   2
 arch/x86/xen/smp.c       |   1
 arch/x86/xen/spinlock.c  |  28
 arch/x86/xen/xen-ops.h   |   2
 8 files changed, 164 insertions, 53 deletions
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7410640db173..3bb4fc21f4f2 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -8,6 +8,7 @@ endif
 # Make sure early boot has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_enlighten.o	:= $(nostackp)
+CFLAGS_mmu.o		:= $(nostackp)
 
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
@@ -16,3 +17,4 @@ obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
+
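
Note: these two objects are compiled with -fno-stack-protector because they
contain code that runs before xen_setup_stackprotector() (added below in
enlighten.c) has installed a GDT with a usable per-cpu canary segment; any
instrumented function would fault on its %gs-relative canary access. As a
rough sketch of what -fstack-protector instrumentation does (illustrative
symbol names, not the kernel's actual per-cpu canary):

	extern unsigned long __stack_chk_guard;	/* guard value; %gs-relative on x86 */
	void __stack_chk_fail(void);		/* called on a mismatch */

	void instrumented(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue: stash guard */
		char buf[64];

		/* ... function body that might overflow buf ... */

		if (canary != __stack_chk_guard)	/* epilogue: frame smashed? */
			__stack_chk_fail();
	}
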
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e90540a46a0b..544eb7496531 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -51,6 +51,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
+#include <asm/stackprotector.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -215,6 +216,7 @@ static __init void xen_init_cpuid_mask(void)
 		  (1 << X86_FEATURE_ACPI));  /* disable ACPI */
 
 	ax = 1;
+	cx = 0;
 	xen_cpuid(&ax, &bx, &cx, &dx);
 
 	/* cpuid claims we support xsave; try enabling it to see what happens */
@@ -329,18 +331,28 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 	unsigned long frames[pages];
 	int f;
 
-	/* A GDT can be up to 64k in size, which corresponds to 8192
-	   8-byte entries, or 16 4k pages.. */
+	/*
+	 * A GDT can be up to 64k in size, which corresponds to 8192
+	 * 8-byte entries, or 16 4k pages..
+	 */
 
 	BUG_ON(size > 65536);
 	BUG_ON(va & ~PAGE_MASK);
 
 	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
 		int level;
-		pte_t *ptep = lookup_address(va, &level);
+		pte_t *ptep;
 		unsigned long pfn, mfn;
 		void *virt;
 
+		/*
+		 * The GDT is per-cpu and is in the percpu data area.
+		 * That can be virtually mapped, so we need to do a
+		 * page-walk to get the underlying MFN for the
+		 * hypercall.  The page can also be in the kernel's
+		 * linear range, so we need to RO that mapping too.
+		 */
+		ptep = lookup_address(va, &level);
 		BUG_ON(ptep == NULL);
 
 		pfn = pte_pfn(*ptep);
@@ -357,6 +369,44 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 		BUG();
 }
 
+/*
+ * load_gdt for early boot, when the gdt is only mapped once
+ */
+static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
+{
+	unsigned long va = dtr->address;
+	unsigned int size = dtr->size + 1;
+	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	unsigned long frames[pages];
+	int f;
+
+	/*
+	 * A GDT can be up to 64k in size, which corresponds to 8192
+	 * 8-byte entries, or 16 4k pages..
+	 */
+
+	BUG_ON(size > 65536);
+	BUG_ON(va & ~PAGE_MASK);
+
+	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+		pte_t pte;
+		unsigned long pfn, mfn;
+
+		pfn = virt_to_pfn(va);
+		mfn = pfn_to_mfn(pfn);
+
+		pte = pfn_pte(pfn, PAGE_KERNEL_RO);
+
+		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
+			BUG();
+
+		frames[f] = mfn;
+	}
+
+	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+		BUG();
+}
+
 static void load_TLS_descriptor(struct thread_struct *t,
 				unsigned int cpu, unsigned int i)
 {
@@ -580,6 +630,29 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 	preempt_enable();
 }
 
+/*
+ * Version of write_gdt_entry for use at early boot-time needed to
+ * update an entry as simply as possible.
+ */
+static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
+					    const void *desc, int type)
+{
+	switch (type) {
+	case DESC_LDT:
+	case DESC_TSS:
+		/* ignore */
+		break;
+
+	default: {
+		xmaddr_t maddr = virt_to_machine(&dt[entry]);
+
+		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
+			dt[entry] = *(struct desc_struct *)desc;
+	}
+
+	}
+}
+
 static void xen_load_sp0(struct tss_struct *tss,
 			 struct thread_struct *thread)
 {
@@ -713,7 +786,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 	set:
 		base = ((u64)high << 32) | low;
 		if (HYPERVISOR_set_segment_base(which, base) != 0)
-			ret = -EFAULT;
+			ret = -EIO;
 		break;
 #endif
 
@@ -839,19 +912,9 @@ static const struct pv_info xen_info __initdata = {
 
 static const struct pv_init_ops xen_init_ops __initdata = {
 	.patch = xen_patch,
-
-	.banner = xen_banner,
-	.memory_setup = xen_memory_setup,
-	.arch_setup = xen_arch_setup,
-	.post_allocator_init = xen_post_allocator_init,
 };
 
 static const struct pv_time_ops xen_time_ops __initdata = {
-	.time_init = xen_time_init,
-
-	.set_wallclock = xen_set_wallclock,
-	.get_wallclock = xen_get_wallclock,
-	.get_tsc_khz = xen_tsc_khz,
 	.sched_clock = xen_sched_clock,
 };
 
@@ -917,8 +980,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 
 static const struct pv_apic_ops xen_apic_ops __initdata = {
 #ifdef CONFIG_X86_LOCAL_APIC
-	.setup_boot_clock = paravirt_nop,
-	.setup_secondary_clock = paravirt_nop,
 	.startup_ipi_hook = paravirt_nop,
 #endif
 };
@@ -964,6 +1025,23 @@ static const struct machine_ops __initdata xen_machine_ops = {
 	.emergency_restart = xen_emergency_restart,
 };
 
+/*
+ * Set up the GDT and segment registers for -fstack-protector.  Until
+ * we do this, we have to be careful not to call any stack-protected
+ * function, which is most of the kernel.
+ */
+static void __init xen_setup_stackprotector(void)
+{
+	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
+	pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+
+	setup_stack_canary_segment(0);
+	switch_to_new_gdt(0);
+
+	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
+	pv_cpu_ops.load_gdt = xen_load_gdt;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -980,16 +1058,43 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_time_ops = xen_time_ops;
 	pv_cpu_ops = xen_cpu_ops;
 	pv_apic_ops = xen_apic_ops;
-	pv_mmu_ops = xen_mmu_ops;
 
-#ifdef CONFIG_X86_64
+	x86_init.resources.memory_setup = xen_memory_setup;
+	x86_init.oem.arch_setup = xen_arch_setup;
+	x86_init.oem.banner = xen_banner;
+
+	x86_init.timers.timer_init = xen_time_init;
+	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_cpuinit.setup_percpu_clockev = x86_init_noop;
+
+	x86_platform.calibrate_tsc = xen_tsc_khz;
+	x86_platform.get_wallclock = xen_get_wallclock;
+	x86_platform.set_wallclock = xen_set_wallclock;
+
 	/*
-	 * Setup percpu state.  We only need to do this for 64-bit
-	 * because 32-bit already has %fs set properly.
+	 * Set up some pagetable state before starting to set any ptes.
 	 */
-	load_percpu_segment(0);
-#endif
 
+	/* Prevent unwanted bits from being set in PTEs. */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
+	if (!xen_initial_domain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
+	__supported_pte_mask |= _PAGE_IOMAP;
+
+	xen_setup_features();
+
+	/* Get mfn list */
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		xen_build_dynamic_phys_to_machine();
+
+	/*
+	 * Set up kernel GDT and segment registers, mainly so that
+	 * -fstack-protector code can be executed.
+	 */
+	xen_setup_stackprotector();
+
+	xen_init_mmu_ops();
 	xen_init_irq_ops();
 	xen_init_cpuid_mask();
 
@@ -1000,8 +1105,6 @@ asmlinkage void __init xen_start_kernel(void)
 	set_xen_basic_apic_ops();
 #endif
 
-	xen_setup_features();
-
 	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
 		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
 		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
@@ -1018,17 +1121,8 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_smp_init();
 
-	/* Get mfn list */
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		xen_build_dynamic_phys_to_machine();
-
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
-	/* Prevent unwanted bits from being set in PTEs. */
-	__supported_pte_mask &= ~_PAGE_GLOBAL;
-	if (!xen_initial_domain())
-		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
 #ifdef CONFIG_X86_64
 	/* Work out if we support NX */
 	check_efer();
@@ -1059,6 +1153,7 @@ asmlinkage void __init xen_start_kernel(void)
 	/* set up basic CPUID stuff */
 	cpu_detect(&new_cpu_data);
 	new_cpu_data.hard_math = 1;
+	new_cpu_data.wp_works_ok = 1;
 	new_cpu_data.x86_capability[0] = cpuid_edx(1);
 #endif
 
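Note on ordering in the reworked xen_start_kernel() above: the boot GDT
loader (xen_load_gdt_boot) calls pfn_to_mfn(), so the p2m table and the
feature flags it depends on must exist before the stack protector is set
up, and only after that may stack-protected code (most of the kernel,
including the mmu/irq init paths) be called. A minimal sketch of the
dependency chain, using the functions from this patch (the helper name is
hypothetical):

	static void __init boot_order_sketch(void)
	{
		xen_setup_features();		/* XENFEAT_* queries work */
		if (!xen_feature(XENFEAT_auto_translated_physmap))
			xen_build_dynamic_phys_to_machine(); /* pfn_to_mfn() works... */
		xen_setup_stackprotector();	/* ...for xen_load_gdt_boot() */
		xen_init_mmu_ops();		/* stack-protected calls are safe now */
	}
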
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index cfd17799bd6d..9d30105a0c4a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -1,5 +1,7 @@
 #include <linux/hardirq.h>
 
+#include <asm/x86_init.h>
+
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
 #include <xen/interface/vcpu.h>
@@ -112,8 +114,6 @@ static void xen_halt(void)
 }
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
-	.init_IRQ = xen_init_IRQ,
-
 	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
 	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
 	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
@@ -129,4 +129,5 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
 void __init xen_init_irq_ops()
 {
 	pv_irq_ops = xen_irq_ops;
+	x86_init.irqs.intr_init = xen_init_IRQ;
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4ceb28581652..093dd59b5385 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1229,9 +1229,12 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static void xen_post_allocator_init(void);
+
 static __init void xen_pagetable_setup_done(pgd_t *base)
 {
 	xen_setup_shared_info();
+	xen_post_allocator_init();
 }
 
 static void xen_write_cr2(unsigned long cr2)
@@ -1841,7 +1844,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-__init void xen_post_allocator_init(void)
+static __init void xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
@@ -1875,10 +1878,7 @@ static void xen_leave_lazy_mmu(void)
 	preempt_enable();
 }
 
-const struct pv_mmu_ops xen_mmu_ops __initdata = {
-	.pagetable_setup_start = xen_pagetable_setup_start,
-	.pagetable_setup_done = xen_pagetable_setup_done,
-
+static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.read_cr2 = xen_read_cr2,
 	.write_cr2 = xen_write_cr2,
 
@@ -1954,6 +1954,12 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.set_fixmap = xen_set_fixmap,
 };
 
+void __init xen_init_mmu_ops(void)
+{
+	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
+	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
+	pv_mmu_ops = xen_mmu_ops;
+}
 
 #ifdef CONFIG_XEN_DEBUG_FS
 
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index da7302624897..5fe6bc7f5ecf 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -59,5 +59,5 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 unsigned long xen_read_cr2_direct(void);
 
-extern const struct pv_mmu_ops xen_mmu_ops;
+extern void xen_init_mmu_ops(void);
 #endif	/* _XEN_MMU_H */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 429834ec1687..fe03eeed7b48 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -236,6 +236,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
+	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
 #else
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5601506f2dd9..36a5141108df 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
-	unsigned long flags;
 	u64 start;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
-	flags = __raw_local_save_flags();
-	if (irq_enable) {
-		ADD_STATS(taken_slow_irqenable, 1);
-		raw_local_irq_enable();
-	}
-
 	ADD_STATS(taken_slow, 1);
 	ADD_STATS(taken_slow_nested, prev != NULL);
 
 	do {
+		unsigned long flags;
+
 		/* clear pending */
 		xen_clear_irq_pending(irq);
 
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 			goto out;
 		}
 
+		flags = __raw_local_save_flags();
+		if (irq_enable) {
+			ADD_STATS(taken_slow_irqenable, 1);
+			raw_local_irq_enable();
+		}
+
 		/*
 		 * Block until irq becomes pending.  If we're
 		 * interrupted at this point (after the trylock but
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+
+		raw_local_irq_restore(flags);
+
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
-	raw_local_irq_restore(flags);
 	unspinning_lock(xl, prev);
 	spin_time_accum_blocked(start);
 
@@ -323,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
-	/* make sure unlock happens before kick */
-	barrier();
+	/*
+	 * Make sure unlock happens before checking for waiting
+	 * spinners.  We need a strong barrier to enforce the
+	 * write-read ordering to different memory locations, as the
+	 * CPU makes no implied guarantees about their ordering.
+	 */
+	mb();
 
 	if (unlikely(xl->spinners))
 		xen_spin_unlock_slow(xl);
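
Note on the barrier() -> mb() change above: barrier() only constrains the
compiler, but the unlock/kick handshake needs the CPU's store to xl->lock
to be visible before its load of xl->spinners, and a store followed by a
load from a different location is exactly the reordering x86 permits. A
hedged sketch of the unlock side (same fields as the code above):

	xl->lock = 0;			/* store: release the lock */
	mb();				/* full barrier: order the store
					   before the load below */
	if (xl->spinners)		/* load: anyone waiting? */
		xen_spin_unlock_slow(xl);

Without the full barrier, the unlocker can read a stale xl->spinners == 0
and skip the kick while a waiter is still registering itself; the waiter
then blocks with no one left to wake it.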
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 22494fd4c9b5..355fa6b99c9c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -30,8 +30,6 @@ pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 
-void xen_post_allocator_init(void);
-
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void __init xen_init_IRQ(void);