author     H. Peter Anvin <hpa@linux.intel.com>  2009-01-30 17:50:57 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>  2009-01-30 17:50:57 -0500
commit     9b7ed8faa034fc2d350e2eff5c68680eb5c43a07 (patch)
tree       5c94c34ad30e312604c1ce4f08ab6631b64a94f5 /arch/x86/xen
parent     6522869c34664dd5f05a0a327e93915b1281c90d (diff)
parent     c43e0e46adf79c321ed3fbf0351e1005fb8a2413 (diff)
Merge branch 'core/percpu' into x86/paravirt
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/enlighten.c   | 46
-rw-r--r--  arch/x86/xen/irq.c         |  8
-rw-r--r--  arch/x86/xen/mmu.c         |  8
-rw-r--r--  arch/x86/xen/multicalls.h  |  2
-rw-r--r--  arch/x86/xen/smp.c         | 34
-rw-r--r--  arch/x86/xen/suspend.c     |  1
-rw-r--r--  arch/x86/xen/xen-asm_64.S  | 31
7 files changed, 47 insertions, 83 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6f1bb71aa13a..6b3f7eef57e3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-				 unsigned long va)
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+				 struct mm_struct *mm, unsigned long va)
 {
 	struct {
 		struct mmuext_op op;
-		cpumask_t mask;
+		DECLARE_BITMAP(mask, NR_CPUS);
 	} *args;
-	cpumask_t cpumask = *cpus;
 	struct multicall_space mcs;
 
-	/*
-	 * A couple of (to be removed) sanity checks:
-	 *
-	 * - current CPU must not be in mask
-	 * - mask must exist :)
-	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(cpus));
 	BUG_ON(!mm);
 
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (cpus_empty(cpumask))
-		return;
-
 	mcs = xen_mc_entry(sizeof(*args));
 	args = mcs.args;
-	args->mask = cpumask;
-	args->op.arg2.vcpumask = &args->mask;
+	args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+	/* Remove us, and any offline CPUS. */
+	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+	if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+		goto issue;
 
 	if (va == TLB_FLUSH_ALL) {
 		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
+issue:
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -702,17 +695,17 @@ static void xen_write_cr0(unsigned long cr0)
 
 static void xen_write_cr2(unsigned long cr2)
 {
-	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
+	percpu_read(xen_vcpu)->arch.cr2 = cr2;
 }
 
 static unsigned long xen_read_cr2(void)
 {
-	return x86_read_percpu(xen_vcpu)->arch.cr2;
+	return percpu_read(xen_vcpu)->arch.cr2;
 }
 
 static unsigned long xen_read_cr2_direct(void)
 {
-	return x86_read_percpu(xen_vcpu_info.arch.cr2);
+	return percpu_read(xen_vcpu_info.arch.cr2);
 }
 
 static void xen_write_cr4(unsigned long cr4)
@@ -725,12 +718,12 @@ static void xen_write_cr4(unsigned long cr4)
 
 static unsigned long xen_read_cr3(void)
 {
-	return x86_read_percpu(xen_cr3);
+	return percpu_read(xen_cr3);
 }
 
 static void set_current_cr3(void *v)
 {
-	x86_write_percpu(xen_current_cr3, (unsigned long)v);
+	percpu_write(xen_current_cr3, (unsigned long)v);
 }
 
 static void __xen_write_cr3(bool kernel, unsigned long cr3)
@@ -755,7 +748,7 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
 	if (kernel) {
-		x86_write_percpu(xen_cr3, cr3);
+		percpu_write(xen_cr3, cr3);
 
 		/* Update xen_current_cr3 once the batch has actually
 		   been submitted. */
@@ -771,7 +764,7 @@ static void xen_write_cr3(unsigned long cr3)
 
 	/* Update while interrupts are disabled, so its atomic with
 	   respect to ipis */
-	x86_write_percpu(xen_cr3, cr3);
+	percpu_write(xen_cr3, cr3);
 
 	__xen_write_cr3(true, cr3);
 
@@ -1651,7 +1644,6 @@ asmlinkage void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_64
 	/* Disable until direct per-cpu data access. */
 	have_vcpu_info_placement = 0;
-	x86_64_init_pda();
 #endif
 
 	xen_smp_init();
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..2e8271431e1a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -83,7 +83,7 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
 
@@ -96,7 +96,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 503c240e26c7..98cb9869eb24 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1063,18 +1063,14 @@ static void drop_other_mm_ref(void *info)
 	struct mm_struct *mm = info;
 	struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-	active_mm = read_pda(active_mm);
-#else
-	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+	active_mm = percpu_read(cpu_tlbstate.active_mm);
 
 	if (active_mm == mm)
 		leave_mm(smp_processor_id());
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
 		load_cr3(swapper_pg_dir);
 		arch_flush_lazy_cpu_mode();
 	}
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 858938241616..e786fa7f2615 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -39,7 +39,7 @@ static inline void xen_mc_issue(unsigned mode)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
-	local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+	local_irq_restore(percpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..7735e3dd359c 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 
 	return IRQ_HANDLED;
 }
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 	xen_setup_cpu_clockevents();
 
 	cpu_set(cpu, cpu_online_map);
-	x86_write_percpu(cpu_state, CPU_ONLINE);
+	percpu_write(cpu_state, CPU_ONLINE);
 	wmb();
 
 	/* We can take interrupts now: we're officially "up". */
@@ -283,22 +279,10 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#ifdef CONFIG_X86_64
-	/* Allocate node local memory for AP pdas */
-	WARN_ON(cpu == 0);
-	if (cpu > 0) {
-		rc = get_local_pda(cpu);
-		if (rc)
-			return rc;
-	}
-#endif
-
-#ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
 	irq_ctx_init(cpu);
 #else
-	cpu_pda(cpu)->pcurrent = idle;
 	clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
 	xen_setup_timer(cpu);
@@ -445,11 +429,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -459,11 +439,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 212ffe012b76..95be7b434724 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -6,6 +6,7 @@
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
+#include <asm/fixmap.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 05794c566e87..d6fc51f4ce85 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -17,6 +17,7 @@
 #include <asm/processor-flags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
+#include <asm/percpu.h>
 
 #include <xen/interface/xen.h>
 
@@ -28,12 +29,10 @@
 
 #if 1
 /*
-   x86-64 does not yet support direct access to percpu variables
-   via a segment override, so we just need to make sure this code
-   never gets used
+   FIXME: x86_64 now can support direct access to percpu variables
+   via a segment override. Update xen accordingly.
 */
 #define BUG ud2a
-#define PER_CPU_VAR(var, off) 0xdeadbeef
 #endif
 
 /*
@@ -45,14 +44,14 @@ ENTRY(xen_irq_enable_direct)
 	BUG
 
 	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 
 	/* Preempt here doesn't matter because that will deal with
 	   any pending interrupts. The pending check may end up being
 	   run on the wrong CPU, but that doesn't hurt. */
 
 	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
 	jz 1f
 
 2:	call check_events
@@ -69,7 +68,7 @@ ENDPATCH(xen_irq_enable_direct)
 ENTRY(xen_irq_disable_direct)
 	BUG
 
-	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 ENDPATCH(xen_irq_disable_direct)
 	ret
 	ENDPROC(xen_irq_disable_direct)
@@ -87,7 +86,7 @@ ENDPATCH(xen_irq_disable_direct)
 ENTRY(xen_save_fl_direct)
 	BUG
 
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
 	addb %ah,%ah
 ENDPATCH(xen_save_fl_direct)
@@ -107,13 +106,13 @@ ENTRY(xen_restore_fl_direct)
 	BUG
 
 	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	/* Preempt here doesn't matter because that will deal with
 	   any pending interrupts. The pending check may end up being
 	   run on the wrong CPU, but that doesn't hurt. */
 
 	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
 	jz 1f
 2:	call check_events
 1:
@@ -195,11 +194,11 @@ RELOC(xen_sysexit, 1b+1)
 ENTRY(xen_sysret64)
 	/* We're already on the usermode stack at this point, but still
 	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack,%rsp
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack),%rsp
 
 	pushq $__USER_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER_CS
 	pushq %rcx
@@ -212,11 +211,11 @@ RELOC(xen_sysret64, 1b+1)
 ENTRY(xen_sysret32)
 	/* We're already on the usermode stack at this point, but still
 	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack, %rsp
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER32_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER32_CS
 	pushq %rcx