path: root/arch/x86/xen/irq.c
author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-11 14:52:22 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-11 14:52:22 -0500
commit		9049a11de73d3ecc623f1903100d099f82ede56c (patch)
tree		c03d130d58168e337a66fe999682452b7a02b42b /arch/x86/xen/irq.c
parent		c47c1b1f3a9d6973108020df1dcab7604f7774dd (diff)
parent		e4d0407185cdbdcfd99fc23bde2e5454bbc46329 (diff)
Merge commit 'remotes/tip/x86/paravirt' into x86/untangle2
* commit 'remotes/tip/x86/paravirt': (175 commits)
  xen: use direct ops on 64-bit
  xen: make direct versions of irq_enable/disable/save/restore to common code
  xen: setup percpu data pointers
  xen: fix 32-bit build resulting from mmu move
  x86/paravirt: return full 64-bit result
  x86, percpu: fix kexec with vmlinux
  x86/vmi: fix interrupt enable/disable/save/restore calling convention.
  x86/paravirt: don't restore second return reg
  xen: setup percpu data pointers
  x86: split loading percpu segments from loading gdt
  x86: pass in cpu number to switch_to_new_gdt()
  x86: UV fix uv_flush_send_and_wait()
  x86/paravirt: fix missing callee-save call on pud_val
  x86/paravirt: use callee-saved convention for pte_val/make_pte/etc
  x86/paravirt: implement PVOP_CALL macros for callee-save functions
  x86/paravirt: add register-saving thunks to reduce caller register pressure
  x86/paravirt: selectively save/restore regs around pvops calls
  x86: fix paravirt clobber in entry_64.S
  x86/pvops: add a paravirt_ident functions to allow special patching
  xen: move remaining mmu-related stuff into mmu.c
  ...

Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
Diffstat (limited to 'arch/x86/xen/irq.c')
-rw-r--r--	arch/x86/xen/irq.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..5a070900ad35 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void)
 	*/
 	return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
 static void xen_restore_fl(unsigned long flags)
 {
@@ -62,7 +63,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags)
 		xen_force_evtchn_callback();
 	}
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
 static void xen_irq_disable(void)
 {
@@ -83,9 +85,10 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
 static void xen_irq_enable(void)
 {
@@ -96,7 +99,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any
@@ -106,6 +109,7 @@ static void xen_irq_enable(void)
 	if (unlikely(vcpu->evtchn_upcall_pending))
 		xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
 
 static void xen_safe_halt(void)
 {
@@ -124,10 +128,12 @@ static void xen_halt(void)
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
 	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
+
+	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
 	.safe_halt = xen_safe_halt,
 	.halt = xen_halt,
 #ifdef CONFIG_X86_64
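
Note: the substance of this diff for arch/x86/xen/irq.c is that the pv_irq_ops slots no longer hold bare function pointers. Each handler gains a PV_CALLEE_SAVE_REGS_THUNK() companion and is registered through PV_CALLEE_SAVE(), per the "register-saving thunks" commits listed in the merge above, while the x86_read_percpu() accesses become percpu_read(). The following is a minimal stand-alone C sketch of that registration shape only; the sketch_* names and SKETCH_* macros are illustrative stand-ins rather than the kernel's definitions, and the real __raw_callee_save_<func> thunk is an assembly stub that saves and restores caller-clobbered registers, not a plain C wrapper as modelled here.

#include <stdio.h>

/* Simplified stand-in for struct paravirt_callee_save. */
struct sketch_callee_save {
	unsigned long (*func)(void);
};

/*
 * Stand-in for PV_CALLEE_SAVE_REGS_THUNK(): defines __raw_callee_save_<func>.
 * The kernel emits this as an asm thunk; here the register save/restore is
 * only indicated by comments.
 */
#define SKETCH_CALLEE_SAVE_REGS_THUNK(func)			\
	static unsigned long __raw_callee_save_##func(void)	\
	{							\
		/* a real thunk saves caller-clobbered regs here */ \
		unsigned long ret = func();			\
		/* ...and restores them before returning */	\
		return ret;					\
	}

/* Stand-in for PV_CALLEE_SAVE(): point the ops entry at the thunk. */
#define SKETCH_CALLEE_SAVE(func) \
	{ .func = __raw_callee_save_##func }

/* Plays the role of xen_save_fl(); the real one reads evtchn_upcall_mask. */
static unsigned long sketch_save_fl(void)
{
	return 0;
}
SKETCH_CALLEE_SAVE_REGS_THUNK(sketch_save_fl)

/* Simplified pv_irq_ops with just the one callee-save slot. */
struct sketch_irq_ops {
	struct sketch_callee_save save_fl;
};

/* Mirrors the new initialiser: .save_fl = PV_CALLEE_SAVE(xen_save_fl) */
static const struct sketch_irq_ops sketch_irq_ops = {
	.save_fl = SKETCH_CALLEE_SAVE(sketch_save_fl),
};

int main(void)
{
	printf("save_fl() -> %lu\n", sketch_irq_ops.save_fl.func());
	return 0;
}

Because the ops table now stores these callee-save wrappers, call sites can treat a smaller register set as clobbered, which is the "reduce caller register pressure" point of the series this merge pulls in.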