Diffstat (limited to 'arch/x86/xen/irq.c')
-rw-r--r--  arch/x86/xen/irq.c  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c60..5a070900ad3 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void)
 	*/
 	return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
 static void xen_restore_fl(unsigned long flags)
 {
@@ -62,7 +63,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags)
 			xen_force_evtchn_callback();
 	}
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
 static void xen_irq_disable(void)
 {
@@ -83,9 +85,10 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
 static void xen_irq_enable(void)
 {
@@ -96,7 +99,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any
@@ -106,6 +109,7 @@ static void xen_irq_enable(void)
 	if (unlikely(vcpu->evtchn_upcall_pending))
 		xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
 
 static void xen_safe_halt(void)
 {
@@ -124,10 +128,12 @@ static void xen_halt(void)
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
 	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
+
+	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
 	.safe_halt = xen_safe_halt,
 	.halt = xen_halt,
 #ifdef CONFIG_X86_64
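
Note: the diff switches the four IRQ ops from bare function pointers to the paravirt callee-save form. PV_CALLEE_SAVE_REGS_THUNK() emits a register-saving assembly thunk alongside each C implementation, and PV_CALLEE_SAVE() records the op in pv_irq_ops through a small wrapper rather than as a plain pointer. The following is a rough, self-contained sketch of the wrapping half only; the names (callee_save, CALLEE_SAVE, demo_irq_ops, demo_save_fl) are hypothetical stand-ins, and the assembly thunk side is not reproduced here.

/*
 * Hypothetical sketch (not the kernel's PV_CALLEE_SAVE macros) of the
 * wrapping pattern the last hunk switches to: the ops table stores each
 * handler through a wrapper struct rather than as a bare function
 * pointer, so call sites can tell register-light ops from normal ones.
 */
#include <stdio.h>

struct callee_save {
	unsigned long (*func)(void);	/* demo: one fixed signature */
};

/* Wrap a handler for use in an ops-table initializer. */
#define CALLEE_SAVE(f)	{ .func = (f) }

struct demo_irq_ops {
	struct callee_save save_fl;	/* mirrors pv_irq_ops.save_fl */
};

/* Stand-in for xen_save_fl(): pretend interrupts are enabled. */
static unsigned long demo_save_fl(void)
{
	return 0x200;			/* X86_EFLAGS_IF */
}

static const struct demo_irq_ops demo_ops = {
	.save_fl = CALLEE_SAVE(demo_save_fl),
};

int main(void)
{
	printf("flags = %#lx\n", demo_ops.save_fl.func());
	return 0;
}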