about summary refs log tree commit diff stats
path: root/arch/x86/xen
diff options
context:
space:
mode:
author: Jeremy Fitzhardinge <jeremy@goop.org> 2009-02-02 16:55:42 -0500
committer: H. Peter Anvin <hpa@linux.intel.com> 2009-02-04 19:59:04 -0500
commit: 5393744b71ce797f1b1546fafaed127fc50c2b61 (patch)
tree: a974975e907b5e2830721c521965a53c34c10c8b /arch/x86/xen
parent: 383414322b3b3ced0cbc146801e0cc6c60a6c5f4 (diff)
xen: make direct versions of irq_enable/disable/save/restore to common code
Now that x86-64 has directly accessible percpu variables, it can also implement the direct versions of these operations, which operate on a vcpu_info structure directly embedded in the percpu area. In fact, the 64-bit versions are more or less identical, and so can be shared. The only two differences are: 1. xen_restore_fl_direct takes its argument in eax on 32-bit, and rdi on 64-bit. Unfortunately it isn't possible to refer directly to the 2nd lsb of rdi (as you can with %ah), so the code isn't quite as dense. 2. check_events needs two variants to save different registers. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--arch/x86/xen/Makefile3
-rw-r--r--arch/x86/xen/xen-asm.S140
-rw-r--r--arch/x86/xen/xen-asm.h12
-rw-r--r--arch/x86/xen/xen-asm_32.S111
-rw-r--r--arch/x86/xen/xen-asm_64.S134
5 files changed, 169 insertions, 231 deletions
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 6dcefba7836f..3b767d03fd6a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
6endif 6endif
7 7
8obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 8obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
9 time.o xen-asm_$(BITS).o grant-table.o suspend.o 9 time.o xen-asm.o xen-asm_$(BITS).o \
10 grant-table.o suspend.o
10 11
11obj-$(CONFIG_SMP) += smp.o spinlock.o 12obj-$(CONFIG_SMP) += smp.o spinlock.o
12obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file 13obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644
index 000000000000..4c6f96799131
--- /dev/null
+++ b/arch/x86/xen/xen-asm.S
@@ -0,0 +1,140 @@
1/*
2 Asm versions of Xen pv-ops, suitable for either direct use or inlining.
3 The inline versions are the same as the direct-use versions, with the
4 pre- and post-amble chopped off.
5
6 This code is encoded for size rather than absolute efficiency,
7 with a view to being able to inline as much as possible.
8
9 We only bother with direct forms (ie, vcpu in percpu data) of
10 the operations here; the indirect forms are better handled in
11 C, since they're generally too large to inline anyway.
12 */
13
14#include <asm/asm-offsets.h>
15#include <asm/percpu.h>
16#include <asm/processor-flags.h>
17
18#include "xen-asm.h"
19
20/*
21 Enable events. This clears the event mask and tests the pending
22 event status with one and operation. If there are pending
23 events, then enter the hypervisor to get them handled.
24 */
25ENTRY(xen_irq_enable_direct)
26 /* Unmask events */
27 movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
28
29 /* Preempt here doesn't matter because that will deal with
30 any pending interrupts. The pending check may end up being
31 run on the wrong CPU, but that doesn't hurt. */
32
33 /* Test for pending */
34 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
35 jz 1f
36
372: call check_events
381:
39ENDPATCH(xen_irq_enable_direct)
40 ret
41 ENDPROC(xen_irq_enable_direct)
42 RELOC(xen_irq_enable_direct, 2b+1)
43
44
45/*
46 Disabling events is simply a matter of making the event mask
47 non-zero.
48 */
49ENTRY(xen_irq_disable_direct)
50 movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
51ENDPATCH(xen_irq_disable_direct)
52 ret
53 ENDPROC(xen_irq_disable_direct)
54 RELOC(xen_irq_disable_direct, 0)
55
56/*
57 (xen_)save_fl is used to get the current interrupt enable status.
58 Callers expect the status to be in X86_EFLAGS_IF, and other bits
59 may be set in the return value. We take advantage of this by
60 making sure that X86_EFLAGS_IF has the right value (and other bits
61 in that byte are 0), but other bits in the return value are
62 undefined. We need to toggle the state of the bit, because
63 Xen and x86 use opposite senses (mask vs enable).
64 */
65ENTRY(xen_save_fl_direct)
66 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
67 setz %ah
68 addb %ah,%ah
69ENDPATCH(xen_save_fl_direct)
70 ret
71 ENDPROC(xen_save_fl_direct)
72 RELOC(xen_save_fl_direct, 0)
73
74
75/*
76 In principle the caller should be passing us a value return
77 from xen_save_fl_direct, but for robustness sake we test only
78 the X86_EFLAGS_IF flag rather than the whole byte. After
79 setting the interrupt mask state, it checks for unmasked
80 pending events and enters the hypervisor to get them delivered
81 if so.
82 */
83ENTRY(xen_restore_fl_direct)
84#ifdef CONFIG_X86_64
85 testw $X86_EFLAGS_IF, %di
86#else
87 testb $X86_EFLAGS_IF>>8, %ah
88#endif
89 setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
90 /* Preempt here doesn't matter because that will deal with
91 any pending interrupts. The pending check may end up being
92 run on the wrong CPU, but that doesn't hurt. */
93
94 /* check for unmasked and pending */
95 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
96 jz 1f
972: call check_events
981:
99ENDPATCH(xen_restore_fl_direct)
100 ret
101 ENDPROC(xen_restore_fl_direct)
102 RELOC(xen_restore_fl_direct, 2b+1)
103
104
105/*
106 Force an event check by making a hypercall,
107 but preserve regs before making the call.
108 */
109check_events:
110#ifdef CONFIG_X86_32
111 push %eax
112 push %ecx
113 push %edx
114 call xen_force_evtchn_callback
115 pop %edx
116 pop %ecx
117 pop %eax
118#else
119 push %rax
120 push %rcx
121 push %rdx
122 push %rsi
123 push %rdi
124 push %r8
125 push %r9
126 push %r10
127 push %r11
128 call xen_force_evtchn_callback
129 pop %r11
130 pop %r10
131 pop %r9
132 pop %r8
133 pop %rdi
134 pop %rsi
135 pop %rdx
136 pop %rcx
137 pop %rax
138#endif
139 ret
140
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
new file mode 100644
index 000000000000..465276467a47
--- /dev/null
+++ b/arch/x86/xen/xen-asm.h
@@ -0,0 +1,12 @@
1#ifndef _XEN_XEN_ASM_H
2#define _XEN_XEN_ASM_H
3
4#include <linux/linkage.h>
5
6#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
7#define ENDPATCH(x) .globl x##_end; x##_end=.
8
9/* Pseudo-flag used for virtual NMI, which we don't implement yet */
10#define XEN_EFLAGS_NMI 0x80000000
11
12#endif
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 42786f59d9c0..082d173caaf3 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -11,101 +11,28 @@
11 generally too large to inline anyway. 11 generally too large to inline anyway.
12 */ 12 */
13 13
14#include <linux/linkage.h> 14//#include <asm/asm-offsets.h>
15
16#include <asm/asm-offsets.h>
17#include <asm/thread_info.h> 15#include <asm/thread_info.h>
18#include <asm/percpu.h>
19#include <asm/processor-flags.h> 16#include <asm/processor-flags.h>
20#include <asm/segment.h> 17#include <asm/segment.h>
21 18
22#include <xen/interface/xen.h> 19#include <xen/interface/xen.h>
23 20
24#define RELOC(x, v) .globl x##_reloc; x##_reloc=v 21#include "xen-asm.h"
25#define ENDPATCH(x) .globl x##_end; x##_end=.
26
27/* Pseudo-flag used for virtual NMI, which we don't implement yet */
28#define XEN_EFLAGS_NMI 0x80000000
29
30/*
31 Enable events. This clears the event mask and tests the pending
32 event status with one and operation. If there are pending
33 events, then enter the hypervisor to get them handled.
34 */
35ENTRY(xen_irq_enable_direct)
36 /* Unmask events */
37 movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
38
39 /* Preempt here doesn't matter because that will deal with
40 any pending interrupts. The pending check may end up being
41 run on the wrong CPU, but that doesn't hurt. */
42
43 /* Test for pending */
44 testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
45 jz 1f
46
472: call check_events
481:
49ENDPATCH(xen_irq_enable_direct)
50 ret
51 ENDPROC(xen_irq_enable_direct)
52 RELOC(xen_irq_enable_direct, 2b+1)
53
54 22
55/* 23/*
56 Disabling events is simply a matter of making the event mask 24 Force an event check by making a hypercall,
57 non-zero. 25 but preserve regs before making the call.
58 */
59ENTRY(xen_irq_disable_direct)
60 movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
61ENDPATCH(xen_irq_disable_direct)
62 ret
63 ENDPROC(xen_irq_disable_direct)
64 RELOC(xen_irq_disable_direct, 0)
65
66/*
67 (xen_)save_fl is used to get the current interrupt enable status.
68 Callers expect the status to be in X86_EFLAGS_IF, and other bits
69 may be set in the return value. We take advantage of this by
70 making sure that X86_EFLAGS_IF has the right value (and other bits
71 in that byte are 0), but other bits in the return value are
72 undefined. We need to toggle the state of the bit, because
73 Xen and x86 use opposite senses (mask vs enable).
74 */
75ENTRY(xen_save_fl_direct)
76 testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
77 setz %ah
78 addb %ah,%ah
79ENDPATCH(xen_save_fl_direct)
80 ret
81 ENDPROC(xen_save_fl_direct)
82 RELOC(xen_save_fl_direct, 0)
83
84
85/*
86 In principle the caller should be passing us a value return
87 from xen_save_fl_direct, but for robustness sake we test only
88 the X86_EFLAGS_IF flag rather than the whole byte. After
89 setting the interrupt mask state, it checks for unmasked
90 pending events and enters the hypervisor to get them delivered
91 if so.
92 */ 26 */
93ENTRY(xen_restore_fl_direct) 27check_events:
94 testb $X86_EFLAGS_IF>>8, %ah 28 push %eax
95 setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask 29 push %ecx
96 /* Preempt here doesn't matter because that will deal with 30 push %edx
97 any pending interrupts. The pending check may end up being 31 call xen_force_evtchn_callback
98 run on the wrong CPU, but that doesn't hurt. */ 32 pop %edx
99 33 pop %ecx
100 /* check for unmasked and pending */ 34 pop %eax
101 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
102 jz 1f
1032: call check_events
1041:
105ENDPATCH(xen_restore_fl_direct)
106 ret 35 ret
107 ENDPROC(xen_restore_fl_direct)
108 RELOC(xen_restore_fl_direct, 2b+1)
109 36
110/* 37/*
111 We can't use sysexit directly, because we're not running in ring0. 38 We can't use sysexit directly, because we're not running in ring0.
@@ -289,17 +216,3 @@ ENTRY(xen_iret_crit_fixup)
289 lea 4(%edi),%esp /* point esp to new frame */ 216 lea 4(%edi),%esp /* point esp to new frame */
2902: jmp xen_do_upcall 2172: jmp xen_do_upcall
291 218
292
293/*
294 Force an event check by making a hypercall,
295 but preserve regs before making the call.
296 */
297check_events:
298 push %eax
299 push %ecx
300 push %edx
301 call xen_force_evtchn_callback
302 pop %edx
303 pop %ecx
304 pop %eax
305 ret
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d6fc51f4ce85..d205a283efe0 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -11,142 +11,14 @@
11 generally too large to inline anyway. 11 generally too large to inline anyway.
12 */ 12 */
13 13
14#include <linux/linkage.h>
15
16#include <asm/asm-offsets.h>
17#include <asm/processor-flags.h>
18#include <asm/errno.h> 14#include <asm/errno.h>
19#include <asm/segment.h>
20#include <asm/percpu.h> 15#include <asm/percpu.h>
16#include <asm/processor-flags.h>
17#include <asm/segment.h>
21 18
22#include <xen/interface/xen.h> 19#include <xen/interface/xen.h>
23 20
24#define RELOC(x, v) .globl x##_reloc; x##_reloc=v 21#include "xen-asm.h"
25#define ENDPATCH(x) .globl x##_end; x##_end=.
26
27/* Pseudo-flag used for virtual NMI, which we don't implement yet */
28#define XEN_EFLAGS_NMI 0x80000000
29
30#if 1
31/*
32 FIXME: x86_64 now can support direct access to percpu variables
33 via a segment override. Update xen accordingly.
34 */
35#define BUG ud2a
36#endif
37
38/*
39 Enable events. This clears the event mask and tests the pending
40 event status with one and operation. If there are pending
41 events, then enter the hypervisor to get them handled.
42 */
43ENTRY(xen_irq_enable_direct)
44 BUG
45
46 /* Unmask events */
47 movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
48
49 /* Preempt here doesn't matter because that will deal with
50 any pending interrupts. The pending check may end up being
51 run on the wrong CPU, but that doesn't hurt. */
52
53 /* Test for pending */
54 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
55 jz 1f
56
572: call check_events
581:
59ENDPATCH(xen_irq_enable_direct)
60 ret
61 ENDPROC(xen_irq_enable_direct)
62 RELOC(xen_irq_enable_direct, 2b+1)
63
64/*
65 Disabling events is simply a matter of making the event mask
66 non-zero.
67 */
68ENTRY(xen_irq_disable_direct)
69 BUG
70
71 movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
72ENDPATCH(xen_irq_disable_direct)
73 ret
74 ENDPROC(xen_irq_disable_direct)
75 RELOC(xen_irq_disable_direct, 0)
76
77/*
78 (xen_)save_fl is used to get the current interrupt enable status.
79 Callers expect the status to be in X86_EFLAGS_IF, and other bits
80 may be set in the return value. We take advantage of this by
81 making sure that X86_EFLAGS_IF has the right value (and other bits
82 in that byte are 0), but other bits in the return value are
83 undefined. We need to toggle the state of the bit, because
84 Xen and x86 use opposite senses (mask vs enable).
85 */
86ENTRY(xen_save_fl_direct)
87 BUG
88
89 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
90 setz %ah
91 addb %ah,%ah
92ENDPATCH(xen_save_fl_direct)
93 ret
94 ENDPROC(xen_save_fl_direct)
95 RELOC(xen_save_fl_direct, 0)
96
97/*
98 In principle the caller should be passing us a value return
99 from xen_save_fl_direct, but for robustness sake we test only
100 the X86_EFLAGS_IF flag rather than the whole byte. After
101 setting the interrupt mask state, it checks for unmasked
102 pending events and enters the hypervisor to get them delivered
103 if so.
104 */
105ENTRY(xen_restore_fl_direct)
106 BUG
107
108 testb $X86_EFLAGS_IF>>8, %ah
109 setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
110 /* Preempt here doesn't matter because that will deal with
111 any pending interrupts. The pending check may end up being
112 run on the wrong CPU, but that doesn't hurt. */
113
114 /* check for unmasked and pending */
115 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
116 jz 1f
1172: call check_events
1181:
119ENDPATCH(xen_restore_fl_direct)
120 ret
121 ENDPROC(xen_restore_fl_direct)
122 RELOC(xen_restore_fl_direct, 2b+1)
123
124
125/*
126 Force an event check by making a hypercall,
127 but preserve regs before making the call.
128 */
129check_events:
130 push %rax
131 push %rcx
132 push %rdx
133 push %rsi
134 push %rdi
135 push %r8
136 push %r9
137 push %r10
138 push %r11
139 call xen_force_evtchn_callback
140 pop %r11
141 pop %r10
142 pop %r9
143 pop %r8
144 pop %rdi
145 pop %rsi
146 pop %rdx
147 pop %rcx
148 pop %rax
149 ret
150 22
151ENTRY(xen_adjust_exception_frame) 23ENTRY(xen_adjust_exception_frame)
152 mov 8+0(%rsp),%rcx 24 mov 8+0(%rsp),%rcx