author		Jeremy Fitzhardinge <jeremy@goop.org>	2009-02-02 16:55:42 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2009-02-04 19:59:04 -0500
commit		5393744b71ce797f1b1546fafaed127fc50c2b61 (patch)
tree		a974975e907b5e2830721c521965a53c34c10c8b /arch/x86/xen/xen-asm_64.S
parent		383414322b3b3ced0cbc146801e0cc6c60a6c5f4 (diff)
xen: make direct versions of irq_enable/disable/save/restore to common code
Now that x86-64 has directly accessible percpu variables, it can also
implement the direct versions of these operations, which operate on a
vcpu_info structure directly embedded in the percpu area.
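(For concreteness, a minimal sketch, assuming the usual CONFIG_SMP
segment-based percpu addressing, in which PER_CPU_VAR(var) expands to a
segment-relative reference such as %gs:var on x86-64. The event mask can
then be touched in a single instruction, e.g.

	/* illustrative only: unmask events for the current vcpu */
	movb $0, %gs:xen_vcpu_info + XEN_vcpu_info_mask

without first computing this CPU's vcpu_info pointer.)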
The 32- and 64-bit versions are in fact more or less identical, and so can
be shared. The only two differences are:
1. xen_restore_fl_direct takes its argument in %eax on 32-bit and in %rdi on 64-bit.
Unfortunately there is no way to refer directly to the second-lowest byte of %rdi
(the way %ah refers to that byte of %eax), so the code isn't quite as dense; see the first sketch below.
2. check_events needs two variants in order to save different sets of registers; see the second sketch below.
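A sketch of point 1, assuming the shared routine otherwise keeps the
sequence used in this file; only the flag test differs between the two
widths:

	/* X86_EFLAGS_IF is bit 9, i.e. bit 1 of the argument's second byte */
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di	/* no register name for byte 1 of %rdi */
#else
	testb $X86_EFLAGS_IF>>8, %ah	/* %ah is byte 1 of %eax */
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask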
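And a sketch of point 2: the 64-bit check_events (removed below) must
preserve %rax, %rcx, %rdx, %rsi, %rdi and %r8-%r11 around the hypercall,
while a 32-bit variant only has three caller-saved registers to preserve:

check_events:				/* hypothetical 32-bit variant */
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
	ret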
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/xen/xen-asm_64.S')
-rw-r--r--	arch/x86/xen/xen-asm_64.S	134
1 file changed, 3 insertions(+), 131 deletions(-)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d6fc51f4ce85..d205a283efe0 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -11,142 +11,14 @@
 	generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
-#include <asm/segment.h>
 #include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)	.globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI	0x80000000
-
-#if 1
-/*
-  FIXME: x86_64 now can support direct access to percpu variables
-  via a segment override. Update xen accordingly.
- */
-#define BUG	ud2a
-#endif
-
-/*
-	Enable events. This clears the event mask and tests the pending
-	event status with one and operation. If there are pending
-	events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-	BUG
-
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-
-2:	call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-	ret
-	ENDPROC(xen_irq_enable_direct)
-	RELOC(xen_irq_enable_direct, 2b+1)
-
-/*
-	Disabling events is simply a matter of making the event mask
-	non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-	BUG
-
-	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-	ret
-	ENDPROC(xen_irq_disable_direct)
-	RELOC(xen_irq_disable_direct, 0)
-
-/*
-	(xen_)save_fl is used to get the current interrupt enable status.
-	Callers expect the status to be in X86_EFLAGS_IF, and other bits
-	may be set in the return value. We take advantage of this by
-	making sure that X86_EFLAGS_IF has the right value (and other bits
-	in that byte are 0), but other bits in the return value are
-	undefined. We need to toggle the state of the bit, because
-	Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-	BUG
-
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	setz %ah
-	addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-	ret
-	ENDPROC(xen_save_fl_direct)
-	RELOC(xen_save_fl_direct, 0)
-
-/*
-	In principle the caller should be passing us a value return
-	from xen_save_fl_direct, but for robustness sake we test only
-	the X86_EFLAGS_IF flag rather than the whole byte. After
-	setting the interrupt mask state, it checks for unmasked
-	pending events and enters the hypervisor to get them delivered
-	if so.
- */
-ENTRY(xen_restore_fl_direct)
-	BUG
-
-	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-	ret
-	ENDPROC(xen_restore_fl_direct)
-	RELOC(xen_restore_fl_direct, 2b+1)
-
-
-/*
-	Force an event check by making a hypercall,
-	but preserve regs before making the call.
- */
-check_events:
-	push %rax
-	push %rcx
-	push %rdx
-	push %rsi
-	push %rdi
-	push %r8
-	push %r9
-	push %r10
-	push %r11
-	call xen_force_evtchn_callback
-	pop %r11
-	pop %r10
-	pop %r9
-	pop %r8
-	pop %rdi
-	pop %rsi
-	pop %rdx
-	pop %rcx
-	pop %rax
-	ret
+#include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
 	mov 8+0(%rsp),%rcx