Diffstat (limited to 'arch/x86/xen/xen-asm.S')
 -rw-r--r--  arch/x86/xen/xen-asm.S  | 142
 1 file changed, 142 insertions, 0 deletions
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644
index 000000000000..79d7362ad6d1
--- /dev/null
+++ b/arch/x86/xen/xen-asm.S
@@ -0,0 +1,142 @@
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is optimized for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"
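
/*
 * A note on the xen-asm.h helpers used below (that header has the
 * authoritative definitions): ENDPATCH(x) defines an x_end symbol
 * marking the end of the patchable region, so the patching machinery
 * knows how many bytes to copy when inlining; the trailing ret is
 * deliberately left outside it.  RELOC(x, v) defines an x_reloc
 * symbol recording the offset, if any, that must be fixed up when the
 * code is copied to another address: "2b+1" is one byte past the call
 * opcode at local label 2, i.e. the call's displacement operand, and
 * 0 means nothing needs relocating.
 */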

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
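
        /*
         * The mask is cleared before the pending test below, so an
         * event that becomes pending between these two points is
         * still noticed and delivered via check_events.
         */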

        /*
         * Being preempted here doesn't matter, because preemption will
         * itself deal with any pending interrupts.  The pending check
         * may then run on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_irq_enable_direct)
        ret
        ENDPROC(xen_irq_enable_direct)
        RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
        RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah
        addb %ah, %ah
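        /*
         * The testb above sets ZF iff the mask byte is zero, i.e.
         * events are enabled.  setz makes %ah 1 when enabled and 0
         * when masked; addb doubles that to 2 or 0.  %ah is bits 8..15
         * of %eax, so %eax ends up as 0x200 or 0, and X86_EFLAGS_IF is
         * exactly bit 9 (0x200), as the callers expect.
         */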
ENDPATCH(xen_save_fl_direct)
        ret
        ENDPROC(xen_save_fl_direct)
        RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned by
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, this checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
        testw $X86_EFLAGS_IF, %di
#else
        testb $X86_EFLAGS_IF>>8, %ah
#endif
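        /*
         * The flags argument arrives in %rdi under the 64-bit calling
         * convention, hence testing its low word %di directly.  On
         * 32-bit the kernel's regparm convention means the value is
         * assumed to arrive in %eax; IF is bit 9, which is why the
         * second byte %ah is tested against X86_EFLAGS_IF>>8 (0x02).
         */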
        setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        /*
         * Being preempted here doesn't matter, because preemption will
         * itself deal with any pending interrupts.  The pending check
         * may then run on the wrong CPU, but that doesn't hurt.
         */

        /* check for unmasked and pending */
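        /*
         * This single 16-bit compare relies on evtchn_upcall_pending
         * and evtchn_upcall_mask being adjacent bytes in vcpu_info:
         * loaded little-endian, pending is the low byte of the word,
         * so the word equals 0x0001 exactly when an event is pending
         * and the mask is clear.
         */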
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jnz 1f

2:      call check_events
1:
ENDPATCH(xen_restore_fl_direct)
        ret
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
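        /*
         * check_events is reached from patched call sites where the
         * compiler was never told a call happens, so no registers may
         * be clobbered here: every register the C calling convention
         * lets xen_force_evtchn_callback clobber is saved and restored
         * by hand around the call.
         */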
#ifdef CONFIG_X86_32
        push %eax
        push %ecx
        push %edx
        call xen_force_evtchn_callback
        pop %edx
        pop %ecx
        pop %eax
#else
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
#endif
        ret