path: root/arch/x86/xen/xen-asm.S
Diffstat (limited to 'arch/x86/xen/xen-asm.S')
-rw-r--r--   arch/x86/xen/xen-asm.S   78
1 file changed, 40 insertions, 38 deletions
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 4c6f96799131..79d7362ad6d1 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -1,14 +1,14 @@
 /*
-   Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-   The inline versions are the same as the direct-use versions, with the
-   pre- and post-amble chopped off.
-
-   This code is encoded for size rather than absolute efficiency,
-   with a view to being able to inline as much as possible.
-
-   We only bother with direct forms (ie, vcpu in percpu data) of
-   the operations here; the indirect forms are better handled in
-   C, since they're generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining. The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in percpu data) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
 #include <asm/asm-offsets.h>
@@ -18,17 +18,19 @@
 #include "xen-asm.h"
 
 /*
-   Enable events. This clears the event mask and tests the pending
-   event status with one and operation. If there are pending
-   events, then enter the hypervisor to get them handled.
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+ * then enter the hypervisor to get them handled.
  */
 ENTRY(xen_irq_enable_direct)
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
 
 	/* Test for pending */
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
@@ -43,8 +45,8 @@ ENDPATCH(xen_irq_enable_direct)
 
 
 /*
-   Disabling events is simply a matter of making the event mask
-   non-zero.
+ * Disabling events is simply a matter of making the event mask
+ * non-zero.
  */
 ENTRY(xen_irq_disable_direct)
 	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -54,18 +56,18 @@ ENDPATCH(xen_irq_disable_direct)
 	RELOC(xen_irq_disable_direct, 0)
 
 /*
-   (xen_)save_fl is used to get the current interrupt enable status.
-   Callers expect the status to be in X86_EFLAGS_IF, and other bits
-   may be set in the return value. We take advantage of this by
-   making sure that X86_EFLAGS_IF has the right value (and other bits
-   in that byte are 0), but other bits in the return value are
-   undefined. We need to toggle the state of the bit, because
-   Xen and x86 use opposite senses (mask vs enable).
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value. We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined. We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
  */
 ENTRY(xen_save_fl_direct)
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
-	addb %ah,%ah
+	addb %ah, %ah
 ENDPATCH(xen_save_fl_direct)
 	ret
 	ENDPROC(xen_save_fl_direct)
@@ -73,12 +75,11 @@ ENDPATCH(xen_save_fl_direct)
 
 
 /*
-   In principle the caller should be passing us a value return
-   from xen_save_fl_direct, but for robustness sake we test only
-   the X86_EFLAGS_IF flag rather than the whole byte. After
-   setting the interrupt mask state, it checks for unmasked
-   pending events and enters the hypervisor to get them delivered
-   if so.
+ * In principle the caller should be passing us a value return from
+ * xen_save_fl_direct, but for robustness sake we test only the
+ * X86_EFLAGS_IF flag rather than the whole byte. After setting the
+ * interrupt mask state, it checks for unmasked pending events and
+ * enters the hypervisor to get them delivered if so.
  */
 ENTRY(xen_restore_fl_direct)
 #ifdef CONFIG_X86_64
@@ -87,9 +88,11 @@ ENTRY(xen_restore_fl_direct)
 	testb $X86_EFLAGS_IF>>8, %ah
 #endif
 	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
@@ -103,8 +106,8 @@ ENDPATCH(xen_restore_fl_direct)
 
 
 /*
-   Force an event check by making a hypercall,
-   but preserve regs before making the call.
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
 check_events:
 #ifdef CONFIG_X86_32
@@ -137,4 +140,3 @@ check_events:
 	pop %rax
 #endif
 	ret
-
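
Note on the code being touched above: the comment blocks describe what the save/restore pair actually computes. Xen keeps a per-vcpu event mask with the opposite sense to the x86 IF flag (mask 1 = events blocked, IF 1 = interrupts enabled), so xen_save_fl_direct inverts the mask into the X86_EFLAGS_IF position and xen_restore_fl_direct translates back, then checks for pending events. The C below is a minimal illustrative sketch of that translation, not the kernel's implementation; the struct, its field names, and the force_event_check callback are assumptions made for the example.

#include <stdint.h>

#define X86_EFLAGS_IF 0x00000200	/* interrupt enable flag */

/* Illustrative stand-in for the per-vcpu info block (field names assumed). */
struct vcpu_info_sketch {
	uint8_t evtchn_upcall_mask;	/* 1 = event delivery masked */
	uint8_t evtchn_upcall_pending;	/* 1 = an event is waiting   */
};

/*
 * Rough equivalent of xen_save_fl_direct: report the enable state in
 * the X86_EFLAGS_IF bit, inverting the sense because Xen uses "mask"
 * (1 = disabled) while x86 uses "enable" (1 = enabled).  Other bits of
 * the return value are left undefined by the real code.
 */
static unsigned long sketch_save_fl(const struct vcpu_info_sketch *v)
{
	return v->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}

/*
 * Rough equivalent of xen_restore_fl_direct: derive the mask from the
 * IF bit, then force an event check if something is pending and now
 * unmasked (this corresponds to the call into check_events).
 */
static void sketch_restore_fl(struct vcpu_info_sketch *v, unsigned long flags,
			      void (*force_event_check)(void))
{
	v->evtchn_upcall_mask = (flags & X86_EFLAGS_IF) ? 0 : 1;

	if (!v->evtchn_upcall_mask && v->evtchn_upcall_pending)
		force_event_check();
}

The assembly reaches the same result with setz %ah followed by addb %ah, %ah: %ah becomes 1 when the mask byte is zero, and doubling it leaves 2 in byte 1 of %eax, which is exactly 0x200, i.e. X86_EFLAGS_IF.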