Diffstat (limited to 'arch/x86/xen/xen-asm_64.S')
 -rw-r--r--  arch/x86/xen/xen-asm_64.S | 147
 1 file changed, 9 insertions(+), 138 deletions(-)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 05794c566e87..d205a283efe0 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -11,143 +11,14 @@
    generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x) .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-#if 1
-/*
-  x86-64 does not yet support direct access to percpu variables
-  via a segment override, so we just need to make sure this code
-  never gets used
- */
-#define BUG                     ud2a
-#define PER_CPU_VAR(var, off)   0xdeadbeef
-#endif
-
-/*
-  Enable events.  This clears the event mask and tests the pending
-  event status with one and operation.  If there are pending
-  events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-        BUG
-
-        /* Unmask events */
-        movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-
-        /* Preempt here doesn't matter because that will deal with
-           any pending interrupts.  The pending check may end up being
-           run on the wrong CPU, but that doesn't hurt. */
-
-        /* Test for pending */
-        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-        jz 1f
-
-2:      call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-        ret
-ENDPROC(xen_irq_enable_direct)
-RELOC(xen_irq_enable_direct, 2b+1)
-
-/*
-  Disabling events is simply a matter of making the event mask
-  non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-        BUG
-
-        movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-ENDPATCH(xen_irq_disable_direct)
-        ret
-ENDPROC(xen_irq_disable_direct)
-RELOC(xen_irq_disable_direct, 0)
-
-/*
-  (xen_)save_fl is used to get the current interrupt enable status.
-  Callers expect the status to be in X86_EFLAGS_IF, and other bits
-  may be set in the return value.  We take advantage of this by
-  making sure that X86_EFLAGS_IF has the right value (and other bits
-  in that byte are 0), but other bits in the return value are
-  undefined.  We need to toggle the state of the bit, because
-  Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-        BUG
-
-        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-        setz %ah
-        addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-        ret
-ENDPROC(xen_save_fl_direct)
-RELOC(xen_save_fl_direct, 0)
-
-/*
-  In principle the caller should be passing us a value return
-  from xen_save_fl_direct, but for robustness sake we test only
-  the X86_EFLAGS_IF flag rather than the whole byte. After
-  setting the interrupt mask state, it checks for unmasked
-  pending events and enters the hypervisor to get them delivered
-  if so.
- */
-ENTRY(xen_restore_fl_direct)
-        BUG
-
-        testb $X86_EFLAGS_IF>>8, %ah
-        setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-        /* Preempt here doesn't matter because that will deal with
-           any pending interrupts.  The pending check may end up being
-           run on the wrong CPU, but that doesn't hurt. */
-
-        /* check for unmasked and pending */
-        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-        jz 1f
-2:      call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-        ret
-ENDPROC(xen_restore_fl_direct)
-RELOC(xen_restore_fl_direct, 2b+1)
-
-
-/*
-  Force an event check by making a hypercall,
-  but preserve regs before making the call.
- */
-check_events:
-        push %rax
-        push %rcx
-        push %rdx
-        push %rsi
-        push %rdi
-        push %r8
-        push %r9
-        push %r10
-        push %r11
-        call xen_force_evtchn_callback
-        pop %r11
-        pop %r10
-        pop %r9
-        pop %r8
-        pop %rdi
-        pop %rsi
-        pop %rdx
-        pop %rcx
-        pop %rax
-        ret
+#include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
         mov 8+0(%rsp),%rcx
@@ -195,11 +66,11 @@ RELOC(xen_sysexit, 1b+1)
 ENTRY(xen_sysret64)
         /* We're already on the usermode stack at this point, but still
            with the kernel gs, so we can easily switch back */
-        movq %rsp, %gs:pda_oldrsp
-        movq %gs:pda_kernelstack,%rsp
+        movq %rsp, PER_CPU_VAR(old_rsp)
+        movq PER_CPU_VAR(kernel_stack),%rsp
 
         pushq $__USER_DS
-        pushq %gs:pda_oldrsp
+        pushq PER_CPU_VAR(old_rsp)
         pushq %r11
         pushq $__USER_CS
         pushq %rcx
@@ -212,11 +83,11 @@ RELOC(xen_sysret64, 1b+1)
 ENTRY(xen_sysret32)
         /* We're already on the usermode stack at this point, but still
            with the kernel gs, so we can easily switch back */
-        movq %rsp, %gs:pda_oldrsp
-        movq %gs:pda_kernelstack, %rsp
+        movq %rsp, PER_CPU_VAR(old_rsp)
+        movq PER_CPU_VAR(kernel_stack), %rsp
 
         pushq $__USER32_DS
-        pushq %gs:pda_oldrsp
+        pushq PER_CPU_VAR(old_rsp)
         pushq %r11
         pushq $__USER32_CS
         pushq %rcx
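Note on the accessor this patch switches to (a minimal sketch, not quoted from the tree at this commit): on 64-bit, PER_CPU_VAR() from <asm/percpu.h> of this era resolves to a %gs-segment-relative reference to a per-cpu symbol, so the hard-coded PDA fields (%gs:pda_oldrsp, %gs:pda_kernelstack) become named per-cpu variables (old_rsp, kernel_stack). The exact macro spelling below, including the per_cpu__ symbol prefix and the SMP/UP split, is an assumption about that header, shown only to illustrate the mapping.

/* Illustrative sketch only -- approximate shape of the percpu accessor;
 * the per_cpu__ prefix and the SMP/UP split are assumptions, not copied
 * from <asm/percpu.h> at this commit. */
#ifdef CONFIG_SMP
# define PER_CPU_VAR(var)       %gs:per_cpu__##var      /* 64-bit SMP: gs-relative per-cpu access */
#else
# define PER_CPU_VAR(var)       per_cpu__##var          /* UP build: plain symbol reference */
#endif

/* Mapping of the accesses changed in xen_sysret64/xen_sysret32 above:
 *
 *      movq %rsp, %gs:pda_oldrsp        ->  movq %rsp, PER_CPU_VAR(old_rsp)
 *      movq %gs:pda_kernelstack, %rsp   ->  movq PER_CPU_VAR(kernel_stack), %rsp
 */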
