author		Tejun Heo <tj@kernel.org>	2009-02-05 10:57:48 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-05 14:25:41 -0500
commit		130ace11a9dc682541336d1fe5cb3bc7771a149e (patch)
tree		a4913a03cd9e3b34b875aa919f26c1d8ec269dc5 /arch/x86/xen/xen-asm_64.S
parent		69b745ff91836dfc25b26d7be0ead02a6fc0286e (diff)
x86: style cleanups for xen assemblies
Make the following style cleanups:
* drop an unnecessary //#include from xen-asm_32.S
* consistently add a space after each comma
* reformat multiline comments to the standard kernel style (sketched below)
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
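
For reference, the multiline-comment reformatting in this patch follows the usual kernel block-comment convention; a minimal sketch (the wording inside is illustrative only):

	/*
	 * The opening marker sits on its own line; continuation
	 * lines are aligned on a leading asterisk, and the closing
	 * marker gets its own line as well.
	 */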
Diffstat (limited to 'arch/x86/xen/xen-asm_64.S')
-rw-r--r--	arch/x86/xen/xen-asm_64.S	107
1 file changed, 55 insertions(+), 52 deletions(-)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d205a283efe0..02f496a8dbaa 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -1,14 +1,14 @@
 /*
-	Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-	The inline versions are the same as the direct-use versions, with the
-	pre- and post-amble chopped off.
-
-	This code is encoded for size rather than absolute efficiency,
-	with a view to being able to inline as much as possible.
-
-	We only bother with direct forms (ie, vcpu in pda) of the operations
-	here; the indirect forms are better handled in C, since they're
-	generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
 #include <asm/errno.h>
@@ -21,25 +21,25 @@
 #include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
-	mov 8+0(%rsp),%rcx
-	mov 8+8(%rsp),%r11
+	mov 8+0(%rsp), %rcx
+	mov 8+8(%rsp), %r11
 	ret $16
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
- Xen64 iret frame:
-
-	ss
-	rsp
-	rflags
-	cs
-	rip		<-- standard iret frame
-
-	flags
-
-	rcx		}
-	r11		}<-- pushed by hypercall page
-rsp ->	rax		}
+ * Xen64 iret frame:
+ *
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip <-- standard iret frame
+ *
+ *	flags
+ *
+ *	rcx }
+ *	r11 }<-- pushed by hypercall page
+ * rsp->rax }
  */
 ENTRY(xen_iret)
 	pushq $0
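
A note on the hypercall_iret definition in this hunk: Xen lays out the hypercall page as an array of 32-byte stubs, one per hypercall number, so a stub's address can be formed at assembly time by scaling the hypercall constant. A hedged sketch of the same pattern for a different hypercall (__HYPERVISOR_sched_op is a real Xen constant; the label name here is made up):

	/* sketch: each hypercall stub is 32 bytes into the hypercall page */
	hypercall_sched_op = hypercall_page + __HYPERVISOR_sched_op * 32
	jmp hypercall_sched_op		/* tail-call the stub */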
@@ -48,8 +48,8 @@ ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
 /*
- sysexit is not used for 64-bit processes, so it's
- only ever used to return to 32-bit compat userspace.
+ * sysexit is not used for 64-bit processes, so it's only ever used to
+ * return to 32-bit compat userspace.
  */
 ENTRY(xen_sysexit)
 	pushq $__USER32_DS
@@ -64,10 +64,12 @@ ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)
 
 ENTRY(xen_sysret64)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
 	movq %rsp, PER_CPU_VAR(old_rsp)
-	movq PER_CPU_VAR(kernel_stack),%rsp
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER_DS
 	pushq PER_CPU_VAR(old_rsp)
@@ -81,8 +83,10 @@ ENDPATCH(xen_sysret64)
 RELOC(xen_sysret64, 1b+1)
 
 ENTRY(xen_sysret32)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
 	movq %rsp, PER_CPU_VAR(old_rsp)
 	movq PER_CPU_VAR(kernel_stack), %rsp
 
@@ -98,28 +102,27 @@ ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
 
 /*
- Xen handles syscall callbacks much like ordinary exceptions,
- which means we have:
- - kernel gs
- - kernel rsp
- - an iret-like stack frame on the stack (including rcx and r11):
-	ss
-	rsp
-	rflags
-	cs
-	rip
-	r11
- rsp-> rcx
-
- In all the entrypoints, we undo all that to make it look
- like a CPU-generated syscall/sysenter and jump to the normal
- entrypoint.
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip
+ *	r11
+ * rsp->rcx
+ *
+ * In all the entrypoints, we undo all that to make it look like a
+ * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
 .macro undo_xen_syscall
-	mov 0*8(%rsp),%rcx
-	mov 1*8(%rsp),%r11
-	mov 5*8(%rsp),%rsp
+	mov 0*8(%rsp), %rcx
+	mov 1*8(%rsp), %r11
+	mov 5*8(%rsp), %rsp
 	.endm
 
 /* Normal 64-bit system call target */
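
Cross-checking undo_xen_syscall against the frame comment above (a sketch derived from that comment, not part of the patch): counting qwords up from the %rsp the callback hands us gives

	/* sketch: qword offsets into the Xen syscall-callback frame */
	/*   0*8(%rsp)	rcx	} pushed by Xen, mirroring what      */
	/*   1*8(%rsp)	r11	} syscall leaves in %rcx/%r11        */
	/*   2*8(%rsp)	rip                                          */
	/*   3*8(%rsp)	cs                                           */
	/*   4*8(%rsp)	rflags                                       */
	/*   5*8(%rsp)	rsp	<- reloaded last, discarding the frame */
	/*   6*8(%rsp)	ss                                           */

so loading %rcx and %r11 and then switching to the saved user %rsp leaves state looking like a CPU-generated syscall, which is what the comment promises.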
@@ -146,7 +149,7 @@ ENDPROC(xen_sysenter_target)
 
 ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
-	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
+	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
 	mov $-ENOSYS, %rax
 	pushq $VGCF_in_syscall
 	jmp hypercall_iret
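
For context on this final hunk (a reading of the surrounding code, not something the patch changes): these entry points appear to be the fallback used when 32-bit compat syscalls are not configured, so they simply fail the call and return to userspace through Xen's iret hypercall. An annotated restatement of the sequence:

	lea 16(%rsp), %rsp	/* drop the %rcx/%r11 slots Xen pushed */
	mov $-ENOSYS, %rax	/* syscall result: not implemented */
	pushq $VGCF_in_syscall	/* flags word the iret hypercall expects */
	jmp hypercall_iret	/* return to userspace via Xen */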