diff options
Diffstat (limited to 'arch/x86/include/asm/calling.h')
-rw-r--r-- | arch/x86/include/asm/calling.h | 130 |
1 file changed, 52 insertions, 78 deletions
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h index 30af5a832163..a9e3a740f697 100644 --- a/arch/x86/include/asm/calling.h +++ b/arch/x86/include/asm/calling.h | |||
@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
46 | 46 | ||
47 | */ | 47 | */ |
48 | 48 | ||
49 | #include "dwarf2.h" | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * 64-bit system call stack frame layout defines and helpers, for | 52 | * 64-bit system call stack frame layout defines and helpers, for |
@@ -84,72 +85,57 @@ For 32-bit we have the following conventions - kernel is built with | |||
84 | #define ARGOFFSET R11 | 85 | #define ARGOFFSET R11 |
85 | #define SWFRAME ORIG_RAX | 86 | #define SWFRAME ORIG_RAX |
86 | 87 | ||
87 | .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 | 88 | .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1 |
88 | subq $9*8+\addskip, %rsp | 89 | subq $9*8+\addskip, %rsp |
89 | CFI_ADJUST_CFA_OFFSET 9*8+\addskip | 90 | CFI_ADJUST_CFA_OFFSET 9*8+\addskip |
90 | movq %rdi, 8*8(%rsp) | 91 | movq_cfi rdi, 8*8 |
91 | CFI_REL_OFFSET rdi, 8*8 | 92 | movq_cfi rsi, 7*8 |
92 | movq %rsi, 7*8(%rsp) | 93 | movq_cfi rdx, 6*8 |
93 | CFI_REL_OFFSET rsi, 7*8 | 94 | |
94 | movq %rdx, 6*8(%rsp) | 95 | .if \save_rcx |
95 | CFI_REL_OFFSET rdx, 6*8 | 96 | movq_cfi rcx, 5*8 |
96 | .if \norcx | ||
97 | .else | ||
98 | movq %rcx, 5*8(%rsp) | ||
99 | CFI_REL_OFFSET rcx, 5*8 | ||
100 | .endif | 97 | .endif |
101 | movq %rax, 4*8(%rsp) | 98 | |
102 | CFI_REL_OFFSET rax, 4*8 | 99 | movq_cfi rax, 4*8 |
103 | .if \nor891011 | 100 | |
104 | .else | 101 | .if \save_r891011 |
105 | movq %r8, 3*8(%rsp) | 102 | movq_cfi r8, 3*8 |
106 | CFI_REL_OFFSET r8, 3*8 | 103 | movq_cfi r9, 2*8 |
107 | movq %r9, 2*8(%rsp) | 104 | movq_cfi r10, 1*8 |
108 | CFI_REL_OFFSET r9, 2*8 | 105 | movq_cfi r11, 0*8 |
109 | movq %r10, 1*8(%rsp) | ||
110 | CFI_REL_OFFSET r10, 1*8 | ||
111 | movq %r11, (%rsp) | ||
112 | CFI_REL_OFFSET r11, 0*8 | ||
113 | .endif | 106 | .endif |
107 | |||
114 | .endm | 108 | .endm |
115 | 109 | ||
116 | #define ARG_SKIP (9*8) | 110 | #define ARG_SKIP (9*8) |
117 | 111 | ||
118 | .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ | 112 | .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \ |
119 | skipr8910=0, skiprdx=0 | 113 | rstor_r8910=1, rstor_rdx=1 |
120 | .if \skipr11 | 114 | .if \rstor_r11 |
121 | .else | 115 | movq_cfi_restore 0*8, r11 |
122 | movq (%rsp), %r11 | ||
123 | CFI_RESTORE r11 | ||
124 | .endif | 116 | .endif |
125 | .if \skipr8910 | 117 | |
126 | .else | 118 | .if \rstor_r8910 |
127 | movq 1*8(%rsp), %r10 | 119 | movq_cfi_restore 1*8, r10 |
128 | CFI_RESTORE r10 | 120 | movq_cfi_restore 2*8, r9 |
129 | movq 2*8(%rsp), %r9 | 121 | movq_cfi_restore 3*8, r8 |
130 | CFI_RESTORE r9 | ||
131 | movq 3*8(%rsp), %r8 | ||
132 | CFI_RESTORE r8 | ||
133 | .endif | 122 | .endif |
134 | .if \skiprax | 123 | |
135 | .else | 124 | .if \rstor_rax |
136 | movq 4*8(%rsp), %rax | 125 | movq_cfi_restore 4*8, rax |
137 | CFI_RESTORE rax | ||
138 | .endif | 126 | .endif |
139 | .if \skiprcx | 127 | |
140 | .else | 128 | .if \rstor_rcx |
141 | movq 5*8(%rsp), %rcx | 129 | movq_cfi_restore 5*8, rcx |
142 | CFI_RESTORE rcx | ||
143 | .endif | 130 | .endif |
144 | .if \skiprdx | 131 | |
145 | .else | 132 | .if \rstor_rdx |
146 | movq 6*8(%rsp), %rdx | 133 | movq_cfi_restore 6*8, rdx |
147 | CFI_RESTORE rdx | ||
148 | .endif | 134 | .endif |
149 | movq 7*8(%rsp), %rsi | 135 | |
150 | CFI_RESTORE rsi | 136 | movq_cfi_restore 7*8, rsi |
151 | movq 8*8(%rsp), %rdi | 137 | movq_cfi_restore 8*8, rdi |
152 | CFI_RESTORE rdi | 138 | |
153 | .if ARG_SKIP+\addskip > 0 | 139 | .if ARG_SKIP+\addskip > 0 |
154 | addq $ARG_SKIP+\addskip, %rsp | 140 | addq $ARG_SKIP+\addskip, %rsp |
155 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) | 141 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) |
@@ -176,33 +162,21 @@ For 32-bit we have the following conventions - kernel is built with | |||
176 | .macro SAVE_REST | 162 | .macro SAVE_REST |
177 | subq $REST_SKIP, %rsp | 163 | subq $REST_SKIP, %rsp |
178 | CFI_ADJUST_CFA_OFFSET REST_SKIP | 164 | CFI_ADJUST_CFA_OFFSET REST_SKIP |
179 | movq %rbx, 5*8(%rsp) | 165 | movq_cfi rbx, 5*8 |
180 | CFI_REL_OFFSET rbx, 5*8 | 166 | movq_cfi rbp, 4*8 |
181 | movq %rbp, 4*8(%rsp) | 167 | movq_cfi r12, 3*8 |
182 | CFI_REL_OFFSET rbp, 4*8 | 168 | movq_cfi r13, 2*8 |
183 | movq %r12, 3*8(%rsp) | 169 | movq_cfi r14, 1*8 |
184 | CFI_REL_OFFSET r12, 3*8 | 170 | movq_cfi r15, 0*8 |
185 | movq %r13, 2*8(%rsp) | ||
186 | CFI_REL_OFFSET r13, 2*8 | ||
187 | movq %r14, 1*8(%rsp) | ||
188 | CFI_REL_OFFSET r14, 1*8 | ||
189 | movq %r15, (%rsp) | ||
190 | CFI_REL_OFFSET r15, 0*8 | ||
191 | .endm | 171 | .endm |
192 | 172 | ||
193 | .macro RESTORE_REST | 173 | .macro RESTORE_REST |
194 | movq (%rsp), %r15 | 174 | movq_cfi_restore 0*8, r15 |
195 | CFI_RESTORE r15 | 175 | movq_cfi_restore 1*8, r14 |
196 | movq 1*8(%rsp), %r14 | 176 | movq_cfi_restore 2*8, r13 |
197 | CFI_RESTORE r14 | 177 | movq_cfi_restore 3*8, r12 |
198 | movq 2*8(%rsp), %r13 | 178 | movq_cfi_restore 4*8, rbp |
199 | CFI_RESTORE r13 | 179 | movq_cfi_restore 5*8, rbx |
200 | movq 3*8(%rsp), %r12 | ||
201 | CFI_RESTORE r12 | ||
202 | movq 4*8(%rsp), %rbp | ||
203 | CFI_RESTORE rbp | ||
204 | movq 5*8(%rsp), %rbx | ||
205 | CFI_RESTORE rbx | ||
206 | addq $REST_SKIP, %rsp | 180 | addq $REST_SKIP, %rsp |
207 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) | 181 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) |
208 | .endm | 182 | .endm |
@@ -214,7 +188,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
214 | 188 | ||
215 | .macro RESTORE_ALL addskip=0 | 189 | .macro RESTORE_ALL addskip=0 |
216 | RESTORE_REST | 190 | RESTORE_REST |
217 | RESTORE_ARGS 0, \addskip | 191 | RESTORE_ARGS 1, \addskip |
218 | .endm | 192 | .endm |
219 | 193 | ||
220 | .macro icebp | 194 | .macro icebp |