author    Borislav Petkov <bp@alien8.de>          2011-05-31 16:21:51 -0400
committer H. Peter Anvin <hpa@linux.intel.com>    2011-06-03 17:38:49 -0400
commit    a268fcfaa6ab2ef740fda5ecf947aca45ccd535d (patch)
tree      6a48c776aed0a656085e7a9f5724807c8e12cfdb
parent    55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
x86, asm: Thin down SAVE/RESTORE_* asm macros
Use dwarf2 cfi annotation macros, making SAVE/RESTORE_* marginally more
readable. No functionality change.

Signed-off-by: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1306873314-32523-2-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
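For context, movq_cfi and movq_cfi_restore are assembler macros provided by
arch/x86/include/asm/dwarf2.h, the header this patch starts including. A
minimal sketch of their definitions as they stood around this time
(reproduced from memory, so treat the exact layout as approximate): each
helper pairs a register spill or reload with its matching CFI annotation,
which is exactly the two-line pattern the diff below collapses, so the move
and its unwind annotation can never drift apart.

        /* Sketch of the dwarf2.h helpers used below; approximate, not verbatim. */
        .macro movq_cfi reg offset=0
        movq %\reg, \offset(%rsp)       /* spill the register to the stack */
        CFI_REL_OFFSET \reg, \offset    /* tell the unwinder where it was saved */
        .endm

        .macro movq_cfi_restore offset reg
        movq \offset(%rsp), %\reg       /* reload the register */
        CFI_RESTORE \reg                /* mark it as live in its register again */
        .endm

If memory serves, when the assembler lacks .cfi directive support the CFI_*
macros expand to nothing, so both helpers degenerate to a bare movq and the
emitted instructions match the open-coded originals either way.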
-rw-r--r--  arch/x86/include/asm/calling.h  101
1 file changed, 41 insertions(+), 60 deletions(-)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 30af5a832163..b67e06c4710e 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
+#include "dwarf2.h"
 
 /*
  * 64-bit system call stack frame layout defines and helpers, for
@@ -87,30 +88,25 @@ For 32-bit we have the following conventions - kernel is built with
 	.macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
 	subq $9*8+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
-	movq %rdi, 8*8(%rsp)
-	CFI_REL_OFFSET rdi, 8*8
-	movq %rsi, 7*8(%rsp)
-	CFI_REL_OFFSET rsi, 7*8
-	movq %rdx, 6*8(%rsp)
-	CFI_REL_OFFSET rdx, 6*8
+	movq_cfi rdi, 8*8
+	movq_cfi rsi, 7*8
+	movq_cfi rdx, 6*8
+
 	.if \norcx
 	.else
-	movq %rcx, 5*8(%rsp)
-	CFI_REL_OFFSET rcx, 5*8
+	movq_cfi rcx, 5*8
 	.endif
-	movq %rax, 4*8(%rsp)
-	CFI_REL_OFFSET rax, 4*8
+
+	movq_cfi rax, 4*8
+
 	.if \nor891011
 	.else
-	movq %r8, 3*8(%rsp)
-	CFI_REL_OFFSET r8, 3*8
-	movq %r9, 2*8(%rsp)
-	CFI_REL_OFFSET r9, 2*8
-	movq %r10, 1*8(%rsp)
-	CFI_REL_OFFSET r10, 1*8
-	movq %r11, (%rsp)
-	CFI_REL_OFFSET r11, 0*8
+	movq_cfi r8, 3*8
+	movq_cfi r9, 2*8
+	movq_cfi r10, 1*8
+	movq_cfi r11, 0*8
 	.endif
+
 	.endm
 
 #define ARG_SKIP	(9*8)
@@ -119,37 +115,34 @@ For 32-bit we have the following conventions - kernel is built with
 		  skipr8910=0, skiprdx=0
 	.if \skipr11
 	.else
-	movq (%rsp), %r11
-	CFI_RESTORE r11
+	movq_cfi_restore 0*8, r11
 	.endif
+
 	.if \skipr8910
 	.else
-	movq 1*8(%rsp), %r10
-	CFI_RESTORE r10
-	movq 2*8(%rsp), %r9
-	CFI_RESTORE r9
-	movq 3*8(%rsp), %r8
-	CFI_RESTORE r8
+	movq_cfi_restore 1*8, r10
+	movq_cfi_restore 2*8, r9
+	movq_cfi_restore 3*8, r8
 	.endif
+
 	.if \skiprax
 	.else
-	movq 4*8(%rsp), %rax
-	CFI_RESTORE rax
+	movq_cfi_restore 4*8, rax
 	.endif
+
 	.if \skiprcx
 	.else
-	movq 5*8(%rsp), %rcx
-	CFI_RESTORE rcx
+	movq_cfi_restore 5*8, rcx
 	.endif
+
 	.if \skiprdx
 	.else
-	movq 6*8(%rsp), %rdx
-	CFI_RESTORE rdx
+	movq_cfi_restore 6*8, rdx
 	.endif
-	movq 7*8(%rsp), %rsi
-	CFI_RESTORE rsi
-	movq 8*8(%rsp), %rdi
-	CFI_RESTORE rdi
+
+	movq_cfi_restore 7*8, rsi
+	movq_cfi_restore 8*8, rdi
+
 	.if ARG_SKIP+\addskip > 0
 	addq $ARG_SKIP+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
@@ -176,33 +169,21 @@ For 32-bit we have the following conventions - kernel is built with
 	.macro SAVE_REST
 	subq $REST_SKIP, %rsp
 	CFI_ADJUST_CFA_OFFSET REST_SKIP
-	movq %rbx, 5*8(%rsp)
-	CFI_REL_OFFSET rbx, 5*8
-	movq %rbp, 4*8(%rsp)
-	CFI_REL_OFFSET rbp, 4*8
-	movq %r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
-	movq %r13, 2*8(%rsp)
-	CFI_REL_OFFSET r13, 2*8
-	movq %r14, 1*8(%rsp)
-	CFI_REL_OFFSET r14, 1*8
-	movq %r15, (%rsp)
-	CFI_REL_OFFSET r15, 0*8
+	movq_cfi rbx, 5*8
+	movq_cfi rbp, 4*8
+	movq_cfi r12, 3*8
+	movq_cfi r13, 2*8
+	movq_cfi r14, 1*8
+	movq_cfi r15, 0*8
 	.endm
 
 	.macro RESTORE_REST
-	movq (%rsp), %r15
-	CFI_RESTORE r15
-	movq 1*8(%rsp), %r14
-	CFI_RESTORE r14
-	movq 2*8(%rsp), %r13
-	CFI_RESTORE r13
-	movq 3*8(%rsp), %r12
-	CFI_RESTORE r12
-	movq 4*8(%rsp), %rbp
-	CFI_RESTORE rbp
-	movq 5*8(%rsp), %rbx
-	CFI_RESTORE rbx
+	movq_cfi_restore 0*8, r15
+	movq_cfi_restore 1*8, r14
+	movq_cfi_restore 2*8, r13
+	movq_cfi_restore 3*8, r12
+	movq_cfi_restore 4*8, rbp
+	movq_cfi_restore 5*8, rbx
 	addq $REST_SKIP, %rsp
 	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
 	.endm
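To make the calling side concrete, here is a hypothetical use of the macro
pairs; the label and the C helper below are invented for illustration (the
real users of SAVE_ARGS/SAVE_REST are the syscall and interrupt entry paths
in arch/x86/kernel/entry_64.S):

        /* Hypothetical illustration only -- not part of this patch. */
some_stub:
        SAVE_ARGS                       /* spill rdi, rsi, rdx, rcx, rax, r8-r11 */
        movq %rsp, %rdi                 /* hand the saved-register frame to C */
        call do_helper                  /* made-up C function */
        RESTORE_ARGS
        ret

Because SAVE_ARGS adjusts the CFA offset and annotates every spill, a dwarf2
unwinder can still walk past some_stub while do_helper runs.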