Diffstat (limited to 'arch/ia64/kernel/relocate_kernel.S')
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S  334
1 file changed, 334 insertions, 0 deletions
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..ae473e3f2a0d
--- /dev/null
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -0,0 +1,334 @@
/*
 * arch/ia64/kernel/relocate_kernel.S
 *
 * Relocate kexec'able kernel and start it
 *
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
 * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mca_asm.h>

        /* Must be relocatable PIC code callable as a C function
         */
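        /* Arguments, judging from how the inputs are used below:
         *   in0 - physical address of the kexec page list, walked by the
         *         copy loop further down
         *   in1 - entry point of the new kernel, branched to through b6
         *   in2 - boot parameter block, converted to a physical address
         *         and left in r28 for the new kernel's entry code
         *   in3 - base of the PAL code granule whose TR mapping is purged
         */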
GLOBAL_ENTRY(relocate_new_kernel)
        .prologue
        alloc r31=ar.pfs,4,0,0,0
        .body
.reloc_entry:
{
        rsm psr.i | psr.ic
        mov r2=ip
}
        ;;
{
        flushrs                         // must be first insn in group
        srlz.i
}
        ;;
        dep r2=0,r2,61,3                // to physical address
        ;;
        // first switch to physical mode
        add r3=1f-.reloc_entry, r2
        movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
        mov ar.rsc=0                    // put RSE in enforced lazy mode
        ;;
        add sp=(memory_stack_end - 16 - .reloc_entry),r2
        add r8=(register_stack - .reloc_entry),r2
        ;;
        mov r18=ar.rnat
        mov ar.bspstore=r8
        ;;
        mov cr.ipsr=r16
        mov cr.iip=r3
        mov cr.ifs=r0
        srlz.i
        ;;
        mov ar.rnat=r18
        rfi
        ;;
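        // The rfi "returns" to label 1 below at its physical address:
        // cr.iip was loaded with the physical address of 1f, and cr.ipsr
        // with a value whose it/dt/rt bits are clear, so execution resumes
        // there with address translation disabled.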
1:
        // physical mode code begin
        mov b6=in1
        dep r28=0,in2,61,3              // to physical address

        // purge all TC entries
#define O(member)       IA64_CPUINFO_##member##_OFFSET
        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
        ;;
        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
        ld4 r19=[r2],4                  // r19=ptce_count[0]
        ld4 r21=[r17],4                 // r21=ptce_stride[0]
        ;;
        ld4 r20=[r2]                    // r20=ptce_count[1]
        ld4 r22=[r17]                   // r22=ptce_stride[1]
        mov r24=r0
        ;;
        adds r20=-1,r20
        ;;
#undef O
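        // Double loop over the PTCE parameters loaded above: the inner loop
        // issues ptce_count[1] ptc.e operations, stepping the address by
        // ptce_stride[1]; the outer loop repeats that ptce_count[0] times,
        // stepping by ptce_stride[0], until every TC entry is purged.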
2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i
        ;;
        // purge TR entry for kernel text and data
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;

        // purge TR entry for percpu data
        movl r16=PERCPU_ADDR
        mov r18=PERCPU_PAGE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;

        // purge TR entry for pal code
        mov r16=in3
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;

        // purge TR entry for stack
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;

        // copy segments
        movl r16=PAGE_MASK
        mov r30=in0                     // in0 is page_list
        br.sptk.few .dest_page
        ;;
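        // Each page_list entry is a physical address with flag bits in its
        // low-order bits; the values tested below match the generic kexec
        // encoding: 0x1 destination page, 0x2 indirection page, 0x4 done,
        // 0x8 source page (IND_DESTINATION/IND_INDIRECTION/IND_DONE/
        // IND_SOURCE in include/linux/kexec.h).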
.loop:
        ld8 r30=[in0], 8;;
.dest_page:
        tbit.z p0, p6=r30, 0;;          // 0x1 dest page
(p6)    and r17=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 1;;          // 0x2 indirect page
(p6)    and in0=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 2;;          // 0x4 end flag
(p6)    br.cond.sptk.few .end_loop;;

        tbit.z p6, p0=r30, 3;;          // 0x8 source page
(p6)    br.cond.sptk.few .loop

        and r18=r30, r16

        // simple copy page, may optimize later
        movl r14=PAGE_SIZE/8 - 1;;
        mov ar.lc=r14;;
1:
        ld8 r14=[r18], 8;;
        st8 [r17]=r14;;
        fc.i r17
        add r17=8, r17
        br.ctop.sptk.few 1b
        br.sptk.few .loop
        ;;

.end_loop:
        sync.i                  // for fc.i
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        br.call.sptk.many b0=b6;;

.align 32
memory_stack:
        .fill 8192, 1, 0
memory_stack_end:
register_stack:
        .fill 8192, 1, 0
register_stack_end:
relocate_new_kernel_end:
END(relocate_new_kernel)

.global relocate_new_kernel_size
relocate_new_kernel_size:
        data8 relocate_new_kernel_end - relocate_new_kernel

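/*
 * ia64_dump_cpu_regs: dump this CPU's register state into the 8-byte-slot
 * buffer at in0 -- r0-r31 in the first 32 slots, followed by ar.rnat, pr,
 * the branch registers, and assorted application registers, in the order
 * laid out by the stores below (presumably for the crash/kdump path, given
 * the name).
 */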
GLOBAL_ENTRY(ia64_dump_cpu_regs)
        .prologue
        alloc loc0=ar.pfs,1,2,0,0
        .body
        mov ar.rsc=0                    // put RSE in enforced lazy mode
        add loc1=4*8, in0               // save r4 and r5 first
        ;;
{
        flushrs                         // flush dirty regs to backing store
        srlz.i
}
        st8 [loc1]=r4, 8
        ;;
        st8 [loc1]=r5, 8
        ;;
        add loc1=32*8, in0
        mov r4=ar.rnat
        ;;
        st8 [in0]=r0, 8                 // r0
        st8 [loc1]=r4, 8                // rnat
        mov r5=pr
        ;;
        st8 [in0]=r1, 8                 // r1
        st8 [loc1]=r5, 8                // pr
        mov r4=b0
        ;;
        st8 [in0]=r2, 8                 // r2
        st8 [loc1]=r4, 8                // b0
        mov r5=b1
        ;;
        st8 [in0]=r3, 24                // r3
        st8 [loc1]=r5, 8                // b1
        mov r4=b2
        ;;
        st8 [in0]=r6, 8                 // r6
        st8 [loc1]=r4, 8                // b2
        mov r5=b3
        ;;
        st8 [in0]=r7, 8                 // r7
        st8 [loc1]=r5, 8                // b3
        mov r4=b4
        ;;
        st8 [in0]=r8, 8                 // r8
        st8 [loc1]=r4, 8                // b4
        mov r5=b5
        ;;
        st8 [in0]=r9, 8                 // r9
        st8 [loc1]=r5, 8                // b5
        mov r4=b6
        ;;
        st8 [in0]=r10, 8                // r10
        st8 [loc1]=r4, 8                // b6
        mov r5=b7
        ;;
        st8 [in0]=r11, 8                // r11
        st8 [loc1]=r5, 8                // b7
        mov r4=b0
        ;;
        st8 [in0]=r12, 8                // r12
        st8 [loc1]=r4, 8                // ip
        mov r5=loc0
        ;;
        st8 [in0]=r13, 8                // r13
        extr.u r5=r5, 0, 38             // ar.pfs.pfm
        mov r4=r0                       // user mask
        ;;
        st8 [in0]=r14, 8                // r14
        st8 [loc1]=r5, 8                // cfm
        ;;
        st8 [in0]=r15, 8                // r15
        st8 [loc1]=r4, 8                // user mask
        mov r5=ar.rsc
        ;;
        st8 [in0]=r16, 8                // r16
        st8 [loc1]=r5, 8                // ar.rsc
        mov r4=ar.bsp
        ;;
        st8 [in0]=r17, 8                // r17
        st8 [loc1]=r4, 8                // ar.bsp
        mov r5=ar.bspstore
        ;;
        st8 [in0]=r18, 8                // r18
        st8 [loc1]=r5, 8                // ar.bspstore
        mov r4=ar.rnat
        ;;
        st8 [in0]=r19, 8                // r19
        st8 [loc1]=r4, 8                // ar.rnat
        mov r5=ar.ccv
        ;;
        st8 [in0]=r20, 8                // r20
        st8 [loc1]=r5, 8                // ar.ccv
        mov r4=ar.unat
        ;;
        st8 [in0]=r21, 8                // r21
        st8 [loc1]=r4, 8                // ar.unat
        mov r5 = ar.fpsr
        ;;
        st8 [in0]=r22, 8                // r22
        st8 [loc1]=r5, 8                // ar.fpsr
        mov r4 = ar.unat
        ;;
        st8 [in0]=r23, 8                // r23
        st8 [loc1]=r4, 8                // unat
        mov r5 = ar.fpsr
        ;;
        st8 [in0]=r24, 8                // r24
        st8 [loc1]=r5, 8                // fpsr
        mov r4 = ar.pfs
        ;;
        st8 [in0]=r25, 8                // r25
        st8 [loc1]=r4, 8                // ar.pfs
        mov r5 = ar.lc
        ;;
        st8 [in0]=r26, 8                // r26
        st8 [loc1]=r5, 8                // ar.lc
        mov r4 = ar.ec
        ;;
        st8 [in0]=r27, 8                // r27
        st8 [loc1]=r4, 8                // ar.ec
        mov r5 = ar.csd
        ;;
        st8 [in0]=r28, 8                // r28
        st8 [loc1]=r5, 8                // ar.csd
        mov r4 = ar.ssd
        ;;
        st8 [in0]=r29, 8                // r29
        st8 [loc1]=r4, 8                // ar.ssd
        ;;
        st8 [in0]=r30, 8                // r30
        ;;
        st8 [in0]=r31, 8                // r31
        mov ar.pfs=loc0
        ;;
        br.ret.sptk.many rp
END(ia64_dump_cpu_regs)