Diffstat (limited to 'arch/ia64/kernel/entry.S')
-rw-r--r-- | arch/ia64/kernel/entry.S | 1587 |
1 files changed, 1587 insertions, 0 deletions
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
new file mode 100644
index 000000000000..0272c010a3ba
--- /dev/null
+++ b/arch/ia64/kernel/entry.S
@@ -0,0 +1,1587 @@ | |||
1 | /* | ||
2 | * ia64/kernel/entry.S | ||
3 | * | ||
4 | * Kernel entry points. | ||
5 | * | ||
6 | * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co | ||
7 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
8 | * Copyright (C) 1999, 2002-2003 | ||
9 | * Asit Mallick <Asit.K.Mallick@intel.com> | ||
10 | * Don Dugger <Don.Dugger@intel.com> | ||
11 | * Suresh Siddha <suresh.b.siddha@intel.com> | ||
12 | * Fenghua Yu <fenghua.yu@intel.com> | ||
13 | * Copyright (C) 1999 VA Linux Systems | ||
14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | ||
15 | */ | ||
16 | /* | ||
17 | * ia64_switch_to now places correct virtual mapping in TR2 for | ||
18 | * kernel stack. This allows us to handle interrupts without changing | ||
19 | * to physical mode. | ||
20 | * | ||
21 | * Jonathan Nicklin <nicklin@missioncriticallinux.com> | ||
22 | * Patrick O'Rourke <orourke@missioncriticallinux.com> | ||
23 | * 11/07/2000 | ||
24 | */ | ||
25 | /* | ||
26 | * Global (preserved) predicate usage on syscall entry/exit path: | ||
27 | * | ||
28 | * pKStk: See entry.h. | ||
29 | * pUStk: See entry.h. | ||
30 | * pSys: See entry.h. | ||
31 | * pNonSys: !pSys | ||
32 | */ | ||
33 | |||
34 | #include <linux/config.h> | ||
35 | |||
36 | #include <asm/asmmacro.h> | ||
37 | #include <asm/cache.h> | ||
38 | #include <asm/errno.h> | ||
39 | #include <asm/kregs.h> | ||
40 | #include <asm/offsets.h> | ||
41 | #include <asm/pgtable.h> | ||
42 | #include <asm/percpu.h> | ||
43 | #include <asm/processor.h> | ||
44 | #include <asm/thread_info.h> | ||
45 | #include <asm/unistd.h> | ||
46 | |||
47 | #include "minstate.h" | ||
48 | |||
49 | /* | ||
50 | * execve() is special because in case of success, we need to | ||
51 | * set up a null register window frame. | ||
52 | */ | ||
53 | ENTRY(ia64_execve) | ||
54 | /* | ||
55 | * Allocate 8 input registers since ptrace() may clobber them | ||
56 | */ | ||
57 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
58 | alloc loc1=ar.pfs,8,2,4,0 | ||
59 | mov loc0=rp | ||
60 | .body | ||
61 | mov out0=in0 // filename | ||
62 | ;; // stop bit between alloc and call | ||
63 | mov out1=in1 // argv | ||
64 | mov out2=in2 // envp | ||
65 | add out3=16,sp // regs | ||
66 | br.call.sptk.many rp=sys_execve | ||
67 | .ret0: | ||
68 | #ifdef CONFIG_IA32_SUPPORT | ||
69 | /* | ||
70 | * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers | ||
71 | * from pt_regs. | ||
72 | */ | ||
73 | adds r16=PT(CR_IPSR)+16,sp | ||
74 | ;; | ||
75 | ld8 r16=[r16] | ||
76 | #endif | ||
77 | cmp4.ge p6,p7=r8,r0 | ||
78 | mov ar.pfs=loc1 // restore ar.pfs | ||
79 | sxt4 r8=r8 // return 64-bit result | ||
80 | ;; | ||
81 | stf.spill [sp]=f0 | ||
82 | (p6) cmp.ne pKStk,pUStk=r0,r0 // a successful execve() lands us in user-mode... | ||
83 | mov rp=loc0 | ||
84 | (p6) mov ar.pfs=r0 // clear ar.pfs on success | ||
85 | (p7) br.ret.sptk.many rp | ||
86 | |||
87 | /* | ||
88 | * In theory, we'd have to zap this state only to prevent leaking of | ||
89 | * security-sensitive state (e.g., if current->mm->dumpable is zero). However, | ||
90 | * this executes in less than 20 cycles even on Itanium, so it's not worth | ||
91 | * optimizing for... | ||
92 | */ | ||
93 | mov ar.unat=0; mov ar.lc=0 | ||
94 | mov r4=0; mov f2=f0; mov b1=r0 | ||
95 | mov r5=0; mov f3=f0; mov b2=r0 | ||
96 | mov r6=0; mov f4=f0; mov b3=r0 | ||
97 | mov r7=0; mov f5=f0; mov b4=r0 | ||
98 | ldf.fill f12=[sp]; mov f13=f0; mov b5=r0 | ||
99 | ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0 | ||
100 | ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0 | ||
101 | ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0 | ||
102 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 | ||
103 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 | ||
104 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 | ||
105 | #ifdef CONFIG_IA32_SUPPORT | ||
106 | tbit.nz p6,p0=r16, IA64_PSR_IS_BIT | ||
107 | movl loc0=ia64_ret_from_ia32_execve | ||
108 | ;; | ||
109 | (p6) mov rp=loc0 | ||
110 | #endif | ||
111 | br.ret.sptk.many rp | ||
112 | END(ia64_execve) | ||
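The out-register moves in this wrapper line up one-for-one with the C entry point it branches to. A rough sketch of the 2.6-era ia64 prototype, reconstructed from the register assignments above (the exact spelling of the pointer types is an assumption, not copied from the tree):

    /* sketch only: how the wrapper's out0..out3 become C arguments */
    long sys_execve(char __user *filename,    /* out0 = in0     */
                    char __user **argv,       /* out1 = in1     */
                    char __user **envp,       /* out2 = in2     */
                    struct pt_regs *regs);    /* out3 = sp + 16 */

On success (r8 >= 0) the wrapper then clears ar.pfs, which is what gives the new image the null register window frame the comment at the top promises.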
113 | |||
114 | /* | ||
115 | * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr, | ||
116 | * u64 tls) | ||
117 | */ | ||
118 | GLOBAL_ENTRY(sys_clone2) | ||
119 | /* | ||
120 | * Allocate 8 input registers since ptrace() may clobber them | ||
121 | */ | ||
122 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
123 | alloc r16=ar.pfs,8,2,6,0 | ||
124 | DO_SAVE_SWITCH_STACK | ||
125 | adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp | ||
126 | mov loc0=rp | ||
127 | mov loc1=r16 // save ar.pfs across do_fork | ||
128 | .body | ||
129 | mov out1=in1 | ||
130 | mov out3=in2 | ||
131 | tbit.nz p6,p0=in0,CLONE_SETTLS_BIT | ||
132 | mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID | ||
133 | ;; | ||
134 | (p6) st8 [r2]=in5 // store TLS in r16 for copy_thread() | ||
135 | mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID | ||
136 | adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs | ||
137 | mov out0=in0 // out0 = clone_flags | ||
138 | br.call.sptk.many rp=do_fork | ||
139 | .ret1: .restore sp | ||
140 | adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack | ||
141 | mov ar.pfs=loc1 | ||
142 | mov rp=loc0 | ||
143 | br.ret.sptk.many rp | ||
144 | END(sys_clone2) | ||
145 | |||
146 | /* | ||
147 | * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls) | ||
148 | * Deprecated. Use sys_clone2() instead. | ||
149 | */ | ||
150 | GLOBAL_ENTRY(sys_clone) | ||
151 | /* | ||
152 | * Allocate 8 input registers since ptrace() may clobber them | ||
153 | */ | ||
154 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
155 | alloc r16=ar.pfs,8,2,6,0 | ||
156 | DO_SAVE_SWITCH_STACK | ||
157 | adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp | ||
158 | mov loc0=rp | ||
159 | mov loc1=r16 // save ar.pfs across do_fork | ||
160 | .body | ||
161 | mov out1=in1 | ||
162 | mov out3=16 // stacksize (compensates for 16-byte scratch area) | ||
163 | tbit.nz p6,p0=in0,CLONE_SETTLS_BIT | ||
164 | mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID | ||
165 | ;; | ||
166 | (p6) st8 [r2]=in4 // store TLS in r16 for copy_thread() | ||
167 | mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID | ||
168 | adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs | ||
169 | mov out0=in0 // out0 = clone_flags | ||
170 | br.call.sptk.many rp=do_fork | ||
171 | .ret2: .restore sp | ||
172 | adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack | ||
173 | mov ar.pfs=loc1 | ||
174 | mov rp=loc0 | ||
175 | br.ret.sptk.many rp | ||
176 | END(sys_clone) | ||
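Both clone wrappers funnel into do_fork() with the same out-register layout; only out3 (the stack size) differs. A sketch of the era's do_fork() signature from kernel/fork.c, with the argument order reconstructed from the asm above (treat it as an assumption):

    long do_fork(unsigned long clone_flags,  /* out0 = in0                          */
                 unsigned long stack_start,  /* out1 = in1 (ustack_base)            */
                 struct pt_regs *regs,       /* out2 = sp+IA64_SWITCH_STACK_SIZE+16 */
                 unsigned long stack_size,   /* out3 = in2 (clone2) or 16 (clone)   */
                 int __user *parent_tidptr,  /* out4                                */
                 int __user *child_tidptr);  /* out5                                */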
177 | |||
178 | /* | ||
179 | * prev_task <- ia64_switch_to(struct task_struct *next) | ||
180 | * With Ingo's new scheduler, interrupts are disabled when this routine gets | ||
181 | * called. The code starting at .map relies on this. The rest of the code | ||
182 | * doesn't care about the interrupt masking status. | ||
183 | */ | ||
184 | GLOBAL_ENTRY(ia64_switch_to) | ||
185 | .prologue | ||
186 | alloc r16=ar.pfs,1,0,0,0 | ||
187 | DO_SAVE_SWITCH_STACK | ||
188 | .body | ||
189 | |||
190 | adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 | ||
191 | movl r25=init_task | ||
192 | mov r27=IA64_KR(CURRENT_STACK) | ||
193 | adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 | ||
194 | dep r20=0,in0,61,3 // physical address of "next" | ||
195 | ;; | ||
196 | st8 [r22]=sp // save kernel stack pointer of old task | ||
197 | shr.u r26=r20,IA64_GRANULE_SHIFT | ||
198 | cmp.eq p7,p6=r25,in0 | ||
199 | ;; | ||
200 | /* | ||
201 | * If we've already mapped this task's page, we can skip doing it again. | ||
202 | */ | ||
203 | (p6) cmp.eq p7,p6=r26,r27 | ||
204 | (p6) br.cond.dpnt .map | ||
205 | ;; | ||
206 | .done: | ||
207 | (p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!! | ||
208 | ;; | ||
209 | (p6) srlz.d | ||
210 | ld8 sp=[r21] // load kernel stack pointer of new task | ||
211 | mov IA64_KR(CURRENT)=in0 // update "current" application register | ||
212 | mov r8=r13 // return pointer to previously running task | ||
213 | mov r13=in0 // set "current" pointer | ||
214 | ;; | ||
215 | DO_LOAD_SWITCH_STACK | ||
216 | |||
217 | #ifdef CONFIG_SMP | ||
218 | sync.i // ensure "fc"s done by this CPU are visible on other CPUs | ||
219 | #endif | ||
220 | br.ret.sptk.many rp // boogie on out in new context | ||
221 | |||
222 | .map: | ||
223 | rsm psr.ic // interrupts (psr.i) are already disabled here | ||
224 | movl r25=PAGE_KERNEL | ||
225 | ;; | ||
226 | srlz.d | ||
227 | or r23=r25,r20 // construct PA | page properties | ||
228 | mov r25=IA64_GRANULE_SHIFT<<2 | ||
229 | ;; | ||
230 | mov cr.itir=r25 | ||
231 | mov cr.ifa=in0 // VA of next task... | ||
232 | ;; | ||
233 | mov r25=IA64_TR_CURRENT_STACK | ||
234 | mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... | ||
235 | ;; | ||
236 | itr.d dtr[r25]=r23 // wire in new mapping... | ||
237 | br.cond.sptk .done | ||
238 | END(ia64_switch_to) | ||
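The .map/.done dance above reduces to a simple decision in C-like pseudocode. The helper names below are hypothetical, invented for illustration; the real work is the itr.d into dtr[IA64_TR_CURRENT_STACK]:

    /* sketch: rewrite the kernel-stack TR only when the granule changes */
    void switch_stack_mapping(struct task_struct *next)
    {
            unsigned long phys    = (unsigned long) next & ~(0x7UL << 61); /* dep r20=0,in0,61,3 */
            unsigned long granule = phys >> IA64_GRANULE_SHIFT;

            if (next != &init_task && granule != current_stack_kr()) {  /* hypothetical accessor */
                    /* .map path: psr.ic off, program cr.itir/cr.ifa, itr.d, psr.ic back on */
                    remap_kernel_stack_tr(granule);                      /* hypothetical helper */
            }
            /* .done path: load the new task's sp, update IA64_KR(CURRENT) and r13 */
    }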
239 | |||
240 | /* | ||
241 | * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This | ||
242 | * means that we may get an interrupt with "sp" pointing to the new kernel stack while | ||
243 | * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc, | ||
244 | * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a | ||
245 | * problem. Also, we don't need to specify unwind information for preserved registers | ||
246 | * that are not modified in save_switch_stack as the right unwind information is already | ||
247 | * specified at the call-site of save_switch_stack. | ||
248 | */ | ||
249 | |||
250 | /* | ||
251 | * save_switch_stack: | ||
252 | * - r16 holds ar.pfs | ||
253 | * - b7 holds address to return to | ||
254 | * - rp (b0) holds return address to save | ||
255 | */ | ||
256 | GLOBAL_ENTRY(save_switch_stack) | ||
257 | .prologue | ||
258 | .altrp b7 | ||
259 | flushrs // flush dirty regs to backing store (must be first in insn group) | ||
260 | .save @priunat,r17 | ||
261 | mov r17=ar.unat // preserve caller's | ||
262 | .body | ||
263 | #ifdef CONFIG_ITANIUM | ||
264 | adds r2=16+128,sp | ||
265 | adds r3=16+64,sp | ||
266 | adds r14=SW(R4)+16,sp | ||
267 | ;; | ||
268 | st8.spill [r14]=r4,16 // spill r4 | ||
269 | lfetch.fault.excl.nt1 [r3],128 | ||
270 | ;; | ||
271 | lfetch.fault.excl.nt1 [r2],128 | ||
272 | lfetch.fault.excl.nt1 [r3],128 | ||
273 | ;; | ||
274 | lfetch.fault.excl [r2] | ||
275 | lfetch.fault.excl [r3] | ||
276 | adds r15=SW(R5)+16,sp | ||
277 | #else | ||
278 | add r2=16+3*128,sp | ||
279 | add r3=16,sp | ||
280 | add r14=SW(R4)+16,sp | ||
281 | ;; | ||
282 | st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0 | ||
283 | lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010 | ||
284 | ;; | ||
285 | lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090 | ||
286 | lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190 | ||
287 | ;; | ||
288 | lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110 | ||
289 | lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210 | ||
290 | adds r15=SW(R5)+16,sp | ||
291 | #endif | ||
292 | ;; | ||
293 | st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5 | ||
294 | mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0 | ||
295 | add r2=SW(F2)+16,sp // r2 = &sw->f2 | ||
296 | ;; | ||
297 | st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6 | ||
298 | mov.m r18=ar.fpsr // preserve fpsr | ||
299 | add r3=SW(F3)+16,sp // r3 = &sw->f3 | ||
300 | ;; | ||
301 | stf.spill [r2]=f2,32 | ||
302 | mov.m r19=ar.rnat | ||
303 | mov r21=b0 | ||
304 | |||
305 | stf.spill [r3]=f3,32 | ||
306 | st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7 | ||
307 | mov r22=b1 | ||
308 | ;; | ||
309 | // since we're done with the spills, read and save ar.unat: | ||
310 | mov.m r29=ar.unat | ||
311 | mov.m r20=ar.bspstore | ||
312 | mov r23=b2 | ||
313 | stf.spill [r2]=f4,32 | ||
314 | stf.spill [r3]=f5,32 | ||
315 | mov r24=b3 | ||
316 | ;; | ||
317 | st8 [r14]=r21,SW(B1)-SW(B0) // save b0 | ||
318 | st8 [r15]=r23,SW(B3)-SW(B2) // save b2 | ||
319 | mov r25=b4 | ||
320 | mov r26=b5 | ||
321 | ;; | ||
322 | st8 [r14]=r22,SW(B4)-SW(B1) // save b1 | ||
323 | st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3 | ||
324 | mov r21=ar.lc // I-unit | ||
325 | stf.spill [r2]=f12,32 | ||
326 | stf.spill [r3]=f13,32 | ||
327 | ;; | ||
328 | st8 [r14]=r25,SW(B5)-SW(B4) // save b4 | ||
329 | st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs | ||
330 | stf.spill [r2]=f14,32 | ||
331 | stf.spill [r3]=f15,32 | ||
332 | ;; | ||
333 | st8 [r14]=r26 // save b5 | ||
334 | st8 [r15]=r21 // save ar.lc | ||
335 | stf.spill [r2]=f16,32 | ||
336 | stf.spill [r3]=f17,32 | ||
337 | ;; | ||
338 | stf.spill [r2]=f18,32 | ||
339 | stf.spill [r3]=f19,32 | ||
340 | ;; | ||
341 | stf.spill [r2]=f20,32 | ||
342 | stf.spill [r3]=f21,32 | ||
343 | ;; | ||
344 | stf.spill [r2]=f22,32 | ||
345 | stf.spill [r3]=f23,32 | ||
346 | ;; | ||
347 | stf.spill [r2]=f24,32 | ||
348 | stf.spill [r3]=f25,32 | ||
349 | ;; | ||
350 | stf.spill [r2]=f26,32 | ||
351 | stf.spill [r3]=f27,32 | ||
352 | ;; | ||
353 | stf.spill [r2]=f28,32 | ||
354 | stf.spill [r3]=f29,32 | ||
355 | ;; | ||
356 | stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30) | ||
357 | stf.spill [r3]=f31,SW(PR)-SW(F31) | ||
358 | add r14=SW(CALLER_UNAT)+16,sp | ||
359 | ;; | ||
360 | st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat | ||
361 | st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat | ||
362 | mov r21=pr | ||
363 | ;; | ||
364 | st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat | ||
365 | st8 [r3]=r21 // save predicate registers | ||
366 | ;; | ||
367 | st8 [r2]=r20 // save ar.bspstore | ||
368 | st8 [r14]=r18 // save fpsr | ||
369 | mov ar.rsc=3 // put RSE back into eager mode, pl 0 | ||
370 | br.cond.sptk.many b7 | ||
371 | END(save_switch_stack) | ||
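The SW(...) offsets used throughout index into struct switch_stack. Roughly, with the field order reconstructed from the spill sequence above (verify against the era's include/asm-ia64/ptrace.h before relying on it; f12..f31 are individual fields there, collapsed to an array here for brevity):

    struct switch_stack {
            unsigned long     caller_unat;             /* SW(CALLER_UNAT) */
            unsigned long     ar_fpsr;                 /* SW(AR_FPSR)     */
            struct ia64_fpreg f2, f3, f4, f5;          /* preserved FP    */
            struct ia64_fpreg f12_f31[20];             /* f12..f31        */
            unsigned long     r4, r5, r6, r7;          /* preserved GRs   */
            unsigned long     b0, b1, b2, b3, b4, b5;  /* branch regs     */
            unsigned long     ar_pfs, ar_lc;
            unsigned long     ar_unat, ar_rnat, ar_bspstore;
            unsigned long     pr;
    };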
372 | |||
373 | /* | ||
374 | * load_switch_stack: | ||
375 | * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK) | ||
376 | * - b7 holds address to return to | ||
377 | * - must not touch r8-r11 | ||
378 | */ | ||
379 | ENTRY(load_switch_stack) | ||
380 | .prologue | ||
381 | .altrp b7 | ||
382 | |||
383 | .body | ||
384 | lfetch.fault.nt1 [sp] | ||
385 | adds r2=SW(AR_BSPSTORE)+16,sp | ||
386 | adds r3=SW(AR_UNAT)+16,sp | ||
387 | mov ar.rsc=0 // put RSE into enforced lazy mode | ||
388 | adds r14=SW(CALLER_UNAT)+16,sp | ||
389 | adds r15=SW(AR_FPSR)+16,sp | ||
390 | ;; | ||
391 | ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore | ||
392 | ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat | ||
393 | ;; | ||
394 | ld8 r21=[r2],16 // restore b0 | ||
395 | ld8 r22=[r3],16 // restore b1 | ||
396 | ;; | ||
397 | ld8 r23=[r2],16 // restore b2 | ||
398 | ld8 r24=[r3],16 // restore b3 | ||
399 | ;; | ||
400 | ld8 r25=[r2],16 // restore b4 | ||
401 | ld8 r26=[r3],16 // restore b5 | ||
402 | ;; | ||
403 | ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs | ||
404 | ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc | ||
405 | ;; | ||
406 | ld8 r28=[r2] // restore pr | ||
407 | ld8 r30=[r3] // restore rnat | ||
408 | ;; | ||
409 | ld8 r18=[r14],16 // restore caller's unat | ||
410 | ld8 r19=[r15],24 // restore fpsr | ||
411 | ;; | ||
412 | ldf.fill f2=[r14],32 | ||
413 | ldf.fill f3=[r15],32 | ||
414 | ;; | ||
415 | ldf.fill f4=[r14],32 | ||
416 | ldf.fill f5=[r15],32 | ||
417 | ;; | ||
418 | ldf.fill f12=[r14],32 | ||
419 | ldf.fill f13=[r15],32 | ||
420 | ;; | ||
421 | ldf.fill f14=[r14],32 | ||
422 | ldf.fill f15=[r15],32 | ||
423 | ;; | ||
424 | ldf.fill f16=[r14],32 | ||
425 | ldf.fill f17=[r15],32 | ||
426 | ;; | ||
427 | ldf.fill f18=[r14],32 | ||
428 | ldf.fill f19=[r15],32 | ||
429 | mov b0=r21 | ||
430 | ;; | ||
431 | ldf.fill f20=[r14],32 | ||
432 | ldf.fill f21=[r15],32 | ||
433 | mov b1=r22 | ||
434 | ;; | ||
435 | ldf.fill f22=[r14],32 | ||
436 | ldf.fill f23=[r15],32 | ||
437 | mov b2=r23 | ||
438 | ;; | ||
439 | mov ar.bspstore=r27 | ||
440 | mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7 | ||
441 | mov b3=r24 | ||
442 | ;; | ||
443 | ldf.fill f24=[r14],32 | ||
444 | ldf.fill f25=[r15],32 | ||
445 | mov b4=r25 | ||
446 | ;; | ||
447 | ldf.fill f26=[r14],32 | ||
448 | ldf.fill f27=[r15],32 | ||
449 | mov b5=r26 | ||
450 | ;; | ||
451 | ldf.fill f28=[r14],32 | ||
452 | ldf.fill f29=[r15],32 | ||
453 | mov ar.pfs=r16 | ||
454 | ;; | ||
455 | ldf.fill f30=[r14],32 | ||
456 | ldf.fill f31=[r15],24 | ||
457 | mov ar.lc=r17 | ||
458 | ;; | ||
459 | ld8.fill r4=[r14],16 | ||
460 | ld8.fill r5=[r15],16 | ||
461 | mov pr=r28,-1 | ||
462 | ;; | ||
463 | ld8.fill r6=[r14],16 | ||
464 | ld8.fill r7=[r15],16 | ||
465 | |||
466 | mov ar.unat=r18 // restore caller's unat | ||
467 | mov ar.rnat=r30 // must restore after bspstore but before rsc! | ||
468 | mov ar.fpsr=r19 // restore fpsr | ||
469 | mov ar.rsc=3 // put RSE back into eager mode, pl 0 | ||
470 | br.cond.sptk.many b7 | ||
471 | END(load_switch_stack) | ||
472 | |||
473 | GLOBAL_ENTRY(__ia64_syscall) | ||
474 | .regstk 6,0,0,0 | ||
475 | mov r15=in5 // put syscall number in place | ||
476 | break __BREAK_SYSCALL | ||
477 | movl r2=errno | ||
478 | cmp.eq p6,p7=-1,r10 | ||
479 | ;; | ||
480 | (p6) st4 [r2]=r8 | ||
481 | (p6) mov r8=-1 | ||
482 | br.ret.sptk.many rp | ||
483 | END(__ia64_syscall) | ||
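The stub above is the whole ia64 syscall convention in four instructions: the number goes in r15, break __BREAK_SYSCALL traps into the kernel, and the kernel comes back with the result in r8 and with r10 == -1 on failure (in which case r8 carries the errno value). Summarized as comments, since the register plumbing cannot be expressed in portable C:

    /*
     * Convention implemented by __ia64_syscall (editor's summary):
     *   r15  <- syscall number          (mov r15=in5)
     *   break __BREAK_SYSCALL           traps into the kernel
     *   r8   -> result, or errno value on failure
     *   r10  -> 0 on success, -1 on failure
     * so the C-visible behavior is:
     *   if (r10 == -1) { errno = r8; return -1; } else return r8;
     */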
484 | |||
485 | GLOBAL_ENTRY(execve) | ||
486 | mov r15=__NR_execve // put syscall number in place | ||
487 | break __BREAK_SYSCALL | ||
488 | br.ret.sptk.many rp | ||
489 | END(execve) | ||
490 | |||
491 | GLOBAL_ENTRY(clone) | ||
492 | mov r15=__NR_clone // put syscall number in place | ||
493 | break __BREAK_SYSCALL | ||
494 | br.ret.sptk.many rp | ||
495 | END(clone) | ||
496 | |||
497 | /* | ||
498 | * Invoke a system call, but do some tracing before and after the call. | ||
499 | * We MUST preserve the current register frame throughout this routine | ||
500 | * because some system calls (such as ia64_execve) directly | ||
501 | * manipulate ar.pfs. | ||
502 | */ | ||
503 | GLOBAL_ENTRY(ia64_trace_syscall) | ||
504 | PT_REGS_UNWIND_INFO(0) | ||
505 | /* | ||
506 | * We need to preserve the scratch registers f6-f11 in case the system | ||
507 | * call is sigreturn. | ||
508 | */ | ||
509 | adds r16=PT(F6)+16,sp | ||
510 | adds r17=PT(F7)+16,sp | ||
511 | ;; | ||
512 | stf.spill [r16]=f6,32 | ||
513 | stf.spill [r17]=f7,32 | ||
514 | ;; | ||
515 | stf.spill [r16]=f8,32 | ||
516 | stf.spill [r17]=f9,32 | ||
517 | ;; | ||
518 | stf.spill [r16]=f10 | ||
519 | stf.spill [r17]=f11 | ||
520 | br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args | ||
521 | adds r16=PT(F6)+16,sp | ||
522 | adds r17=PT(F7)+16,sp | ||
523 | ;; | ||
524 | ldf.fill f6=[r16],32 | ||
525 | ldf.fill f7=[r17],32 | ||
526 | ;; | ||
527 | ldf.fill f8=[r16],32 | ||
528 | ldf.fill f9=[r17],32 | ||
529 | ;; | ||
530 | ldf.fill f10=[r16] | ||
531 | ldf.fill f11=[r17] | ||
532 | // the syscall number may have changed, so re-load it and re-calculate the | ||
533 | // syscall entry-point: | ||
534 | adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #) | ||
535 | ;; | ||
536 | ld8 r15=[r15] | ||
537 | mov r3=NR_syscalls - 1 | ||
538 | ;; | ||
539 | adds r15=-1024,r15 | ||
540 | movl r16=sys_call_table | ||
541 | ;; | ||
542 | shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024) | ||
543 | cmp.leu p6,p7=r15,r3 | ||
544 | ;; | ||
545 | (p6) ld8 r20=[r20] // load address of syscall entry point | ||
546 | (p7) movl r20=sys_ni_syscall | ||
547 | ;; | ||
548 | mov b6=r20 | ||
549 | br.call.sptk.many rp=b6 // do the syscall | ||
550 | .strace_check_retval: | ||
551 | cmp.lt p6,p0=r8,r0 // syscall failed? | ||
552 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
553 | adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 | ||
554 | mov r10=0 | ||
555 | (p6) br.cond.sptk strace_error // syscall failed -> | ||
556 | ;; // avoid RAW on r10 | ||
557 | .strace_save_retval: | ||
558 | .mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8 | ||
559 | .mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10 | ||
560 | br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value | ||
561 | .ret3: br.cond.sptk .work_pending_syscall_end | ||
562 | |||
563 | strace_error: | ||
564 | ld8 r3=[r2] // load pt_regs.r8 | ||
565 | sub r9=0,r8 // negate return value to get errno value | ||
566 | ;; | ||
567 | cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0? | ||
568 | adds r3=16,r2 // r3=&pt_regs.r10 | ||
569 | ;; | ||
570 | (p6) mov r10=-1 | ||
571 | (p6) mov r8=r9 | ||
572 | br.cond.sptk .strace_save_retval | ||
573 | END(ia64_trace_syscall) | ||
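The re-dispatch block above in plain C: ia64 syscall numbers start at 1024, so the table index is r15 - 1024, and the unsigned compare against NR_syscalls - 1 doubles as the range check. Sketch (table typing and argument passing elided for brevity):

    typedef long (*syscall_fn)(void);          /* real entries take up to 8 args */

    long redispatch(unsigned long r15)
    {
            unsigned long idx = r15 - 1024;         /* adds r15=-1024,r15   */

            if (idx <= NR_syscalls - 1)             /* cmp.leu p6,p7=r15,r3 */
                    return sys_call_table[idx]();   /* (p6) ld8 + br.call   */
            return sys_ni_syscall();                /* (p7) fallback        */
    }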
574 | |||
575 | /* | ||
576 | * When traced and returning from sigreturn, we invoke syscall_trace but then | ||
577 | * go straight to ia64_leave_kernel rather than ia64_leave_syscall. | ||
578 | */ | ||
579 | GLOBAL_ENTRY(ia64_strace_leave_kernel) | ||
580 | PT_REGS_UNWIND_INFO(0) | ||
581 | { /* | ||
582 | * Some versions of gas generate bad unwind info if the first instruction of a | ||
583 | * procedure doesn't go into the first slot of a bundle. This is a workaround. | ||
584 | */ | ||
585 | nop.m 0 | ||
586 | nop.i 0 | ||
587 | br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value | ||
588 | } | ||
589 | .ret4: br.cond.sptk ia64_leave_kernel | ||
590 | END(ia64_strace_leave_kernel) | ||
591 | |||
592 | GLOBAL_ENTRY(ia64_ret_from_clone) | ||
593 | PT_REGS_UNWIND_INFO(0) | ||
594 | { /* | ||
595 | * Some versions of gas generate bad unwind info if the first instruction of a | ||
596 | * procedure doesn't go into the first slot of a bundle. This is a workaround. | ||
597 | */ | ||
598 | nop.m 0 | ||
599 | nop.i 0 | ||
600 | /* | ||
601 | * We need to call schedule_tail() to complete the scheduling process. | ||
602 | * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the | ||
603 | * address of the previously executing task. | ||
604 | */ | ||
605 | br.call.sptk.many rp=ia64_invoke_schedule_tail | ||
606 | } | ||
607 | .ret8: | ||
608 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
609 | ;; | ||
610 | ld4 r2=[r2] | ||
611 | ;; | ||
612 | mov r8=0 | ||
613 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 | ||
614 | ;; | ||
615 | cmp.ne p6,p0=r2,r0 | ||
616 | (p6) br.cond.spnt .strace_check_retval | ||
617 | ;; // added stop bits to prevent r8 dependency | ||
618 | END(ia64_ret_from_clone) | ||
619 | // fall through | ||
620 | GLOBAL_ENTRY(ia64_ret_from_syscall) | ||
621 | PT_REGS_UNWIND_INFO(0) | ||
622 | cmp.ge p6,p7=r8,r0 // syscall executed successfully? | ||
623 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
624 | mov r10=r0 // clear error indication in r10 | ||
625 | (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure | ||
626 | END(ia64_ret_from_syscall) | ||
627 | // fall through | ||
628 | /* | ||
629 | * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't | ||
630 | * need to switch to bank 0 and doesn't restore the scratch registers. | ||
631 | * To avoid leaking kernel bits, the scratch registers are set to | ||
632 | * the following known-to-be-safe values: | ||
633 | * | ||
634 | * r1: restored (global pointer) | ||
635 | * r2: cleared | ||
636 | * r3: 1 (when returning to user-level) | ||
637 | * r8-r11: restored (syscall return value(s)) | ||
638 | * r12: restored (user-level stack pointer) | ||
639 | * r13: restored (user-level thread pointer) | ||
640 | * r14: cleared | ||
641 | * r15: restored (syscall #) | ||
642 | * r16-r17: cleared | ||
643 | * r18: user-level b6 | ||
644 | * r19: cleared | ||
645 | * r20: user-level ar.fpsr | ||
646 | * r21: user-level b0 | ||
647 | * r22: cleared | ||
648 | * r23: user-level ar.bspstore | ||
649 | * r24: user-level ar.rnat | ||
650 | * r25: user-level ar.unat | ||
651 | * r26: user-level ar.pfs | ||
652 | * r27: user-level ar.rsc | ||
653 | * r28: user-level ip | ||
654 | * r29: user-level psr | ||
655 | * r30: user-level cfm | ||
656 | * r31: user-level pr | ||
657 | * f6-f11: cleared | ||
658 | * pr: restored (user-level pr) | ||
659 | * b0: restored (user-level rp) | ||
660 | * b6: restored | ||
661 | * b7: cleared | ||
662 | * ar.unat: restored (user-level ar.unat) | ||
663 | * ar.pfs: restored (user-level ar.pfs) | ||
664 | * ar.rsc: restored (user-level ar.rsc) | ||
665 | * ar.rnat: restored (user-level ar.rnat) | ||
666 | * ar.bspstore: restored (user-level ar.bspstore) | ||
667 | * ar.fpsr: restored (user-level ar.fpsr) | ||
668 | * ar.ccv: cleared | ||
669 | * ar.csd: cleared | ||
670 | * ar.ssd: cleared | ||
671 | */ | ||
672 | ENTRY(ia64_leave_syscall) | ||
673 | PT_REGS_UNWIND_INFO(0) | ||
674 | /* | ||
675 | * work.need_resched etc. mustn't get changed by this CPU before it returns to | ||
676 | * user- or fsys-mode, hence we disable interrupts early on. | ||
677 | * | ||
678 | * p6 controls whether current_thread_info()->flags needs to be checked for | ||
679 | * extra work. We always check for extra work when returning to user-level. | ||
680 | * With CONFIG_PREEMPT, we also check for extra work when the preempt_count | ||
681 | * is 0. After extra work processing has been completed, execution | ||
682 | * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check | ||
683 | * needs to be redone. | ||
684 | */ | ||
685 | #ifdef CONFIG_PREEMPT | ||
686 | rsm psr.i // disable interrupts | ||
687 | cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall | ||
688 | (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 | ||
689 | ;; | ||
690 | .pred.rel.mutex pUStk,pKStk | ||
691 | (pKStk) ld4 r21=[r20] // r21 <- preempt_count | ||
692 | (pUStk) mov r21=0 // r21 <- 0 | ||
693 | ;; | ||
694 | cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) | ||
695 | #else /* !CONFIG_PREEMPT */ | ||
696 | (pUStk) rsm psr.i | ||
697 | cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall | ||
698 | (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk | ||
699 | #endif | ||
700 | .work_processed_syscall: | ||
701 | adds r2=PT(LOADRS)+16,r12 | ||
702 | adds r3=PT(AR_BSPSTORE)+16,r12 | ||
703 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
704 | ;; | ||
705 | (p6) ld4 r31=[r18] // load current_thread_info()->flags | ||
706 | ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" | ||
707 | mov b7=r0 // clear b7 | ||
708 | ;; | ||
709 | ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) | ||
710 | ld8 r18=[r2],PT(R9)-PT(B6) // load b6 | ||
711 | (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? | ||
712 | ;; | ||
713 | mov r16=ar.bsp // M2 get existing backing store pointer | ||
714 | (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? | ||
715 | (p6) br.cond.spnt .work_pending_syscall | ||
716 | ;; | ||
717 | // start restoring the state saved on the kernel stack (struct pt_regs): | ||
718 | ld8 r9=[r2],PT(CR_IPSR)-PT(R9) | ||
719 | ld8 r11=[r3],PT(CR_IIP)-PT(R11) | ||
720 | mov f6=f0 // clear f6 | ||
721 | ;; | ||
722 | invala // M0|1 invalidate ALAT | ||
723 | rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection | ||
724 | mov f9=f0 // clear f9 | ||
725 | |||
726 | ld8 r29=[r2],16 // load cr.ipsr | ||
727 | ld8 r28=[r3],16 // load cr.iip | ||
728 | mov f8=f0 // clear f8 | ||
729 | ;; | ||
730 | ld8 r30=[r2],16 // M0|1 load cr.ifs | ||
731 | mov.m ar.ssd=r0 // M2 clear ar.ssd | ||
732 | cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs | ||
733 | ;; | ||
734 | ld8 r25=[r3],16 // M0|1 load ar.unat | ||
735 | mov.m ar.csd=r0 // M2 clear ar.csd | ||
736 | mov r22=r0 // clear r22 | ||
737 | ;; | ||
738 | ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs | ||
739 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | ||
740 | mov f10=f0 // clear f10 | ||
741 | ;; | ||
742 | ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0 | ||
743 | ld8 r27=[r3],PT(PR)-PT(AR_RSC) // load ar.rsc | ||
744 | mov f11=f0 // clear f11 | ||
745 | ;; | ||
746 | ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // load ar.rnat (may be garbage) | ||
747 | ld8 r31=[r3],PT(R1)-PT(PR) // load predicates | ||
748 | (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 | ||
749 | ;; | ||
750 | ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // load ar.fpsr | ||
751 | ld8.fill r1=[r3],16 // load r1 | ||
752 | (pUStk) mov r17=1 | ||
753 | ;; | ||
754 | srlz.d // M0 ensure interruption collection is off | ||
755 | ld8.fill r13=[r3],16 | ||
756 | mov f7=f0 // clear f7 | ||
757 | ;; | ||
758 | ld8.fill r12=[r2] // restore r12 (sp) | ||
759 | ld8.fill r15=[r3] // restore r15 | ||
760 | addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0 | ||
761 | ;; | ||
762 | (pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8 | ||
763 | (pUStk) st1 [r14]=r17 | ||
764 | mov b6=r18 // I0 restore b6 | ||
765 | ;; | ||
766 | mov r14=r0 // clear r14 | ||
767 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | ||
768 | (pKStk) br.cond.dpnt.many skip_rbs_switch | ||
769 | |||
770 | mov.m ar.ccv=r0 // clear ar.ccv | ||
771 | (pNonSys) br.cond.dpnt.many dont_preserve_current_frame | ||
772 | br.cond.sptk.many rbs_switch | ||
773 | END(ia64_leave_syscall) | ||
774 | |||
775 | #ifdef CONFIG_IA32_SUPPORT | ||
776 | GLOBAL_ENTRY(ia64_ret_from_ia32_execve) | ||
777 | PT_REGS_UNWIND_INFO(0) | ||
778 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
779 | adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 | ||
780 | ;; | ||
781 | .mem.offset 0,0 | ||
782 | st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit | ||
783 | .mem.offset 8,0 | ||
784 | st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit | ||
785 | END(ia64_ret_from_ia32_execve) | ||
786 | // fall through | ||
787 | #endif /* CONFIG_IA32_SUPPORT */ | ||
788 | GLOBAL_ENTRY(ia64_leave_kernel) | ||
789 | PT_REGS_UNWIND_INFO(0) | ||
790 | /* | ||
791 | * work.need_resched etc. mustn't get changed by this CPU before it returns to | ||
792 | * user- or fsys-mode, hence we disable interrupts early on. | ||
793 | * | ||
794 | * p6 controls whether current_thread_info()->flags needs to be checked for | ||
795 | * extra work. We always check for extra work when returning to user-level. | ||
796 | * With CONFIG_PREEMPT, we also check for extra work when the preempt_count | ||
797 | * is 0. After extra work processing has been completed, execution | ||
798 | * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check | ||
799 | * needs to be redone. | ||
800 | */ | ||
801 | #ifdef CONFIG_PREEMPT | ||
802 | rsm psr.i // disable interrupts | ||
803 | cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel | ||
804 | (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 | ||
805 | ;; | ||
806 | .pred.rel.mutex pUStk,pKStk | ||
807 | (pKStk) ld4 r21=[r20] // r21 <- preempt_count | ||
808 | (pUStk) mov r21=0 // r21 <- 0 | ||
809 | ;; | ||
810 | cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) | ||
811 | #else | ||
812 | (pUStk) rsm psr.i | ||
813 | cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel | ||
814 | (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk | ||
815 | #endif | ||
816 | .work_processed_kernel: | ||
817 | adds r17=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
818 | ;; | ||
819 | (p6) ld4 r31=[r17] // load current_thread_info()->flags | ||
820 | adds r21=PT(PR)+16,r12 | ||
821 | ;; | ||
822 | |||
823 | lfetch [r21],PT(CR_IPSR)-PT(PR) | ||
824 | adds r2=PT(B6)+16,r12 | ||
825 | adds r3=PT(R16)+16,r12 | ||
826 | ;; | ||
827 | lfetch [r21] | ||
828 | ld8 r28=[r2],8 // load b6 | ||
829 | adds r29=PT(R24)+16,r12 | ||
830 | |||
831 | ld8.fill r16=[r3],PT(AR_CSD)-PT(R16) | ||
832 | adds r30=PT(AR_CCV)+16,r12 | ||
833 | (p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? | ||
834 | ;; | ||
835 | ld8.fill r24=[r29] | ||
836 | ld8 r15=[r30] // load ar.ccv | ||
837 | (p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending? | ||
838 | ;; | ||
839 | ld8 r29=[r2],16 // load b7 | ||
840 | ld8 r30=[r3],16 // load ar.csd | ||
841 | (p6) br.cond.spnt .work_pending | ||
842 | ;; | ||
843 | ld8 r31=[r2],16 // load ar.ssd | ||
844 | ld8.fill r8=[r3],16 | ||
845 | ;; | ||
846 | ld8.fill r9=[r2],16 | ||
847 | ld8.fill r10=[r3],PT(R17)-PT(R10) | ||
848 | ;; | ||
849 | ld8.fill r11=[r2],PT(R18)-PT(R11) | ||
850 | ld8.fill r17=[r3],16 | ||
851 | ;; | ||
852 | ld8.fill r18=[r2],16 | ||
853 | ld8.fill r19=[r3],16 | ||
854 | ;; | ||
855 | ld8.fill r20=[r2],16 | ||
856 | ld8.fill r21=[r3],16 | ||
857 | mov ar.csd=r30 | ||
858 | mov ar.ssd=r31 | ||
859 | ;; | ||
860 | rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection | ||
861 | invala // invalidate ALAT | ||
862 | ;; | ||
863 | ld8.fill r22=[r2],24 | ||
864 | ld8.fill r23=[r3],24 | ||
865 | mov b6=r28 | ||
866 | ;; | ||
867 | ld8.fill r25=[r2],16 | ||
868 | ld8.fill r26=[r3],16 | ||
869 | mov b7=r29 | ||
870 | ;; | ||
871 | ld8.fill r27=[r2],16 | ||
872 | ld8.fill r28=[r3],16 | ||
873 | ;; | ||
874 | ld8.fill r29=[r2],16 | ||
875 | ld8.fill r30=[r3],24 | ||
876 | ;; | ||
877 | ld8.fill r31=[r2],PT(F9)-PT(R31) | ||
878 | adds r3=PT(F10)-PT(F6),r3 | ||
879 | ;; | ||
880 | ldf.fill f9=[r2],PT(F6)-PT(F9) | ||
881 | ldf.fill f10=[r3],PT(F8)-PT(F10) | ||
882 | ;; | ||
883 | ldf.fill f6=[r2],PT(F7)-PT(F6) | ||
884 | ;; | ||
885 | ldf.fill f7=[r2],PT(F11)-PT(F7) | ||
886 | ldf.fill f8=[r3],32 | ||
887 | ;; | ||
888 | srlz.i // ensure interruption collection is off | ||
889 | mov ar.ccv=r15 | ||
890 | ;; | ||
891 | ldf.fill f11=[r2] | ||
892 | bsw.0 // switch back to bank 0 (no stop bit required beforehand...) | ||
893 | ;; | ||
894 | (pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency) | ||
895 | adds r16=PT(CR_IPSR)+16,r12 | ||
896 | adds r17=PT(CR_IIP)+16,r12 | ||
897 | |||
898 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | ||
899 | nop.i 0 | ||
900 | nop.i 0 | ||
901 | ;; | ||
902 | ld8 r29=[r16],16 // load cr.ipsr | ||
903 | ld8 r28=[r17],16 // load cr.iip | ||
904 | ;; | ||
905 | ld8 r30=[r16],16 // load cr.ifs | ||
906 | ld8 r25=[r17],16 // load ar.unat | ||
907 | ;; | ||
908 | ld8 r26=[r16],16 // load ar.pfs | ||
909 | ld8 r27=[r17],16 // load ar.rsc | ||
910 | cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs | ||
911 | ;; | ||
912 | ld8 r24=[r16],16 // load ar.rnat (may be garbage) | ||
913 | ld8 r23=[r17],16 // load ar.bspstore (may be garbage) | ||
914 | ;; | ||
915 | ld8 r31=[r16],16 // load predicates | ||
916 | ld8 r21=[r17],16 // load b0 | ||
917 | ;; | ||
918 | ld8 r19=[r16],16 // load ar.rsc value for "loadrs" | ||
919 | ld8.fill r1=[r17],16 // load r1 | ||
920 | ;; | ||
921 | ld8.fill r12=[r16],16 | ||
922 | ld8.fill r13=[r17],16 | ||
923 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 | ||
924 | ;; | ||
925 | ld8 r20=[r16],16 // ar.fpsr | ||
926 | ld8.fill r15=[r17],16 | ||
927 | ;; | ||
928 | ld8.fill r14=[r16],16 | ||
929 | ld8.fill r2=[r17] | ||
930 | (pUStk) mov r17=1 | ||
931 | ;; | ||
932 | ld8.fill r3=[r16] | ||
933 | (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack | ||
934 | shr.u r18=r19,16 // get byte size of existing "dirty" partition | ||
935 | ;; | ||
936 | mov r16=ar.bsp // get existing backing store pointer | ||
937 | addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 | ||
938 | ;; | ||
939 | ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 | ||
940 | (pKStk) br.cond.dpnt skip_rbs_switch | ||
941 | |||
942 | /* | ||
943 | * Restore user backing store. | ||
944 | * | ||
945 | * NOTE: alloc, loadrs, and cover can't be predicated. | ||
946 | */ | ||
947 | (pNonSys) br.cond.dpnt dont_preserve_current_frame | ||
948 | |||
949 | rbs_switch: | ||
950 | cover // add current frame into dirty partition and set cr.ifs | ||
951 | ;; | ||
952 | mov r19=ar.bsp // get new backing store pointer | ||
953 | sub r16=r16,r18 // krbs = old bsp - size of dirty partition | ||
954 | cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs | ||
955 | ;; | ||
956 | sub r19=r19,r16 // calculate total byte size of dirty partition | ||
957 | add r18=64,r18 // don't force in0-in7 into memory... | ||
958 | ;; | ||
959 | shl r19=r19,16 // shift size of dirty partition into loadrs position | ||
960 | ;; | ||
961 | dont_preserve_current_frame: | ||
962 | /* | ||
963 | * To prevent leaking bits between the kernel and user-space, | ||
964 | * we must clear the stacked registers in the "invalid" partition here. | ||
965 | * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium, | ||
966 | * 5 registers/cycle on McKinley). | ||
967 | */ | ||
968 | # define pRecurse p6 | ||
969 | # define pReturn p7 | ||
970 | #ifdef CONFIG_ITANIUM | ||
971 | # define Nregs 10 | ||
972 | #else | ||
973 | # define Nregs 14 | ||
974 | #endif | ||
975 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
976 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | ||
977 | sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize | ||
978 | ;; | ||
979 | mov ar.rsc=r19 // load ar.rsc to be used for "loadrs" | ||
980 | shladd in0=loc1,3,r17 | ||
981 | mov in1=0 | ||
982 | ;; | ||
983 | TEXT_ALIGN(32) | ||
984 | rse_clear_invalid: | ||
985 | #ifdef CONFIG_ITANIUM | ||
986 | // cycle 0 | ||
987 | { .mii | ||
988 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
989 | cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse | ||
990 | add out0=-Nregs*8,in0 | ||
991 | }{ .mfb | ||
992 | add out1=1,in1 // increment recursion count | ||
993 | nop.f 0 | ||
994 | nop.b 0 // can't do br.call here because of alloc (WAW on CFM) | ||
995 | ;; | ||
996 | }{ .mfi // cycle 1 | ||
997 | mov loc1=0 | ||
998 | nop.f 0 | ||
999 | mov loc2=0 | ||
1000 | }{ .mib | ||
1001 | mov loc3=0 | ||
1002 | mov loc4=0 | ||
1003 | (pRecurse) br.call.sptk.many b0=rse_clear_invalid | ||
1004 | |||
1005 | }{ .mfi // cycle 2 | ||
1006 | mov loc5=0 | ||
1007 | nop.f 0 | ||
1008 | cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret | ||
1009 | }{ .mib | ||
1010 | mov loc6=0 | ||
1011 | mov loc7=0 | ||
1012 | (pReturn) br.ret.sptk.many b0 | ||
1013 | } | ||
1014 | #else /* !CONFIG_ITANIUM */ | ||
1015 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
1016 | cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse | ||
1017 | add out0=-Nregs*8,in0 | ||
1018 | add out1=1,in1 // increment recursion count | ||
1019 | mov loc1=0 | ||
1020 | mov loc2=0 | ||
1021 | ;; | ||
1022 | mov loc3=0 | ||
1023 | mov loc4=0 | ||
1024 | mov loc5=0 | ||
1025 | mov loc6=0 | ||
1026 | mov loc7=0 | ||
1027 | (pRecurse) br.call.sptk.few b0=rse_clear_invalid | ||
1028 | ;; | ||
1029 | mov loc8=0 | ||
1030 | mov loc9=0 | ||
1031 | cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret | ||
1032 | mov loc10=0 | ||
1033 | mov loc11=0 | ||
1034 | (pReturn) br.ret.sptk.many b0 | ||
1035 | #endif /* !CONFIG_ITANIUM */ | ||
1036 | # undef pRecurse | ||
1037 | # undef pReturn | ||
1038 | ;; | ||
1039 | alloc r17=ar.pfs,0,0,0,0 // drop current register frame | ||
1040 | ;; | ||
1041 | loadrs | ||
1042 | ;; | ||
1043 | skip_rbs_switch: | ||
1044 | mov ar.unat=r25 // M2 | ||
1045 | (pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22 | ||
1046 | (pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise | ||
1047 | ;; | ||
1048 | (pUStk) mov ar.bspstore=r23 // M2 | ||
1049 | (pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp | ||
1050 | (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise | ||
1051 | ;; | ||
1052 | mov cr.ipsr=r29 // M2 | ||
1053 | mov ar.pfs=r26 // I0 | ||
1054 | (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise | ||
1055 | |||
1056 | (p9) mov cr.ifs=r30 // M2 | ||
1057 | mov b0=r21 // I0 | ||
1058 | (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise | ||
1059 | |||
1060 | mov ar.fpsr=r20 // M2 | ||
1061 | mov cr.iip=r28 // M2 | ||
1062 | nop 0 | ||
1063 | ;; | ||
1064 | (pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode | ||
1065 | nop 0 | ||
1066 | (pLvSys)mov r2=r0 | ||
1067 | |||
1068 | mov ar.rsc=r27 // M2 | ||
1069 | mov pr=r31,-1 // I0 | ||
1070 | rfi // B | ||
1071 | |||
1072 | /* | ||
1073 | * On entry: | ||
1074 | * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT) | ||
1075 | * r31 = current->thread_info->flags | ||
1076 | * On exit: | ||
1077 | * p6 = TRUE if work-pending-check needs to be redone | ||
1078 | */ | ||
1079 | .work_pending_syscall: | ||
1080 | add r2=-8,r2 | ||
1081 | add r3=-8,r3 | ||
1082 | ;; | ||
1083 | st8 [r2]=r8 | ||
1084 | st8 [r3]=r10 | ||
1085 | .work_pending: | ||
1086 | tbit.nz p6,p0=r31,TIF_SIGDELAYED // signal delayed from MCA/INIT/NMI/PMI context? | ||
1087 | (p6) br.cond.sptk.few .sigdelayed | ||
1088 | ;; | ||
1089 | tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0? | ||
1090 | (p6) br.cond.sptk.few .notify | ||
1091 | #ifdef CONFIG_PREEMPT | ||
1092 | (pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1 | ||
1093 | ;; | ||
1094 | (pKStk) st4 [r20]=r21 | ||
1095 | ssm psr.i // enable interrupts | ||
1096 | #endif | ||
1097 | br.call.spnt.many rp=schedule | ||
1098 | .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 | ||
1099 | rsm psr.i // disable interrupts | ||
1100 | ;; | ||
1101 | #ifdef CONFIG_PREEMPT | ||
1102 | (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 | ||
1103 | ;; | ||
1104 | (pKStk) st4 [r20]=r0 // preempt_count() <- 0 | ||
1105 | #endif | ||
1106 | (pLvSys)br.cond.sptk.few .work_pending_syscall_end | ||
1107 | br.cond.sptk.many .work_processed_kernel // re-check | ||
1108 | |||
1109 | .notify: | ||
1110 | (pUStk) br.call.spnt.many rp=notify_resume_user | ||
1111 | .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 | ||
1112 | (pLvSys)br.cond.sptk.few .work_pending_syscall_end | ||
1113 | br.cond.sptk.many .work_processed_kernel // don't re-check | ||
1114 | |||
1115 | // There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where | ||
1116 | // it could not be delivered. Deliver it now. The signal might be for us and | ||
1117 | // may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed | ||
1118 | // signal. | ||
1119 | |||
1120 | .sigdelayed: | ||
1121 | br.call.sptk.many rp=do_sigdelayed | ||
1122 | cmp.eq p6,p0=r0,r0 // p6 <- 1, always re-check | ||
1123 | (pLvSys)br.cond.sptk.few .work_pending_syscall_end | ||
1124 | br.cond.sptk.many .work_processed_kernel // re-check | ||
1125 | |||
1126 | .work_pending_syscall_end: | ||
1127 | adds r2=PT(R8)+16,r12 | ||
1128 | adds r3=PT(R10)+16,r12 | ||
1129 | ;; | ||
1130 | ld8 r8=[r2] | ||
1131 | ld8 r10=[r3] | ||
1132 | br.cond.sptk.many .work_processed_syscall // re-check | ||
1133 | |||
1134 | END(ia64_leave_kernel) | ||
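The rse_clear_invalid loop buried in the exit path above deserves a C rendering, because the recursion is the whole trick: every alloc/br.call pair advances the register stack by one frame, so zeroing the locals of each activation scrubs successive Nregs-sized chunks of the invalid partition. C can show the control flow but not the frame motion, so this is strictly a sketch:

    /* in0 = bytes of invalid partition left to clear, in1 = recursion depth */
    void rse_clear_invalid(long in0, long in1)
    {
            if (in0 > Nregs * 8)                            /* pRecurse          */
                    rse_clear_invalid(in0 - Nregs * 8, in1 + 1);
            /* zero this activation's locals (loc1.. = 0 in the asm) */
            if (in1 != 0)                                   /* pReturn           */
                    return;                                 /* br.ret pops frame */
    }

The initial in0 comes from the setup just before the loop: roughly the byte size of the invalid partition plus space for RNaT collection slots, one per 64 dirty registers (shr.u loc1=r18,9).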
1135 | |||
1136 | ENTRY(handle_syscall_error) | ||
1137 | /* | ||
1138 | * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could | ||
1139 | * lead us to mistake a negative return value for a failed syscall. Those syscalls | ||
1140 | * must deposit a non-zero value in pt_regs.r8 to indicate an error. If | ||
1141 | * pt_regs.r8 is zero, we assume that the call completed successfully. | ||
1142 | */ | ||
1143 | PT_REGS_UNWIND_INFO(0) | ||
1144 | ld8 r3=[r2] // load pt_regs.r8 | ||
1145 | ;; | ||
1146 | cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0? | ||
1147 | ;; | ||
1148 | (p7) mov r10=-1 | ||
1149 | (p7) sub r8=0,r8 // negate return value to get errno | ||
1150 | br.cond.sptk ia64_leave_syscall | ||
1151 | END(handle_syscall_error) | ||
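handle_syscall_error in C terms: a negative r8 only turns into an errno return if the syscall flagged the error by depositing a non-zero value in pt_regs.r8, which is how ptrace/mmap-style legitimately-negative results survive. Sketch (field access simplified):

    void handle_syscall_error(struct pt_regs *regs, long *r8, long *r10)
    {
            if (regs->r8 != 0) {     /* non-zero deposit => real error     */
                    *r10 = -1;       /* error indication for userspace     */
                    *r8  = -*r8;     /* negate to the positive errno value */
            }
            /* else: the negative value is a valid result and r10 stays 0 */
    }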
1152 | |||
1153 | /* | ||
1154 | * Invoke schedule_tail(task) while preserving in0-in7, which may be needed | ||
1155 | * in case a system call gets restarted. | ||
1156 | */ | ||
1157 | GLOBAL_ENTRY(ia64_invoke_schedule_tail) | ||
1158 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
1159 | alloc loc1=ar.pfs,8,2,1,0 | ||
1160 | mov loc0=rp | ||
1161 | mov out0=r8 // Address of previous task | ||
1162 | ;; | ||
1163 | br.call.sptk.many rp=schedule_tail | ||
1164 | .ret11: mov ar.pfs=loc1 | ||
1165 | mov rp=loc0 | ||
1166 | br.ret.sptk.many rp | ||
1167 | END(ia64_invoke_schedule_tail) | ||
1168 | |||
1169 | /* | ||
1170 | * Set up the stack and call do_notify_resume_user(). Note that pSys and pNonSys need to | ||
1171 | * be set up by the caller. We declare 8 input registers so the system call | ||
1172 | * args get preserved, in case we need to restart a system call. | ||
1173 | */ | ||
1174 | ENTRY(notify_resume_user) | ||
1175 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
1176 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! | ||
1177 | mov r9=ar.unat | ||
1178 | mov loc0=rp // save return address | ||
1179 | mov out0=0 // there is no "oldset" | ||
1180 | adds out1=8,sp // out1=&sigscratch->ar_pfs | ||
1181 | (pSys) mov out2=1 // out2==1 => we're in a syscall | ||
1182 | ;; | ||
1183 | (pNonSys) mov out2=0 // out2==0 => not a syscall | ||
1184 | .fframe 16 | ||
1185 | .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) | ||
1186 | st8 [sp]=r9,-16 // allocate space for ar.unat and save it | ||
1187 | st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch | ||
1188 | .body | ||
1189 | br.call.sptk.many rp=do_notify_resume_user | ||
1190 | .ret15: .restore sp | ||
1191 | adds sp=16,sp // pop scratch stack space | ||
1192 | ;; | ||
1193 | ld8 r9=[sp] // load new unat from sigscratch->scratch_unat | ||
1194 | mov rp=loc0 | ||
1195 | ;; | ||
1196 | mov ar.unat=r9 | ||
1197 | mov ar.pfs=loc1 | ||
1198 | br.ret.sptk.many rp | ||
1199 | END(notify_resume_user) | ||
1200 | |||
1201 | GLOBAL_ENTRY(sys_rt_sigsuspend) | ||
1202 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
1203 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! | ||
1204 | mov r9=ar.unat | ||
1205 | mov loc0=rp // save return address | ||
1206 | mov out0=in0 // mask | ||
1207 | mov out1=in1 // sigsetsize | ||
1208 | adds out2=8,sp // out2=&sigscratch->ar_pfs | ||
1209 | ;; | ||
1210 | .fframe 16 | ||
1211 | .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) | ||
1212 | st8 [sp]=r9,-16 // allocate space for ar.unat and save it | ||
1213 | st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch | ||
1214 | .body | ||
1215 | br.call.sptk.many rp=ia64_rt_sigsuspend | ||
1216 | .ret17: .restore sp | ||
1217 | adds sp=16,sp // pop scratch stack space | ||
1218 | ;; | ||
1219 | ld8 r9=[sp] // load new unat from sw->caller_unat | ||
1220 | mov rp=loc0 | ||
1221 | ;; | ||
1222 | mov ar.unat=r9 | ||
1223 | mov ar.pfs=loc1 | ||
1224 | br.ret.sptk.many rp | ||
1225 | END(sys_rt_sigsuspend) | ||
1226 | |||
1227 | ENTRY(sys_rt_sigreturn) | ||
1228 | PT_REGS_UNWIND_INFO(0) | ||
1229 | /* | ||
1230 | * Allocate 8 input registers since ptrace() may clobber them | ||
1231 | */ | ||
1232 | alloc r2=ar.pfs,8,0,1,0 | ||
1233 | .prologue | ||
1234 | PT_REGS_SAVES(16) | ||
1235 | adds sp=-16,sp | ||
1236 | .body | ||
1237 | cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall... | ||
1238 | ;; | ||
1239 | /* | ||
1240 | * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined | ||
1241 | * syscall-entry path does not save them we save them here instead. Note: we | ||
1242 | * don't need to save any other registers that are not saved by the stream-lined | ||
1243 | * syscall path, because restore_sigcontext() restores them. | ||
1244 | */ | ||
1245 | adds r16=PT(F6)+32,sp | ||
1246 | adds r17=PT(F7)+32,sp | ||
1247 | ;; | ||
1248 | stf.spill [r16]=f6,32 | ||
1249 | stf.spill [r17]=f7,32 | ||
1250 | ;; | ||
1251 | stf.spill [r16]=f8,32 | ||
1252 | stf.spill [r17]=f9,32 | ||
1253 | ;; | ||
1254 | stf.spill [r16]=f10 | ||
1255 | stf.spill [r17]=f11 | ||
1256 | adds out0=16,sp // out0 = &sigscratch | ||
1257 | br.call.sptk.many rp=ia64_rt_sigreturn | ||
1258 | .ret19: .restore sp 0 | ||
1259 | adds sp=16,sp | ||
1260 | ;; | ||
1261 | ld8 r9=[sp] // load new ar.unat | ||
1262 | mov.sptk b7=r8,ia64_leave_kernel | ||
1263 | ;; | ||
1264 | mov ar.unat=r9 | ||
1265 | br.many b7 | ||
1266 | END(sys_rt_sigreturn) | ||
1267 | |||
1268 | GLOBAL_ENTRY(ia64_prepare_handle_unaligned) | ||
1269 | .prologue | ||
1270 | /* | ||
1271 | * r16 = fake ar.pfs, we simply need to make sure privilege is still 0 | ||
1272 | */ | ||
1273 | mov r16=r0 | ||
1274 | DO_SAVE_SWITCH_STACK | ||
1275 | br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt | ||
1276 | .ret21: .body | ||
1277 | DO_LOAD_SWITCH_STACK | ||
1278 | br.cond.sptk.many rp // goes to ia64_leave_kernel | ||
1279 | END(ia64_prepare_handle_unaligned) | ||
1280 | |||
1281 | // | ||
1282 | // unw_init_running(void (*callback)(info, arg), void *arg) | ||
1283 | // | ||
1284 | # define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15) | ||
1285 | |||
1286 | GLOBAL_ENTRY(unw_init_running) | ||
1287 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) | ||
1288 | alloc loc1=ar.pfs,2,3,3,0 | ||
1289 | ;; | ||
1290 | ld8 loc2=[in0],8 | ||
1291 | mov loc0=rp | ||
1292 | mov r16=loc1 | ||
1293 | DO_SAVE_SWITCH_STACK | ||
1294 | .body | ||
1295 | |||
1296 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) | ||
1297 | .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE | ||
1298 | SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE) | ||
1299 | adds sp=-EXTRA_FRAME_SIZE,sp | ||
1300 | .body | ||
1301 | ;; | ||
1302 | adds out0=16,sp // &info | ||
1303 | mov out1=r13 // current | ||
1304 | adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack | ||
1305 | br.call.sptk.many rp=unw_init_frame_info | ||
1306 | 1: adds out0=16,sp // &info | ||
1307 | mov b6=loc2 | ||
1308 | mov loc2=gp // save gp across indirect function call | ||
1309 | ;; | ||
1310 | ld8 gp=[in0] | ||
1311 | mov out1=in1 // arg | ||
1312 | br.call.sptk.many rp=b6 // invoke the callback function | ||
1313 | 1: mov gp=loc2 // restore gp | ||
1314 | |||
1315 | // For now, we don't allow changing registers from within | ||
1316 | // unw_init_running; if we ever want to allow that, we'd | ||
1317 | // have to do a load_switch_stack here: | ||
1318 | .restore sp | ||
1319 | adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp | ||
1320 | |||
1321 | mov ar.pfs=loc1 | ||
1322 | mov rp=loc0 | ||
1323 | br.ret.sptk.many rp | ||
1324 | END(unw_init_running) | ||
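The prototype implied by the comment above, with the exact types as an assumption (see the era's include/asm-ia64/unwind.h):

    void unw_init_running(void (*callback)(struct unw_frame_info *info, void *arg),
                          void *arg);

It saves a switch_stack, initializes an unw_frame_info for the current kernel stack, and calls back with both; as the closing comment notes, the callback must not modify registers, because the switch_stack is popped without a matching load_switch_stack.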
1325 | |||
1326 | .rodata | ||
1327 | .align 8 | ||
1328 | .globl sys_call_table | ||
1329 | sys_call_table: | ||
1330 | data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S. | ||
1331 | data8 sys_exit // 1025 | ||
1332 | data8 sys_read | ||
1333 | data8 sys_write | ||
1334 | data8 sys_open | ||
1335 | data8 sys_close | ||
1336 | data8 sys_creat // 1030 | ||
1337 | data8 sys_link | ||
1338 | data8 sys_unlink | ||
1339 | data8 ia64_execve | ||
1340 | data8 sys_chdir | ||
1341 | data8 sys_fchdir // 1035 | ||
1342 | data8 sys_utimes | ||
1343 | data8 sys_mknod | ||
1344 | data8 sys_chmod | ||
1345 | data8 sys_chown | ||
1346 | data8 sys_lseek // 1040 | ||
1347 | data8 sys_getpid | ||
1348 | data8 sys_getppid | ||
1349 | data8 sys_mount | ||
1350 | data8 sys_umount | ||
1351 | data8 sys_setuid // 1045 | ||
1352 | data8 sys_getuid | ||
1353 | data8 sys_geteuid | ||
1354 | data8 sys_ptrace | ||
1355 | data8 sys_access | ||
1356 | data8 sys_sync // 1050 | ||
1357 | data8 sys_fsync | ||
1358 | data8 sys_fdatasync | ||
1359 | data8 sys_kill | ||
1360 | data8 sys_rename | ||
1361 | data8 sys_mkdir // 1055 | ||
1362 | data8 sys_rmdir | ||
1363 | data8 sys_dup | ||
1364 | data8 sys_pipe | ||
1365 | data8 sys_times | ||
1366 | data8 ia64_brk // 1060 | ||
1367 | data8 sys_setgid | ||
1368 | data8 sys_getgid | ||
1369 | data8 sys_getegid | ||
1370 | data8 sys_acct | ||
1371 | data8 sys_ioctl // 1065 | ||
1372 | data8 sys_fcntl | ||
1373 | data8 sys_umask | ||
1374 | data8 sys_chroot | ||
1375 | data8 sys_ustat | ||
1376 | data8 sys_dup2 // 1070 | ||
1377 | data8 sys_setreuid | ||
1378 | data8 sys_setregid | ||
1379 | data8 sys_getresuid | ||
1380 | data8 sys_setresuid | ||
1381 | data8 sys_getresgid // 1075 | ||
1382 | data8 sys_setresgid | ||
1383 | data8 sys_getgroups | ||
1384 | data8 sys_setgroups | ||
1385 | data8 sys_getpgid | ||
1386 | data8 sys_setpgid // 1080 | ||
1387 | data8 sys_setsid | ||
1388 | data8 sys_getsid | ||
1389 | data8 sys_sethostname | ||
1390 | data8 sys_setrlimit | ||
1391 | data8 sys_getrlimit // 1085 | ||
1392 | data8 sys_getrusage | ||
1393 | data8 sys_gettimeofday | ||
1394 | data8 sys_settimeofday | ||
1395 | data8 sys_select | ||
1396 | data8 sys_poll // 1090 | ||
1397 | data8 sys_symlink | ||
1398 | data8 sys_readlink | ||
1399 | data8 sys_uselib | ||
1400 | data8 sys_swapon | ||
1401 | data8 sys_swapoff // 1095 | ||
1402 | data8 sys_reboot | ||
1403 | data8 sys_truncate | ||
1404 | data8 sys_ftruncate | ||
1405 | data8 sys_fchmod | ||
1406 | data8 sys_fchown // 1100 | ||
1407 | data8 ia64_getpriority | ||
1408 | data8 sys_setpriority | ||
1409 | data8 sys_statfs | ||
1410 | data8 sys_fstatfs | ||
1411 | data8 sys_gettid // 1105 | ||
1412 | data8 sys_semget | ||
1413 | data8 sys_semop | ||
1414 | data8 sys_semctl | ||
1415 | data8 sys_msgget | ||
1416 | data8 sys_msgsnd // 1110 | ||
1417 | data8 sys_msgrcv | ||
1418 | data8 sys_msgctl | ||
1419 | data8 sys_shmget | ||
1420 | data8 ia64_shmat | ||
1421 | data8 sys_shmdt // 1115 | ||
1422 | data8 sys_shmctl | ||
1423 | data8 sys_syslog | ||
1424 | data8 sys_setitimer | ||
1425 | data8 sys_getitimer | ||
1426 | data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */ | ||
1427 | data8 sys_ni_syscall /* was: ia64_oldlstat */ | ||
1428 | data8 sys_ni_syscall /* was: ia64_oldfstat */ | ||
1429 | data8 sys_vhangup | ||
1430 | data8 sys_lchown | ||
1431 | data8 sys_remap_file_pages // 1125 | ||
1432 | data8 sys_wait4 | ||
1433 | data8 sys_sysinfo | ||
1434 | data8 sys_clone | ||
1435 | data8 sys_setdomainname | ||
1436 | data8 sys_newuname // 1130 | ||
1437 | data8 sys_adjtimex | ||
1438 | data8 sys_ni_syscall /* was: ia64_create_module */ | ||
1439 | data8 sys_init_module | ||
1440 | data8 sys_delete_module | ||
1441 | data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */ | ||
1442 | data8 sys_ni_syscall /* was: sys_query_module */ | ||
1443 | data8 sys_quotactl | ||
1444 | data8 sys_bdflush | ||
1445 | data8 sys_sysfs | ||
1446 | data8 sys_personality // 1140 | ||
1447 | data8 sys_ni_syscall // sys_afs_syscall | ||
1448 | data8 sys_setfsuid | ||
1449 | data8 sys_setfsgid | ||
1450 | data8 sys_getdents | ||
1451 | data8 sys_flock // 1145 | ||
1452 | data8 sys_readv | ||
1453 | data8 sys_writev | ||
1454 | data8 sys_pread64 | ||
1455 | data8 sys_pwrite64 | ||
1456 | data8 sys_sysctl // 1150 | ||
1457 | data8 sys_mmap | ||
1458 | data8 sys_munmap | ||
1459 | data8 sys_mlock | ||
1460 | data8 sys_mlockall | ||
1461 | data8 sys_mprotect // 1155 | ||
1462 | data8 ia64_mremap | ||
1463 | data8 sys_msync | ||
1464 | data8 sys_munlock | ||
1465 | data8 sys_munlockall | ||
1466 | data8 sys_sched_getparam // 1160 | ||
1467 | data8 sys_sched_setparam | ||
1468 | data8 sys_sched_getscheduler | ||
1469 | data8 sys_sched_setscheduler | ||
1470 | data8 sys_sched_yield | ||
1471 | data8 sys_sched_get_priority_max // 1165 | ||
1472 | data8 sys_sched_get_priority_min | ||
1473 | data8 sys_sched_rr_get_interval | ||
1474 | data8 sys_nanosleep | ||
1475 | data8 sys_nfsservctl | ||
1476 | data8 sys_prctl // 1170 | ||
1477 | data8 sys_getpagesize | ||
1478 | data8 sys_mmap2 | ||
1479 | data8 sys_pciconfig_read | ||
1480 | data8 sys_pciconfig_write | ||
1481 | data8 sys_perfmonctl // 1175 | ||
1482 | data8 sys_sigaltstack | ||
1483 | data8 sys_rt_sigaction | ||
1484 | data8 sys_rt_sigpending | ||
1485 | data8 sys_rt_sigprocmask | ||
1486 | data8 sys_rt_sigqueueinfo // 1180 | ||
1487 | data8 sys_rt_sigreturn | ||
1488 | data8 sys_rt_sigsuspend | ||
1489 | data8 sys_rt_sigtimedwait | ||
1490 | data8 sys_getcwd | ||
1491 | data8 sys_capget // 1185 | ||
1492 | data8 sys_capset | ||
1493 | data8 sys_sendfile64 | ||
1494 | data8 sys_ni_syscall // sys_getpmsg (STREAMS) | ||
1495 | data8 sys_ni_syscall // sys_putpmsg (STREAMS) | ||
1496 | data8 sys_socket // 1190 | ||
1497 | data8 sys_bind | ||
1498 | data8 sys_connect | ||
1499 | data8 sys_listen | ||
1500 | data8 sys_accept | ||
1501 | data8 sys_getsockname // 1195 | ||
1502 | data8 sys_getpeername | ||
1503 | data8 sys_socketpair | ||
1504 | data8 sys_send | ||
1505 | data8 sys_sendto | ||
1506 | data8 sys_recv // 1200 | ||
1507 | data8 sys_recvfrom | ||
1508 | data8 sys_shutdown | ||
1509 | data8 sys_setsockopt | ||
1510 | data8 sys_getsockopt | ||
1511 | data8 sys_sendmsg // 1205 | ||
1512 | data8 sys_recvmsg | ||
1513 | data8 sys_pivot_root | ||
1514 | data8 sys_mincore | ||
1515 | data8 sys_madvise | ||
1516 | data8 sys_newstat // 1210 | ||
1517 | data8 sys_newlstat | ||
1518 | data8 sys_newfstat | ||
1519 | data8 sys_clone2 | ||
1520 | data8 sys_getdents64 | ||
1521 | data8 sys_getunwind // 1215 | ||
1522 | data8 sys_readahead | ||
1523 | data8 sys_setxattr | ||
1524 | data8 sys_lsetxattr | ||
1525 | data8 sys_fsetxattr | ||
1526 | data8 sys_getxattr // 1220 | ||
1527 | data8 sys_lgetxattr | ||
1528 | data8 sys_fgetxattr | ||
1529 | data8 sys_listxattr | ||
1530 | data8 sys_llistxattr | ||
1531 | data8 sys_flistxattr // 1225 | ||
1532 | data8 sys_removexattr | ||
1533 | data8 sys_lremovexattr | ||
1534 | data8 sys_fremovexattr | ||
1535 | data8 sys_tkill | ||
1536 | data8 sys_futex // 1230 | ||
1537 | data8 sys_sched_setaffinity | ||
1538 | data8 sys_sched_getaffinity | ||
1539 | data8 sys_set_tid_address | ||
1540 | data8 sys_fadvise64_64 | ||
1541 | data8 sys_tgkill // 1235 | ||
1542 | data8 sys_exit_group | ||
1543 | data8 sys_lookup_dcookie | ||
1544 | data8 sys_io_setup | ||
1545 | data8 sys_io_destroy | ||
1546 | data8 sys_io_getevents // 1240 | ||
1547 | data8 sys_io_submit | ||
1548 | data8 sys_io_cancel | ||
1549 | data8 sys_epoll_create | ||
1550 | data8 sys_epoll_ctl | ||
1551 | data8 sys_epoll_wait // 1245 | ||
1552 | data8 sys_restart_syscall | ||
1553 | data8 sys_semtimedop | ||
1554 | data8 sys_timer_create | ||
1555 | data8 sys_timer_settime | ||
1556 | data8 sys_timer_gettime // 1250 | ||
1557 | data8 sys_timer_getoverrun | ||
1558 | data8 sys_timer_delete | ||
1559 | data8 sys_clock_settime | ||
1560 | data8 sys_clock_gettime | ||
1561 | data8 sys_clock_getres // 1255 | ||
1562 | data8 sys_clock_nanosleep | ||
1563 | data8 sys_fstatfs64 | ||
1564 | data8 sys_statfs64 | ||
1565 | data8 sys_mbind | ||
1566 | data8 sys_get_mempolicy // 1260 | ||
1567 | data8 sys_set_mempolicy | ||
1568 | data8 sys_mq_open | ||
1569 | data8 sys_mq_unlink | ||
1570 | data8 sys_mq_timedsend | ||
1571 | data8 sys_mq_timedreceive // 1265 | ||
1572 | data8 sys_mq_notify | ||
1573 | data8 sys_mq_getsetattr | ||
1574 | data8 sys_ni_syscall // reserved for kexec_load | ||
1575 | data8 sys_ni_syscall // reserved for vserver | ||
1576 | data8 sys_waitid // 1270 | ||
1577 | data8 sys_add_key | ||
1578 | data8 sys_request_key | ||
1579 | data8 sys_keyctl | ||
1580 | data8 sys_ni_syscall | ||
1581 | data8 sys_ni_syscall // 1275 | ||
1582 | data8 sys_ni_syscall | ||
1583 | data8 sys_ni_syscall | ||
1584 | data8 sys_ni_syscall | ||
1585 | data8 sys_ni_syscall | ||
1586 | |||
1587 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | ||