aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/kernel/gate.S
diff options
context:
space:
mode:
authorIsaku Yamahata <yamahata@valinux.co.jp>2009-03-04 07:05:44 -0500
committerTony Luck <tony.luck@intel.com>2009-03-26 14:01:11 -0400
commit53129c5c553f8d0c45f12f15742ac112e8605ab5 (patch)
treefa95cae1a8b2e8fdf5da59e10e1a311ede77df25 /arch/ia64/kernel/gate.S
parentb937dd76d07f2347684d6cc1e1ec4e2746417357 (diff)
ia64/pv_ops: move down __kernel_syscall_via_epc.
Move down __kernel_syscall_via_epc to the end of the page. We want to paravirtualize only __kernel_syscall_via_epc because it includes privileged instructions. Its paravirtualization increases its symbol size. On the other hand, each paravirtualized gate must have symbols of the same value and size as the native one's, because the page is mapped to GATE_ADDR and GATE_ADDR + PERCPU_PAGE_SIZE and vmlinux is linked to those symbols. Later, to keep the symbol size the same, we pad NOPs at the end of __kernel_syscall_via_epc. Move it after the other functions so that the symbols of those functions keep the same values and sizes. Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp> Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/gate.S')
-rw-r--r--arch/ia64/kernel/gate.S162
1 files changed, 81 insertions, 81 deletions
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 74b1ccce4e84..c957228e3f1d 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -48,87 +48,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break)
48} 48}
49END(__kernel_syscall_via_break) 49END(__kernel_syscall_via_break)
50 50
51/*
52 * On entry:
53 * r11 = saved ar.pfs
54 * r15 = system call #
55 * b0 = saved return address
56 * b6 = return address
57 * On exit:
58 * r11 = saved ar.pfs
59 * r15 = system call #
60 * b0 = saved return address
61 * all other "scratch" registers: undefined
62 * all "preserved" registers: same as on entry
63 */
64
65GLOBAL_ENTRY(__kernel_syscall_via_epc)
66 .prologue
67 .altrp b6
68 .body
69{
70 /*
71 * Note: the kernel cannot assume that the first two instructions in this
72 * bundle get executed. The remaining code must be safe even if
73 * they do not get executed.
74 */
75 adds r17=-1024,r15 // A
76 mov r10=0 // A default to successful syscall execution
77 epc // B causes split-issue
78}
79 ;;
80 rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
81 LOAD_FSYSCALL_TABLE(r14) // X
82 ;;
83 mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
84 shladd r18=r17,3,r14 // A
85 mov r19=NR_syscalls-1 // A
86 ;;
87 lfetch [r18] // M0|1
88 mov r29=psr // M2 (12 cyc)
89 // If r17 is a NaT, p6 will be zero
90 cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
91 ;;
92 mov r21=ar.fpsr // M2 (12 cyc)
93 tnat.nz p10,p9=r15 // I0
94 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
95 ;;
96 srlz.d // M0 (forces split-issue) ensure PSR.BE==0
97(p6) ld8 r18=[r18] // M0|1
98 nop.i 0
99 ;;
100 nop.m 0
101(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
102 nop.i 0
103 ;;
104(p8) ssm psr.i
105(p6) mov b7=r18 // I0
106(p8) br.dptk.many b7 // B
107
108 mov r27=ar.rsc // M2 (12 cyc)
109/*
110 * brl.cond doesn't work as intended because the linker would convert this branch
111 * into a branch to a PLT. Perhaps there will be a way to avoid this with some
112 * future version of the linker. In the meantime, we just use an indirect branch
113 * instead.
114 */
115#ifdef CONFIG_ITANIUM
116(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
117 ;;
118(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
119 ;;
120(p6) mov b7=r14
121(p6) br.sptk.many b7
122#else
123 BRL_COND_FSYS_BUBBLE_DOWN(p6)
124#endif
125 ssm psr.i
126 mov r10=-1
127(p10) mov r8=EINVAL
128(p9) mov r8=ENOSYS
129 FSYS_RETURN
130END(__kernel_syscall_via_epc)
131
132# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) 51# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
133# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) 52# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
134# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) 53# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
@@ -374,3 +293,84 @@ restore_rbs:
374 // invala not necessary as that will happen when returning to user-mode 293 // invala not necessary as that will happen when returning to user-mode
375 br.cond.sptk back_from_restore_rbs 294 br.cond.sptk back_from_restore_rbs
376END(__kernel_sigtramp) 295END(__kernel_sigtramp)
296
297/*
298 * On entry:
299 * r11 = saved ar.pfs
300 * r15 = system call #
301 * b0 = saved return address
302 * b6 = return address
303 * On exit:
304 * r11 = saved ar.pfs
305 * r15 = system call #
306 * b0 = saved return address
307 * all other "scratch" registers: undefined
308 * all "preserved" registers: same as on entry
309 */
310
311GLOBAL_ENTRY(__kernel_syscall_via_epc)
312 .prologue
313 .altrp b6
314 .body
315{
316 /*
317 * Note: the kernel cannot assume that the first two instructions in this
318 * bundle get executed. The remaining code must be safe even if
319 * they do not get executed.
320 */
321 adds r17=-1024,r15 // A
322 mov r10=0 // A default to successful syscall execution
323 epc // B causes split-issue
324}
325 ;;
326 rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
327 LOAD_FSYSCALL_TABLE(r14) // X
328 ;;
329 mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
330 shladd r18=r17,3,r14 // A
331 mov r19=NR_syscalls-1 // A
332 ;;
333 lfetch [r18] // M0|1
334 mov r29=psr // M2 (12 cyc)
335 // If r17 is a NaT, p6 will be zero
336 cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
337 ;;
338 mov r21=ar.fpsr // M2 (12 cyc)
339 tnat.nz p10,p9=r15 // I0
340 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
341 ;;
342 srlz.d // M0 (forces split-issue) ensure PSR.BE==0
343(p6) ld8 r18=[r18] // M0|1
344 nop.i 0
345 ;;
346 nop.m 0
347(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
348 nop.i 0
349 ;;
350(p8) ssm psr.i
351(p6) mov b7=r18 // I0
352(p8) br.dptk.many b7 // B
353
354 mov r27=ar.rsc // M2 (12 cyc)
355/*
356 * brl.cond doesn't work as intended because the linker would convert this branch
357 * into a branch to a PLT. Perhaps there will be a way to avoid this with some
358 * future version of the linker. In the meantime, we just use an indirect branch
359 * instead.
360 */
361#ifdef CONFIG_ITANIUM
362(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
363 ;;
364(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
365 ;;
366(p6) mov b7=r14
367(p6) br.sptk.many b7
368#else
369 BRL_COND_FSYS_BUBBLE_DOWN(p6)
370#endif
371 ssm psr.i
372 mov r10=-1
373(p10) mov r8=EINVAL
374(p9) mov r8=ENOSYS
375 FSYS_RETURN
376END(__kernel_syscall_via_epc)