aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Ellerman <mpe@ellerman.id.au>2015-03-28 06:35:16 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2015-03-28 07:03:40 -0400
commit529d235a0e190ded1d21ccc80a73e625ebcad09b (patch)
treec807f7526f29b79a7ce0b233daf8a1c3030d8dd0
parentc03e73740d24fbe990291cd9ac2d6ae0d95b975f (diff)
powerpc: Add a proper syscall for switching endianness
We currently have a "special" syscall for switching endianness. This is syscall number 0x1ebe, which is handled explicitly in the 64-bit syscall exception entry. That has a few problems, firstly the syscall number is outside of the usual range, which confuses various tools. For example strace doesn't recognise the syscall at all. Secondly it's handled explicitly as a special case in the syscall exception entry, which is complicated enough without it. As a first step toward removing the special syscall, we need to add a regular syscall that implements the same functionality. The logic is simple, it simply toggles the MSR_LE bit in the userspace MSR. This is the same as the special syscall, with the caveat that the special syscall clobbers fewer registers. This version clobbers r9-r12, XER, CTR, and CR0-1,5-7. Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/entry_64.S5
-rw-r--r--arch/powerpc/kernel/syscalls.c17
-rw-r--r--arch/powerpc/kernel/systbl.S2
-rw-r--r--arch/powerpc/kernel/systbl_chk.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c1
8 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 91062eef582f..f1863a138b4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
367SYSCALL_SPU(memfd_create) 367SYSCALL_SPU(memfd_create)
368SYSCALL_SPU(bpf) 368SYSCALL_SPU(bpf)
369COMPAT_SYS(execveat) 369COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 36b79c31eedd..f4f8b667d75b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 363 15#define __NR_syscalls 364
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index ef5b5b1f3123..e4aa173dae62 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -385,5 +385,6 @@
385#define __NR_memfd_create 360 385#define __NR_memfd_create 360
386#define __NR_bpf 361 386#define __NR_bpf 361
387#define __NR_execveat 362 387#define __NR_execveat 362
388#define __NR_switch_endian 363
388 389
389#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 390#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..afbc20019c2e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -356,6 +356,11 @@ _GLOBAL(ppc64_swapcontext)
356 bl sys_swapcontext 356 bl sys_swapcontext
357 b .Lsyscall_exit 357 b .Lsyscall_exit
358 358
359_GLOBAL(ppc_switch_endian)
360 bl save_nvgprs
361 bl sys_switch_endian
362 b .Lsyscall_exit
363
359_GLOBAL(ret_from_fork) 364_GLOBAL(ret_from_fork)
360 bl schedule_tail 365 bl schedule_tail
361 REST_NVGPRS(r1) 366 REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
121 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, 121 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
122 (u64)len_high << 32 | len_low, advice); 122 (u64)len_high << 32 | len_low, advice);
123} 123}
124
/*
 * sys_switch_endian - toggle the endianness of the calling process.
 *
 * Flips the MSR_LE bit in the saved userspace MSR, so the task resumes
 * in the opposite endianness once the syscall returns.  Replaces the
 * old magic 0x1ebe "special" syscall with a regular syscall number.
 *
 * Always succeeds; returns 0.
 */
long sys_switch_endian(void)
{
	struct thread_info *ti;

	/* Toggle the little-endian bit in the user-visible MSR image. */
	current->thread.regs->msr ^= MSR_LE;

	/*
	 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
	 * userspace. That also has the effect of restoring the non-volatile
	 * GPRs, which is why we saved them on the way in here (the
	 * ppc_switch_endian entry stub in entry_64.S calls save_nvgprs
	 * before branching to this function).
	 */
	ti = current_thread_info();
	ti->flags |= _TIF_RESTOREALL;

	return 0;
}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
22#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) 22#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
23#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) 23#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
24#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) 24#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
25#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
25#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) 26#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
26#else 27#else
27#define SYSCALL(func) .long sys_##func 28#define SYSCALL(func) .long sys_##func
@@ -29,6 +30,7 @@
29#define PPC_SYS(func) .long ppc_##func 30#define PPC_SYS(func) .long ppc_##func
30#define OLDSYS(func) .long sys_##func 31#define OLDSYS(func) .long sys_##func
31#define SYS32ONLY(func) .long sys_##func 32#define SYS32ONLY(func) .long sys_##func
33#define PPC64ONLY(func) .long sys_ni_syscall
32#define SYSX(f, f3264, f32) .long f32 34#define SYSX(f, f3264, f32) .long f32
33#endif 35#endif
34#define SYSCALL_SPU(func) SYSCALL(func) 36#define SYSCALL_SPU(func) SYSCALL(func)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
21#ifdef CONFIG_PPC64 21#ifdef CONFIG_PPC64
22#define OLDSYS(func) -1 22#define OLDSYS(func) -1
23#define SYS32ONLY(func) -1 23#define SYS32ONLY(func) -1
24#define PPC64ONLY(func) __NR_##func
24#else 25#else
25#define OLDSYS(func) __NR_old##func 26#define OLDSYS(func) __NR_old##func
26#define SYS32ONLY(func) __NR_##func 27#define SYS32ONLY(func) __NR_##func
28#define PPC64ONLY(func) -1
27#endif 29#endif
28#define SYSX(f, f3264, f32) -1 30#define SYSX(f, f3264, f32) -1
29 31
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b0ec78e8ad68..a494028b2cdf 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
39#define PPC_SYS(func) sys_ni_syscall, 39#define PPC_SYS(func) sys_ni_syscall,
40#define OLDSYS(func) sys_ni_syscall, 40#define OLDSYS(func) sys_ni_syscall,
41#define SYS32ONLY(func) sys_ni_syscall, 41#define SYS32ONLY(func) sys_ni_syscall,
42#define PPC64ONLY(func) sys_ni_syscall,
42#define SYSX(f, f3264, f32) sys_ni_syscall, 43#define SYSX(f, f3264, f32) sys_ni_syscall,
43 44
44#define SYSCALL_SPU(func) sys_##func, 45#define SYSCALL_SPU(func) sys_##func,