-rw-r--r--  arch/s390/include/asm/futex.h       |  66
-rw-r--r--  arch/s390/include/asm/mmu_context.h |  37
-rw-r--r--  arch/s390/include/asm/switch_to.h   |   1
-rw-r--r--  arch/s390/include/asm/thread_info.h |   2
-rw-r--r--  arch/s390/include/asm/uaccess.h     |   2
-rw-r--r--  arch/s390/kernel/asm-offsets.c      |   1
-rw-r--r--  arch/s390/kernel/entry.S            |  24
-rw-r--r--  arch/s390/kernel/entry64.S          |  24
-rw-r--r--  arch/s390/lib/Makefile              |   2
-rw-r--r--  arch/s390/lib/uaccess.c             | 407
-rw-r--r--  arch/s390/lib/uaccess.h             |  16
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c       | 263
-rw-r--r--  arch/s390/lib/uaccess_pt.c          | 471
-rw-r--r--  arch/s390/mm/fault.c                |  49
-rw-r--r--  arch/s390/mm/pgtable.c              |   6
15 files changed, 556 insertions, 815 deletions
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99..69cf5b5eddc9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
 #include <asm/errno.h>
 
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile(							\
+		" sacf 256\n"						\
+		"0: l %1,0(%6)\n"					\
+		"1:"insn						\
+		"2: cs %1,%2,0(%6)\n"					\
+		"3: jl 1b\n"						\
+		" lhi %0,0\n"						\
+		"4: sacf 768\n"						\
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
+		  "=m" (*uaddr)						\
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		  "m" (*uaddr) : "cc");
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval, ret;
+	int oldval = 0, newval, ret;
 
+	update_primary_asce(current);
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
 	pagefault_enable();
 
 	if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret;
+
+	update_primary_asce(current);
+	asm volatile(
+		" sacf 256\n"
+		"0: cs %1,%4,0(%5)\n"
+		"1: la %0,0\n"
+		"2: sacf 768\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory");
+	*uval = oldval;
+	return ret;
+}
+
 #endif /* _ASM_S390_FUTEX_H */
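
The futex helpers above move out of arch/s390/lib and become inlines. Both now run the user-space access in secondary-space mode: `sacf 256` switches the address-space control so that ordinary instructions (l, cs) reach user memory through the user ASCE in CR7, and `sacf 768` returns to the kernel's normal home-space mode; `update_primary_asce(current)` first parks the kernel ASCE in CR1 and sets TIF_ASCE so the user ASCE is reloaded into CR1 before going back to user mode. As a reminder of how these inlines get exercised, here is a hypothetical caller; `FUTEX_OP` and `futex_atomic_op_inuser()` are the generic futex interfaces of this kernel generation, and the function name is invented for illustration:

```c
#include <linux/futex.h>
#include <asm/futex.h>

/* Hypothetical example, not part of the patch: atomically add 1 to a
 * user-space futex word and report whether the old value was non-zero. */
static int futex_add_and_test(u32 __user *uaddr)
{
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_NE, 0);

	/* Returns the FUTEX_OP_CMP_NE result (0 or 1) on success,
	 * -EFAULT if the user page was inaccessible with pagefaults
	 * disabled, -ENOSYS for an unknown operation. */
	return futex_atomic_op_inuser(encoded_op, uaddr);
}
```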
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71a258839039..71be346d0e3c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -30,27 +30,33 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)		do { } while (0)
 
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_user_asce(struct mm_struct *mm)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	/* Load primary space page table origin. */
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
 }
 
-static inline void clear_user_asce(struct mm_struct *mm)
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
-	asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+	unsigned long asce;
+
+	__ctl_store(asce, 1, 1);
+	if (asce != S390_lowcore.kernel_asce)
+		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	set_tsk_thread_flag(tsk, TIF_ASCE);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -58,6 +64,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
+	update_primary_asce(tsk);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -66,10 +73,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Delay update_user_asce until all TLB flushes are done. */
 		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
 		/* Clear old ASCE by loading the kernel ASCE. */
-		clear_user_asce(next);
+		clear_user_asce(next, 0);
 	} else {
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_user_asce(next);
+		update_user_asce(next, 0);
 		if (next->context.flush_mm)
 			/* Flush pending TLBs */
 			__tlb_flush_mm(next);
@@ -94,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_user_asce(mm);
+	update_user_asce(mm, 0);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
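
`update_primary_asce()` is the core of this rework: it parks the kernel ASCE in CR1 (the primary ASCE) and sets TIF_ASCE, after which user space can no longer be reached through the primary space, so the uaccess primitives must switch to secondary-space mode instead. The user ASCE is put back into CR1 lazily by the entry code (the `sysc_uaccess`/`io_uaccess` labels added further down). A simplified C rendering of that restore step, for orientation only; the function name is invented and the real implementation is the assembly below:

```c
#include <linux/sched.h>
#include <asm/ctl_reg.h>
#include <asm/lowcore.h>

/* Sketch under the above assumptions: on return to user space, a
 * pending TIF_ASCE means CR1 still holds the kernel ASCE from an
 * earlier uaccess, so the user ASCE must be reloaded (the assembly
 * does this with lctl/lctlg %c1). */
static void restore_user_asce(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_ASCE))
		__ctl_load(S390_lowcore.user_asce, 1, 1);
}
```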
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..e759181357fc 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 		update_cr_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
+	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					\
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b90345..50630e6a35de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
+#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
+#define _TIF_ASCE		(1<<TIF_ASCE)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb0..1be64a1506d0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-int __handle_fault(unsigned long, unsigned long, int);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e4c99a183651..cc10cdd4d6a2 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -136,6 +136,7 @@ int main(void)
 	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
 	DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
 	DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
+	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 526d3735ed29..1662038516c0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -38,9 +38,9 @@ __PT_R14 = __PT_GPRS + 56
 __PT_R15 = __PT_GPRS + 60
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING)
+		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
 _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -241,6 +241,8 @@ sysc_work:
 	jo	sysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
+	tm	__TI_flags+3(%r12),_TIF_ASCE
+	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
 
 #
@@ -260,6 +262,14 @@ sysc_mcck_pending:
 	br	%r1			# TIF bit will be cleared by handler
 
 #
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	sysc_return
+
+#
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
@@ -522,6 +532,8 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
+	tm	__TI_flags+3(%r12),_TIF_ASCE
+	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
@@ -535,6 +547,14 @@ io_mcck_pending:
 	j	io_return
 
 #
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	io_return
+
+#
 # _TIF_NEED_RESCHED is set, call schedule
 #
 io_reschedule:
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e09dbe5f2901..5963e43618bb 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -43,9 +43,9 @@ STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING)
+		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
 _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -275,6 +275,8 @@ sysc_work:
 	jo	sysc_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
+	tm	__TI_flags+7(%r12),_TIF_ASCE
+	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
 
 #
@@ -292,6 +294,14 @@ sysc_mcck_pending:
 	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 
 #
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	sysc_return
+
+#
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
@@ -559,6 +569,8 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
+	tm	__TI_flags+7(%r12),_TIF_ASCE
+	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
@@ -571,6 +583,14 @@ io_mcck_pending:
 	j	io_return
 
 #
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	io_return
+
+#
 # _TIF_NEED_RESCHED is set, call schedule
 #
 io_reschedule:
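
The entry.S and entry64.S changes are identical apart from the 31-bit/64-bit differences (`__TI_flags+3` vs `+7`, `lctl` vs `lctlg`). Note that adding the `sysc_uaccess`/`io_uaccess` handlers alone would not be enough: `_TIF_ASCE` also has to be merged into the `_TIF_WORK_SVC`/`_TIF_WORK_INT` masks, because the exit paths only enter the per-flag work loop when one of the masked bits is set. A minimal illustration of that gate, with an invented mask name:

```c
/* Sketch (assumption): the exit path tests the whole work mask once and
 * skips all per-flag checks - including the new uaccess fixup - when no
 * masked bit is set, so a flag missing from the mask is never handled. */
#define _TIF_WORK_EXAMPLE (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_ASCE)

static inline int exit_work_pending(unsigned long ti_flags)
{
	return (ti_flags & _TIF_WORK_EXAMPLE) != 0;
}
```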
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e3fffe1dff51..c6d752e8bf28 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
+lib-y += delay.o string.o uaccess.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 000000000000..23f866b4c7f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,407 @@
+/*
+ *  Standard user space access functions based on mvcp/mvcs and doing
+ *  interesting things in the secondary space mode.
+ *
+ *  Copyright IBM Corp. 2006,2014
+ *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+#ifndef CONFIG_64BIT
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+						 unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"9: jz 7f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"10:"SLR" %0,%4\n"
+		" "ALR" %2,%4\n"
+		"4:"LHI" %4,-1\n"
+		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		" bras %3,6f\n"		/* memset loop */
+		" xc 0(1,%2),0(%2)\n"
+		"5: xc 0(256,%2),0(%2)\n"
+		" la %2,256(%2)\n"
+		"6:"AHI" %4,-256\n"
+		" jnm 5b\n"
+		" ex %4,0(%3)\n"
+		" j 8f\n"
+		"7:"SLR" %0,%0\n"
+		"8:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+						unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		" sacf 0\n"
+		"0: mvcp 0(%0,%2),0(%1),%3\n"
+		"10:jz 8f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcp 0(%0,%2),0(%1),%3\n"
+		"11:jnz 1b\n"
+		" j 8f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"4: mvcp 0(%4,%2),0(%1),%3\n"
+		"12:"SLR" %0,%4\n"
+		" "ALR" %2,%4\n"
+		"5:"LHI" %4,-1\n"
+		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		" bras %3,7f\n"		/* memset loop */
+		" xc 0(1,%2),0(%2)\n"
+		"6: xc 0(256,%2),0(%2)\n"
+		" la %2,256(%2)\n"
+		"7:"AHI" %4,-256\n"
+		" jnm 6b\n"
+		" ex %4,0(%3)\n"
+		" j 9f\n"
+		"8:"SLR" %0,%0\n"
+		"9: sacf 768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_from_user_mvcos(to, from, n);
+	return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"6: jz 4f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"7:"SLR" %0,%4\n"
+		" j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+					      unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		" sacf 0\n"
+		"0: mvcs 0(%0,%1),0(%2),%3\n"
+		"7: jz 5f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcs 0(%0,%1),0(%2),%3\n"
+		"8: jnz 1b\n"
+		" j 5f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 6f\n"
+		"4: mvcs 0(%4,%1),0(%2),%3\n"
+		"9:"SLR" %0,%4\n"
+		" j 6f\n"
+		"5:"SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_to_user_mvcos(to, from, n);
+	return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		" jz 2f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2:"SLR" %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+					     unsigned long size)
+{
+	unsigned long tmp1;
+
+	update_primary_asce(current);
+	asm volatile(
+		" sacf 256\n"
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" bras %3,3f\n"
+		"0:"AHI" %0,257\n"
+		"1: mvc 0(1,%1),0(%2)\n"
+		" la %1,1(%1)\n"
+		" la %2,1(%2)\n"
+		" "AHI" %0,-1\n"
+		" jnz 1b\n"
+		" j 5f\n"
+		"2: mvc 0(256,%1),0(%2)\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,1b-0b(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_in_user_mvcos(to, from, n);
+	return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		" jz 4f\n"
+		"1:"ALR" %0,%2\n"
+		" "SLR" %1,%2\n"
+		" j 0b\n"
+		"2: la %3,4095(%1)\n"	/* %4 = to + 4095 */
+		" nr %3,%2\n"		/* %4 = (to + 4095) & -4096 */
+		" "SLR" %3,%1\n"
+		" "CLR" %0,%3\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		" "SLR" %0,%3\n"
+		" j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	asm volatile(
+		" sacf 256\n"
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" bras %3,3f\n"
+		" xc 0(1,%1),0(%1)\n"
+		"0:"AHI" %0,257\n"
+		" la %2,255(%1)\n"	/* %2 = ptr + 255 */
+		" srl %2,12\n"
+		" sll %2,12\n"		/* %2 = (ptr + 255) & -4096 */
+		" "SLR" %2,%1\n"
+		" "CLR" %0,%2\n"	/* clear crosses next page boundary? */
+		" jnh 5f\n"
+		" "AHI" %2,-1\n"
+		"1: ex %2,0(%3)\n"
+		" "AHI" %2,1\n"
+		" "SLR" %0,%2\n"
+		" j 5f\n"
+		"2: xc 0(256,%1),0(%1)\n"
+		" la %1,256(%1)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,0(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+	if (static_key_false(&have_mvcos))
+		return clear_user_mvcos(to, size);
+	return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+					      unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0;
+	unsigned long tmp1, tmp2;
+
+	if (unlikely(!size))
+		return 0;
+	update_primary_asce(current);
+	asm volatile(
+		" la %2,0(%1)\n"
+		" la %3,0(%0,%1)\n"
+		" "SLR" %0,%0\n"
+		" sacf 256\n"
+		"0: srst %3,%2\n"
+		" jo 0b\n"
+		" la %0,1(%3)\n"	/* strnlen_user results includes \0 */
+		" "SLR" %0,%1\n"
+		"1: sacf 768\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+	update_primary_asce(current);
+	return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+	size_t done, len, offset, len_str;
+
+	if (unlikely(size <= 0))
+		return 0;
+	done = 0;
+	do {
+		offset = (size_t)src & ~PAGE_MASK;
+		len = min(size - done, PAGE_SIZE - offset);
+		if (copy_from_user(dst, src, len))
+			return -EFAULT;
+		len_str = strnlen(dst, len);
+		done += len_str;
+		src += len_str;
+		dst += len_str;
+	} while ((len_str == len) && (done < size));
+	return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+	uaccess_primary = 1;
+	return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+		static_key_slow_inc(&have_mvcos);
+	return 0;
+}
+early_initcall(uaccess_init);
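
The new arch/s390/lib/uaccess.c merges both former variants: a static key selects the MVCOS-based fast path once early boot has established that facility 27 (move-with-optional-specifications) is available, otherwise the mvcp/mvcs routines copy between the primary and secondary address spaces. All primitives keep the usual kernel uaccess contract of returning the number of bytes not copied, and on a short `__copy_from_user()` the uncopied tail of the kernel buffer is zero-filled by the "memset loop" in the asm. A hypothetical caller relying on that contract:

```c
#include <linux/uaccess.h>

/* Hypothetical example, not part of the patch: fetch a fixed-size
 * buffer from user space, treating any partial copy as an error. */
static int read_user_buf(void *dst, const void __user *src, unsigned long len)
{
	if (__copy_from_user(dst, src, len))
		return -EFAULT;	/* some bytes missing; tail was zeroed */
	return 0;
}
```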
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index c7e0e81f4b4e..000000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- *  Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __ARCH_S390_LIB_UACCESS_H
-#define __ARCH_S390_LIB_UACCESS_H
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
-unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
-unsigned long clear_user_pt(void __user *to, unsigned long n);
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
-long strncpy_from_user_pt(char *dst, const char __user *src, long count);
-
-#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index ae97b8df11aa..000000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- *  Optimized user space space access functions based on mvcos.
- *
- *  Copyright IBM Corp. 2006
- *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/jump_label.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/facility.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI	"ahi"
-#define ALR	"alr"
-#define CLR	"clr"
-#define LHI	"lhi"
-#define SLR	"slr"
-#else
-#define AHI	"aghi"
-#define ALR	"algr"
-#define CLR	"clgr"
-#define LHI	"lghi"
-#define SLR	"slgr"
-#endif
-
-static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
-
-static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
-						 unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x81UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
-		"9: jz 7f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
-		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
-		" "SLR" %4,%1\n"
-		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
-		" jnh 4f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
-		"10:"SLR" %0,%4\n"
-		" "ALR" %2,%4\n"
-		"4:"LHI" %4,-1\n"
-		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
-		" bras %3,6f\n"		/* memset loop */
-		" xc 0(1,%2),0(%2)\n"
-		"5: xc 0(256,%2),0(%2)\n"
-		" la %2,256(%2)\n"
-		"6:"AHI" %4,-256\n"
-		" jnm 5b\n"
-		" ex %4,0(%3)\n"
-		" j 8f\n"
-		"7:"SLR" %0,%0\n"
-		"8: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_from_user_mvcos(to, from, n);
-	return copy_from_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_from_user);
-
-static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		"6: jz 4f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
-		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
-		" "SLR" %4,%1\n"
-		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
-		" jnh 5f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
-		"7:"SLR" %0,%4\n"
-		" j 5f\n"
-		"4:"SLR" %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_to_user_mvcos(to, from, n);
-	return copy_to_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_to_user);
-
-static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810081UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	/* FIXME: copy with reduced length. */
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		" jz 2f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2:"SLR" %0,%0\n"
-		"3: \n"
-		EX_TABLE(0b,3b)
-		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_in_user_mvcos(to, from, n);
-	return copy_in_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_in_user);
-
-static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
-		" jz 4f\n"
-		"1:"ALR" %0,%2\n"
-		" "SLR" %1,%2\n"
-		" j 0b\n"
-		"2: la %3,4095(%1)\n"	/* %4 = to + 4095 */
-		" nr %3,%2\n"		/* %4 = (to + 4095) & -4096 */
-		" "SLR" %3,%1\n"
-		" "CLR" %0,%3\n"	/* copy crosses next page boundary? */
-		" jnh 5f\n"
-		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
-		" "SLR" %0,%3\n"
-		" j 5f\n"
-		"4:"SLR" %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
-		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
-	if (static_key_true(&have_mvcos))
-		return clear_user_mvcos(to, size);
-	return clear_user_pt(to, size);
-}
-EXPORT_SYMBOL(__clear_user);
-
-static inline unsigned long strnlen_user_mvcos(const char __user *src,
-					       unsigned long count)
-{
-	unsigned long done, len, offset, len_str;
-	char buf[256];
-
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(256UL, PAGE_SIZE - offset);
-		len = min(count - done, len);
-		if (copy_from_user_mvcos(buf, src, len))
-			return 0;
-		len_str = strnlen(buf, len);
-		done += len_str;
-		src += len_str;
-	} while ((len_str == len) && (done < count));
-	return done + 1;
-}
-
-unsigned long __strnlen_user(const char __user *src, unsigned long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strnlen_user_mvcos(src, count);
-	return strnlen_user_pt(src, count);
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
-					   long count)
-{
-	unsigned long done, len, offset, len_str;
-
-	if (unlikely(count <= 0))
-		return 0;
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		if (copy_from_user_mvcos(dst, src, len))
-			return -EFAULT;
-		len_str = strnlen(dst, len);
-		done += len_str;
-		src += len_str;
-		dst += len_str;
-	} while ((len_str == len) && (done < count));
-	return done;
-}
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strncpy_from_user_mvcos(dst, src, count);
-	return strncpy_from_user_pt(dst, src, count);
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/*
- * The uaccess page tabe walk variant can be enforced with the "uaccesspt"
- * kernel parameter. This is mainly for debugging purposes.
- */
-static int force_uaccess_pt __initdata;
-
-static int __init parse_uaccess_pt(char *__unused)
-{
-	force_uaccess_pt = 1;
-	return 0;
-}
-early_param("uaccesspt", parse_uaccess_pt);
-
-static int __init uaccess_init(void)
-{
-	if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
-		static_key_slow_dec(&have_mvcos);
-	return 0;
-}
-early_initcall(uaccess_init);
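
When comparing the deleted file with the new uaccess.c, note that the polarity of the static key flipped: the old code initialized `have_mvcos` to true and decremented it when MVCOS was unavailable, while the new code starts at false and increments it only when facility 27 is present (and `uaccess_primary` was not given), so the fallback path is the default until proven otherwise. A compact sketch of this jump-label idiom as used in this kernel generation, with invented names:

```c
#include <linux/jump_label.h>

/* Sketch (assumption, 3.14-era static_key API): static_key_false()
 * compiles to a fall-through branch that static_key_slow_inc() patches
 * into a jump, so the selected path costs no conditional per call. */
static struct static_key use_fast_copy = STATIC_KEY_INIT_FALSE;

static void enable_fast_copy(void)
{
	static_key_slow_inc(&use_fast_copy);	/* patches all call sites */
}
```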
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c deleted file mode 100644 index 8d39760bae68..000000000000 --- a/arch/s390/lib/uaccess_pt.c +++ /dev/null | |||
@@ -1,471 +0,0 @@ | |||
1 | /* | ||
2 | * User access functions based on page table walks for enhanced | ||
3 | * system layout without hardware support. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2006, 2012 | ||
6 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) | ||
7 | */ | ||
8 | |||
9 | #include <linux/errno.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/hugetlb.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/futex.h> | ||
15 | #include "uaccess.h" | ||
16 | |||
17 | #ifndef CONFIG_64BIT | ||
18 | #define AHI "ahi" | ||
19 | #define SLR "slr" | ||
20 | #else | ||
21 | #define AHI "aghi" | ||
22 | #define SLR "slgr" | ||
23 | #endif | ||
24 | |||
25 | static unsigned long strnlen_kernel(const char __user *src, unsigned long count) | ||
26 | { | ||
27 | register unsigned long reg0 asm("0") = 0UL; | ||
28 | unsigned long tmp1, tmp2; | ||
29 | |||
30 | asm volatile( | ||
31 | " la %2,0(%1)\n" | ||
32 | " la %3,0(%0,%1)\n" | ||
33 | " "SLR" %0,%0\n" | ||
34 | "0: srst %3,%2\n" | ||
35 | " jo 0b\n" | ||
36 | " la %0,1(%3)\n" /* strnlen_kernel results includes \0 */ | ||
37 | " "SLR" %0,%1\n" | ||
38 | "1:\n" | ||
39 | EX_TABLE(0b,1b) | ||
40 | : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2) | ||
41 | : "d" (reg0) : "cc", "memory"); | ||
42 | return count; | ||
43 | } | ||
44 | |||
45 | static unsigned long copy_in_kernel(void __user *to, const void __user *from, | ||
46 | unsigned long count) | ||
47 | { | ||
48 | unsigned long tmp1; | ||
49 | |||
50 | asm volatile( | ||
51 | " "AHI" %0,-1\n" | ||
52 | " jo 5f\n" | ||
53 | " bras %3,3f\n" | ||
54 | "0:"AHI" %0,257\n" | ||
55 | "1: mvc 0(1,%1),0(%2)\n" | ||
56 | " la %1,1(%1)\n" | ||
57 | " la %2,1(%2)\n" | ||
58 | " "AHI" %0,-1\n" | ||
59 | " jnz 1b\n" | ||
60 | " j 5f\n" | ||
61 | "2: mvc 0(256,%1),0(%2)\n" | ||
62 | " la %1,256(%1)\n" | ||
63 | " la %2,256(%2)\n" | ||
64 | "3:"AHI" %0,-256\n" | ||
65 | " jnm 2b\n" | ||
66 | "4: ex %0,1b-0b(%3)\n" | ||
67 | "5:"SLR" %0,%0\n" | ||
68 | "6:\n" | ||
69 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | ||
70 | : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1) | ||
71 | : : "cc", "memory"); | ||
72 | return count; | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * Returns kernel address for user virtual address. If the returned address is | ||
77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the | ||
78 | * address contains the (negative) exception code. | ||
79 | */ | ||
80 | #ifdef CONFIG_64BIT | ||
81 | |||
82 | static unsigned long follow_table(struct mm_struct *mm, | ||
83 | unsigned long address, int write) | ||
84 | { | ||
85 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
86 | |||
87 | if (unlikely(address > mm->context.asce_limit - 1)) | ||
88 | return -0x38UL; | ||
89 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { | ||
90 | case _ASCE_TYPE_REGION1: | ||
91 | table = table + ((address >> 53) & 0x7ff); | ||
92 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
93 | return -0x39UL; | ||
94 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
95 | /* fallthrough */ | ||
96 | case _ASCE_TYPE_REGION2: | ||
97 | table = table + ((address >> 42) & 0x7ff); | ||
98 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
99 | return -0x3aUL; | ||
100 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
101 | /* fallthrough */ | ||
102 | case _ASCE_TYPE_REGION3: | ||
103 | table = table + ((address >> 31) & 0x7ff); | ||
104 | if (unlikely(*table & _REGION_ENTRY_INVALID)) | ||
105 | return -0x3bUL; | ||
106 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
107 | /* fallthrough */ | ||
108 | case _ASCE_TYPE_SEGMENT: | ||
109 | table = table + ((address >> 20) & 0x7ff); | ||
110 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) | ||
111 | return -0x10UL; | ||
112 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { | ||
113 | if (write && (*table & _SEGMENT_ENTRY_PROTECT)) | ||
114 | return -0x04UL; | ||
115 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + | ||
116 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); | ||
117 | } | ||
118 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
119 | } | ||
120 | table = table + ((address >> 12) & 0xff); | ||
121 | if (unlikely(*table & _PAGE_INVALID)) | ||
122 | return -0x11UL; | ||
123 | if (write && (*table & _PAGE_PROTECT)) | ||
124 | return -0x04UL; | ||
125 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
126 | } | ||
127 | |||
128 | #else /* CONFIG_64BIT */ | ||
129 | |||
130 | static unsigned long follow_table(struct mm_struct *mm, | ||
131 | unsigned long address, int write) | ||
132 | { | ||
133 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
134 | |||
135 | table = table + ((address >> 20) & 0x7ff); | ||
136 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) | ||
137 | return -0x10UL; | ||
138 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
139 | table = table + ((address >> 12) & 0xff); | ||
140 | if (unlikely(*table & _PAGE_INVALID)) | ||
141 | return -0x11UL; | ||
142 | if (write && (*table & _PAGE_PROTECT)) | ||
143 | return -0x04UL; | ||
144 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
145 | } | ||
146 | |||
147 | #endif /* CONFIG_64BIT */ | ||
148 | |||
149 | static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr, | ||
150 | unsigned long n, int write_user) | ||
151 | { | ||
152 | struct mm_struct *mm = current->mm; | ||
153 | unsigned long offset, done, size, kaddr; | ||
154 | void *from, *to; | ||
155 | |||
156 | if (!mm) | ||
157 | return n; | ||
158 | done = 0; | ||
159 | retry: | ||
160 | spin_lock(&mm->page_table_lock); | ||
161 | do { | ||
162 | kaddr = follow_table(mm, uaddr, write_user); | ||
163 | if (IS_ERR_VALUE(kaddr)) | ||
164 | goto fault; | ||
165 | |||
166 | offset = uaddr & ~PAGE_MASK; | ||
167 | size = min(n - done, PAGE_SIZE - offset); | ||
168 | if (write_user) { | ||
169 | to = (void *) kaddr; | ||
170 | from = kptr + done; | ||
171 | } else { | ||
172 | from = (void *) kaddr; | ||
173 | to = kptr + done; | ||
174 | } | ||
175 | memcpy(to, from, size); | ||
176 | done += size; | ||
177 | uaddr += size; | ||
178 | } while (done < n); | ||
179 | spin_unlock(&mm->page_table_lock); | ||
180 | return n - done; | ||
181 | fault: | ||
182 | spin_unlock(&mm->page_table_lock); | ||
183 | if (__handle_fault(uaddr, -kaddr, write_user)) | ||
184 | return n - done; | ||
185 | goto retry; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Do DAT for user address by page table walk, return kernel address. | ||
190 | * This function needs to be called with current->mm->page_table_lock held. | ||
191 | */ | ||
192 | static inline unsigned long __dat_user_addr(unsigned long uaddr, int write) | ||
193 | { | ||
194 | struct mm_struct *mm = current->mm; | ||
195 | unsigned long kaddr; | ||
196 | int rc; | ||
197 | |||
198 | retry: | ||
199 | kaddr = follow_table(mm, uaddr, write); | ||
200 | if (IS_ERR_VALUE(kaddr)) | ||
201 | goto fault; | ||
202 | |||
203 | return kaddr; | ||
204 | fault: | ||
205 | spin_unlock(&mm->page_table_lock); | ||
206 | rc = __handle_fault(uaddr, -kaddr, write); | ||
207 | spin_lock(&mm->page_table_lock); | ||
208 | if (!rc) | ||
209 | goto retry; | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n) | ||
214 | { | ||
215 | unsigned long rc; | ||
216 | |||
217 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
218 | return copy_in_kernel((void __user *) to, from, n); | ||
219 | rc = __user_copy_pt((unsigned long) from, to, n, 0); | ||
220 | if (unlikely(rc)) | ||
221 | memset(to + n - rc, 0, rc); | ||
222 | return rc; | ||
223 | } | ||
224 | |||
225 | unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n) | ||
226 | { | ||
227 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
228 | return copy_in_kernel(to, (void __user *) from, n); | ||
229 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); | ||
230 | } | ||
231 | |||
232 | unsigned long clear_user_pt(void __user *to, unsigned long n) | ||
233 | { | ||
234 | void *zpage = (void *) empty_zero_page; | ||
235 | unsigned long done, size, ret; | ||
236 | |||
237 | done = 0; | ||
238 | do { | ||
239 | if (n - done > PAGE_SIZE) | ||
240 | size = PAGE_SIZE; | ||
241 | else | ||
242 | size = n - done; | ||
243 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
244 | ret = copy_in_kernel(to, (void __user *) zpage, n); | ||
245 | else | ||
246 | ret = __user_copy_pt((unsigned long) to, zpage, size, 1); | ||
247 | done += size; | ||
248 | to += size; | ||
249 | if (ret) | ||
250 | return ret + n - done; | ||
251 | } while (done < n); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | unsigned long strnlen_user_pt(const char __user *src, unsigned long count) | ||
256 | { | ||
257 | unsigned long uaddr = (unsigned long) src; | ||
258 | struct mm_struct *mm = current->mm; | ||
259 | unsigned long offset, done, len, kaddr; | ||
260 | unsigned long len_str; | ||
261 | |||
262 | if (unlikely(!count)) | ||
263 | return 0; | ||
264 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
265 | return strnlen_kernel(src, count); | ||
266 | if (!mm) | ||
267 | return 0; | ||
268 | done = 0; | ||
269 | retry: | ||
270 | spin_lock(&mm->page_table_lock); | ||
271 | do { | ||
272 | kaddr = follow_table(mm, uaddr, 0); | ||
273 | if (IS_ERR_VALUE(kaddr)) | ||
274 | goto fault; | ||
275 | |||
276 | offset = uaddr & ~PAGE_MASK; | ||
277 | len = min(count - done, PAGE_SIZE - offset); | ||
278 | len_str = strnlen((char *) kaddr, len); | ||
279 | done += len_str; | ||
280 | uaddr += len_str; | ||
281 | } while ((len_str == len) && (done < count)); | ||
282 | spin_unlock(&mm->page_table_lock); | ||
283 | return done + 1; | ||
284 | fault: | ||
285 | spin_unlock(&mm->page_table_lock); | ||
286 | if (__handle_fault(uaddr, -kaddr, 0)) | ||
287 | return 0; | ||
288 | goto retry; | ||
289 | } | ||
290 | |||
291 | long strncpy_from_user_pt(char *dst, const char __user *src, long count) | ||
292 | { | ||
293 | unsigned long done, len, offset, len_str; | ||
294 | |||
295 | if (unlikely(count <= 0)) | ||
296 | return 0; | ||
297 | done = 0; | ||
298 | do { | ||
299 | offset = (unsigned long)src & ~PAGE_MASK; | ||
300 | len = min(count - done, PAGE_SIZE - offset); | ||
301 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
302 | if (copy_in_kernel((void __user *) dst, src, len)) | ||
303 | return -EFAULT; | ||
304 | } else { | ||
305 | if (__user_copy_pt((unsigned long) src, dst, len, 0)) | ||
306 | return -EFAULT; | ||
307 | } | ||
308 | len_str = strnlen(dst, len); | ||
309 | done += len_str; | ||
310 | src += len_str; | ||
311 | dst += len_str; | ||
312 | } while ((len_str == len) && (done < count)); | ||
313 | return done; | ||
314 | } | ||
315 | |||
316 | unsigned long copy_in_user_pt(void __user *to, const void __user *from, | ||
317 | unsigned long n) | ||
318 | { | ||
319 | struct mm_struct *mm = current->mm; | ||
320 | unsigned long offset_max, uaddr, done, size, error_code; | ||
321 | unsigned long uaddr_from = (unsigned long) from; | ||
322 | unsigned long uaddr_to = (unsigned long) to; | ||
323 | unsigned long kaddr_to, kaddr_from; | ||
324 | int write_user; | ||
325 | |||
326 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
327 | return copy_in_kernel(to, from, n); | ||
328 | if (!mm) | ||
329 | return n; | ||
330 | done = 0; | ||
331 | retry: | ||
332 | spin_lock(&mm->page_table_lock); | ||
333 | do { | ||
334 | write_user = 0; | ||
335 | uaddr = uaddr_from; | ||
336 | kaddr_from = follow_table(mm, uaddr_from, 0); | ||
337 | error_code = kaddr_from; | ||
338 | if (IS_ERR_VALUE(error_code)) | ||
339 | goto fault; | ||
340 | |||
341 | write_user = 1; | ||
342 | uaddr = uaddr_to; | ||
343 | kaddr_to = follow_table(mm, uaddr_to, 1); | ||
344 | error_code = (unsigned long) kaddr_to; | ||
345 | if (IS_ERR_VALUE(error_code)) | ||
346 | goto fault; | ||
347 | |||
348 | offset_max = max(uaddr_from & ~PAGE_MASK, | ||
349 | uaddr_to & ~PAGE_MASK); | ||
350 | size = min(n - done, PAGE_SIZE - offset_max); | ||
351 | |||
352 | memcpy((void *) kaddr_to, (void *) kaddr_from, size); | ||
353 | done += size; | ||
354 | uaddr_from += size; | ||
355 | uaddr_to += size; | ||
356 | } while (done < n); | ||
357 | spin_unlock(&mm->page_table_lock); | ||
358 | return n - done; | ||
359 | fault: | ||
360 | spin_unlock(&mm->page_table_lock); | ||
361 | if (__handle_fault(uaddr, -error_code, write_user)) | ||
362 | return n - done; | ||
363 | goto retry; | ||
364 | } | ||
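
The size calculation in copy_in_user_pt() takes the larger of the two in-page offsets, so a single memcpy() never crosses a page boundary on either the source or the destination side. A small sketch of just that calculation, under hypothetical MODEL_* names:

        #define MODEL_PAGE_SIZE 4096UL

        /* Largest chunk that crosses no page boundary on either side. */
        static unsigned long model_chunk_size(unsigned long from,
                                              unsigned long to,
                                              unsigned long remaining)
        {
                unsigned long off_from = from & (MODEL_PAGE_SIZE - 1);
                unsigned long off_to = to & (MODEL_PAGE_SIZE - 1);
                unsigned long offset_max = off_from > off_to ? off_from : off_to;
                unsigned long room = MODEL_PAGE_SIZE - offset_max;

                return remaining < room ? remaining : room;
        }
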
365 | |||
366 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ | ||
367 | asm volatile("0: l %1,0(%6)\n" \ | ||
368 | "1: " insn \ | ||
369 | "2: cs %1,%2,0(%6)\n" \ | ||
370 | "3: jl 1b\n" \ | ||
371 | " lhi %0,0\n" \ | ||
372 | "4:\n" \ | ||
373 | EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ | ||
374 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ | ||
375 | "=m" (*uaddr) \ | ||
376 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | ||
377 | "m" (*uaddr) : "cc" ); | ||
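
The macro above is a load / modify / compare-and-swap retry loop: on a mismatch, "cs" reloads %1 with the current value and the branch at label 3 re-applies the operation. A user-space model of the same loop for FUTEX_OP_ADD, using GCC's __atomic builtins instead of the s390 instruction (names are illustrative only):

        #include <stdint.h>

        static int model_futex_add(uint32_t *uaddr, uint32_t oparg,
                                   uint32_t *old)
        {
                uint32_t oldval, newval;

                oldval = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
                do {
                        /* the "insn" step of the macro, here: add */
                        newval = oldval + oparg;
                        /* on failure the builtin refreshes oldval, so the
                         * loop recomputes newval, as the asm jumps to 1b */
                } while (!__atomic_compare_exchange_n(uaddr, &oldval, newval,
                                                      0, __ATOMIC_SEQ_CST,
                                                      __ATOMIC_SEQ_CST));
                *old = oldval;
                return 0;       /* the kernel macro yields -EFAULT on a fault */
        }
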
378 | |||
379 | static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | ||
380 | { | ||
381 | int oldval = 0, newval, ret; | ||
382 | |||
383 | switch (op) { | ||
384 | case FUTEX_OP_SET: | ||
385 | __futex_atomic_op("lr %2,%5\n", | ||
386 | ret, oldval, newval, uaddr, oparg); | ||
387 | break; | ||
388 | case FUTEX_OP_ADD: | ||
389 | __futex_atomic_op("lr %2,%1\nar %2,%5\n", | ||
390 | ret, oldval, newval, uaddr, oparg); | ||
391 | break; | ||
392 | case FUTEX_OP_OR: | ||
393 | __futex_atomic_op("lr %2,%1\nor %2,%5\n", | ||
394 | ret, oldval, newval, uaddr, oparg); | ||
395 | break; | ||
396 | case FUTEX_OP_ANDN: | ||
397 | __futex_atomic_op("lr %2,%1\nnr %2,%5\n", | ||
398 | ret, oldval, newval, uaddr, oparg); | ||
399 | break; | ||
400 | case FUTEX_OP_XOR: | ||
401 | __futex_atomic_op("lr %2,%1\nxr %2,%5\n", | ||
402 | ret, oldval, newval, uaddr, oparg); | ||
403 | break; | ||
404 | default: | ||
405 | ret = -ENOSYS; | ||
406 | } | ||
407 | if (ret == 0) | ||
408 | *old = oldval; | ||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old) | ||
413 | { | ||
414 | int ret; | ||
415 | |||
416 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
417 | return __futex_atomic_op_pt(op, uaddr, oparg, old); | ||
418 | if (unlikely(!current->mm)) | ||
419 | return -EFAULT; | ||
420 | spin_lock(¤t->mm->page_table_lock); | ||
421 | uaddr = (u32 __force __user *) | ||
422 | __dat_user_addr((__force unsigned long) uaddr, 1); | ||
423 | if (!uaddr) { | ||
424 | spin_unlock(¤t->mm->page_table_lock); | ||
425 | return -EFAULT; | ||
426 | } | ||
427 | get_page(virt_to_page(uaddr)); | ||
428 | spin_unlock(¤t->mm->page_table_lock); | ||
429 | ret = __futex_atomic_op_pt(op, uaddr, oparg, old); | ||
430 | put_page(virt_to_page(uaddr)); | ||
431 | return ret; | ||
432 | } | ||
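
__futex_atomic_op_inuser() translates the user address under mm->page_table_lock, pins the page with get_page() so the translation cannot disappear, drops the lock, and only then runs the atomic operation. An abstract model of that pin-then-operate shape; every type and helper below is invented for illustration:

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdint.h>

        struct model_page {
                atomic_int refcount;    /* models the struct page count */
                uint32_t word;          /* the futex word itself */
        };

        static pthread_mutex_t model_ptl = PTHREAD_MUTEX_INITIALIZER;

        static int model_pinned_op(struct model_page *page,
                                   int (*op)(uint32_t *))
        {
                int ret;

                /* translate and pin under the "page table lock" ... */
                pthread_mutex_lock(&model_ptl);
                atomic_fetch_add(&page->refcount, 1);   /* get_page() */
                pthread_mutex_unlock(&model_ptl);
                /* ... then do the atomic work without holding the lock */
                ret = op(&page->word);
                atomic_fetch_sub(&page->refcount, 1);   /* put_page() */
                return ret;
        }
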
433 | |||
434 | static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | ||
435 | u32 oldval, u32 newval) | ||
436 | { | ||
437 | int ret; | ||
438 | |||
439 | asm volatile("0: cs %1,%4,0(%5)\n" | ||
440 | "1: la %0,0\n" | ||
441 | "2:\n" | ||
442 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | ||
443 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | ||
444 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | ||
445 | : "cc", "memory" ); | ||
446 | *uval = oldval; | ||
447 | return ret; | ||
448 | } | ||
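
Unlike the __futex_atomic_op() loop, this is a single compare-and-swap attempt with no retry; the caller always gets back the value actually observed at the address. A user-space equivalent, minus the -EFAULT path, with hypothetical names:

        #include <stdint.h>

        static int model_cmpxchg(uint32_t *uval, uint32_t *uaddr,
                                 uint32_t oldval, uint32_t newval)
        {
                /* one shot: on failure, oldval receives the current value */
                __atomic_compare_exchange_n(uaddr, &oldval, newval, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST);
                *uval = oldval; /* what was found, swap or no swap */
                return 0;       /* the asm returns -EFAULT on a fault */
        }
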
449 | |||
450 | int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | ||
451 | u32 oldval, u32 newval) | ||
452 | { | ||
453 | int ret; | ||
454 | |||
455 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
456 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | ||
457 | if (unlikely(!current->mm)) | ||
458 | return -EFAULT; | ||
459 | spin_lock(¤t->mm->page_table_lock); | ||
460 | uaddr = (u32 __force __user *) | ||
461 | __dat_user_addr((__force unsigned long) uaddr, 1); | ||
462 | if (!uaddr) { | ||
463 | spin_unlock(¤t->mm->page_table_lock); | ||
464 | return -EFAULT; | ||
465 | } | ||
466 | get_page(virt_to_page(uaddr)); | ||
467 | spin_unlock(¤t->mm->page_table_lock); | ||
468 | ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | ||
469 | put_page(virt_to_page(uaddr)); | ||
470 | return ret; | ||
471 | } | ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 750565f72e06..f93e6c2d4ba5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -105,21 +105,24 @@ void bust_spinlocks(int yes) | |||
105 | * Returns the address space associated with the fault. | 105 | * Returns the address space associated with the fault. |
106 | * Returns 0 for kernel space and 1 for user space. | 106 | * Returns 0 for kernel space and 1 for user space. |
107 | */ | 107 | */ |
108 | static inline int user_space_fault(unsigned long trans_exc_code) | 108 | static inline int user_space_fault(struct pt_regs *regs) |
109 | { | 109 | { |
110 | unsigned long trans_exc_code; | ||
111 | |||
110 | /* | 112 | /* |
111 | * The lowest two bits of the translation exception | 113 | * The lowest two bits of the translation exception |
112 | * identification indicate which paging table was used. | 114 | * identification indicate which paging table was used. |
113 | */ | 115 | */ |
114 | trans_exc_code &= 3; | 116 | trans_exc_code = regs->int_parm_long & 3; |
115 | if (trans_exc_code == 2) | 117 | if (trans_exc_code == 3) /* home space -> kernel */ |
116 | /* Access via secondary space, set_fs setting decides */ | 118 | return 0; |
119 | if (user_mode(regs)) | ||
120 | return 1; | ||
121 | if (trans_exc_code == 2) /* secondary space -> set_fs */ | ||
117 | return current->thread.mm_segment.ar4; | 122 | return current->thread.mm_segment.ar4; |
118 | /* | 123 | if (current->flags & PF_VCPU) |
119 | * Access via primary space or access register is from user space | 124 | return 1; |
120 | * and access via home space is from the kernel. | 125 | return 0; |
121 | */ | ||
122 | return trans_exc_code != 3; | ||
123 | } | 126 | } |
124 | 127 | ||
125 | static inline void report_user_fault(struct pt_regs *regs, long signr) | 128 | static inline void report_user_fault(struct pt_regs *regs, long signr) |
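
The rewritten helper checks in a fixed order: home space is always kernel, a user-mode PSW is always user, secondary space defers to the set_fs() setting, and otherwise only a fault taken while running a KVM guest (PF_VCPU) counts as user space. A compact restatement with the pt_regs and task state flattened into plain parameters; this is a sketch, not kernel code:

        /* 1 for a user-space fault, 0 for a kernel one. */
        static int model_user_space_fault(unsigned long trans_exc_code,
                                          int user_mode, int mm_segment_ar4,
                                          int pf_vcpu)
        {
                switch (trans_exc_code & 3) {
                case 3:         /* home space -> kernel, unconditionally */
                        return 0;
                case 2:         /* secondary space -> set_fs decides */
                        return user_mode ? 1 : mm_segment_ar4;
                default:        /* primary space or access register mode */
                        return user_mode || pf_vcpu;
                }
        }
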
@@ -171,7 +174,7 @@ static noinline void do_no_context(struct pt_regs *regs) | |||
171 | * terminate things with extreme prejudice. | 174 | * terminate things with extreme prejudice. |
172 | */ | 175 | */ |
173 | address = regs->int_parm_long & __FAIL_ADDR_MASK; | 176 | address = regs->int_parm_long & __FAIL_ADDR_MASK; |
174 | if (!user_space_fault(regs->int_parm_long)) | 177 | if (!user_space_fault(regs)) |
175 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" | 178 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" |
176 | " at virtual kernel address %p\n", (void *)address); | 179 | " at virtual kernel address %p\n", (void *)address); |
177 | else | 180 | else |
@@ -291,7 +294,7 @@ static inline int do_exception(struct pt_regs *regs, int access) | |||
291 | * user context. | 294 | * user context. |
292 | */ | 295 | */ |
293 | fault = VM_FAULT_BADCONTEXT; | 296 | fault = VM_FAULT_BADCONTEXT; |
294 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) | 297 | if (unlikely(!user_space_fault(regs) || in_atomic() || !mm)) |
295 | goto out; | 298 | goto out; |
296 | 299 | ||
297 | address = trans_exc_code & __FAIL_ADDR_MASK; | 300 | address = trans_exc_code & __FAIL_ADDR_MASK; |
@@ -423,30 +426,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs) | |||
423 | do_fault_error(regs, fault); | 426 | do_fault_error(regs, fault); |
424 | } | 427 | } |
425 | 428 | ||
426 | int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | ||
427 | { | ||
428 | struct pt_regs regs; | ||
429 | int access, fault; | ||
430 | |||
431 | /* Emulate a uaccess fault from kernel mode. */ | ||
432 | regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
433 | if (!irqs_disabled()) | ||
434 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | ||
435 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | ||
436 | regs.psw.addr |= PSW_ADDR_AMODE; | ||
437 | regs.int_code = pgm_int_code; | ||
438 | regs.int_parm_long = (uaddr & PAGE_MASK) | 2; | ||
439 | access = write ? VM_WRITE : VM_READ; | ||
440 | fault = do_exception(®s, access); | ||
441 | /* | ||
442 | * Since the fault happened in kernel mode while performing a uaccess | ||
443 | * all we need to do now is emulating a fixup in case "fault" is not | ||
444 | * zero. | ||
445 | * For the calling uaccess functions this results always in -EFAULT. | ||
446 | */ | ||
447 | return fault ? -EFAULT : 0; | ||
448 | } | ||
449 | |||
450 | #ifdef CONFIG_PFAULT | 429 | #ifdef CONFIG_PFAULT |
451 | /* | 430 | /* |
452 | * 'pfault' pseudo page faults routines. | 431 | * 'pfault' pseudo page faults routines. |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c57c63380184..b5745dc9c6b5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg) | |||
54 | struct mm_struct *mm = arg; | 54 | struct mm_struct *mm = arg; |
55 | 55 | ||
56 | if (current->active_mm == mm) | 56 | if (current->active_mm == mm) |
57 | update_user_asce(mm); | 57 | update_user_asce(mm, 1); |
58 | __tlb_flush_local(); | 58 | __tlb_flush_local(); |
59 | } | 59 | } |
60 | 60 | ||
@@ -108,7 +108,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
108 | pgd_t *pgd; | 108 | pgd_t *pgd; |
109 | 109 | ||
110 | if (current->active_mm == mm) { | 110 | if (current->active_mm == mm) { |
111 | clear_user_asce(mm); | 111 | clear_user_asce(mm, 1); |
112 | __tlb_flush_mm(mm); | 112 | __tlb_flush_mm(mm); |
113 | } | 113 | } |
114 | while (mm->context.asce_limit > limit) { | 114 | while (mm->context.asce_limit > limit) { |
@@ -134,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
134 | crst_table_free(mm, (unsigned long *) pgd); | 134 | crst_table_free(mm, (unsigned long *) pgd); |
135 | } | 135 | } |
136 | if (current->active_mm == mm) | 136 | if (current->active_mm == mm) |
137 | update_user_asce(mm); | 137 | update_user_asce(mm, 1); |
138 | } | 138 | } |
139 | #endif | 139 | #endif |
140 | 140 | ||