author		Heiko Carstens <heiko.carstens@de.ibm.com>	2014-03-21 05:42:25 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-04-03 08:31:04 -0400
commit		457f2180951cdcbfb4657ddcc83b486e93497f56 (patch)
tree		4a4b085f2dc4c98810decac658fd0b629acd385e /arch/s390/mm
parent		1b948d6caec4f28e3524244ca0f77c6ae8ddceef (diff)
s390/uaccess: rework uaccess code - fix locking issues
The current uaccess code uses a page table walk in some circumstances,
e.g. in case of the in-atomic futex operations or if running on old
hardware which doesn't support the mvcos instruction.

However it turned out that the page table walk code does not correctly
lock page tables when accessing page table entries. In other words: a
different cpu may invalidate a page table entry while the current cpu
inspects the pte. This may lead to random data corruption.

Adding correct locking however isn't trivial for all uaccess operations.
Especially copy_in_user() is problematic, since it requires holding at
least two locks, but must be protected against ABBA deadlock when a
different cpu also performs a copy_in_user() operation.

So the solution is a different approach where we change address spaces:

User space runs in primary address mode, or access register mode within
vdso code, like it currently already does.

The kernel usually also runs in home space mode, however when accessing
user space the kernel switches to primary or secondary address mode if
the mvcos instruction is not available or if a compare-and-swap (futex)
instruction on a user space address is performed.
KVM however is special, since it requires the kernel to run in home
address space while implicitly accessing user space with the sie
instruction.

So we end up with:

User space:
- runs in primary or access register mode
- cr1 contains the user asce
- cr7 contains the user asce
- cr13 contains the kernel asce

Kernel space:
- runs in home space mode
- cr1 contains the user or kernel asce
  -> the kernel asce is loaded when a uaccess requires primary or
     secondary address mode
- cr7 contains the user or kernel asce (changed with set_fs())
- cr13 contains the kernel asce

In case of uaccess the kernel changes to:
- primary space mode in case of a uaccess (copy_to_user) and uses
  e.g. the mvcp instruction to access user space. However the kernel
  will stay in home space mode if the mvcos instruction is available
- secondary space mode in case of futex atomic operations, so that the
  instructions come from primary address space and the data from
  secondary space

In case of kvm the kernel runs in home space mode, but cr1 gets switched
to contain the gmap asce before the sie instruction gets executed. When
the sie instruction is finished cr1 will be switched back to contain the
user asce.

A context switch between two processes will always load the kernel asce
for the next process in cr1. So the first exit to user space is a bit
more expensive (one extra load control register instruction) than
before, however it keeps the code rather simple.

In sum this means there is no need to perform any error prone page table
walks anymore when accessing user space.

The patch seems to be rather large, however it mainly removes the page
table walk code and restores the previously deleted "standard" uaccess
code, with a couple of changes.

The uaccess without mvcos mode can be enforced with the
"uaccess_primary" kernel parameter.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
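[Editor's note] The following is a small, standalone C sketch of the control register arrangement described above, kept separate from the actual patch. All identifiers (asce_kind, cr_state, pick_crs, the CTX_* values) are made up for illustration and do not exist in the s390 kernel; only the mapping itself follows the commit message, and the plain "kernel" case is modelled as the state right after a context switch, where cr1 holds the kernel asce.

/*
 * Standalone model, not kernel code: which asce the commit message says
 * ends up in cr1/cr7/cr13 in each situation.
 */
#include <stdio.h>

enum asce_kind { ASCE_USER, ASCE_KERNEL, ASCE_GMAP };

enum context {
	CTX_USER,		/* user space: primary or access register mode */
	CTX_KERNEL,		/* kernel in home space mode, after a context switch */
	CTX_KERNEL_UACCESS,	/* uaccess via mvcp/mvcs or futex op (no mvcos) */
	CTX_KVM_SIE,		/* kernel executing the sie instruction */
};

struct cr_state {
	enum asce_kind cr1;	/* primary space asce */
	enum asce_kind cr7;	/* secondary space asce */
	enum asce_kind cr13;	/* home space asce */
};

static struct cr_state pick_crs(enum context ctx)
{
	/* cr13 always holds the kernel asce; cr7 follows set_fs(),
	 * modelled here with the USER_DS default. */
	struct cr_state cr = { .cr7 = ASCE_USER, .cr13 = ASCE_KERNEL };

	switch (ctx) {
	case CTX_USER:
		cr.cr1 = ASCE_USER;	/* primary space is the user space */
		break;
	case CTX_KERNEL:
		cr.cr1 = ASCE_KERNEL;	/* loaded by the context switch */
		break;
	case CTX_KERNEL_UACCESS:
		cr.cr1 = ASCE_KERNEL;	/* kernel asce needed for primary/secondary mode uaccess */
		break;
	case CTX_KVM_SIE:
		cr.cr1 = ASCE_GMAP;	/* switched around the sie instruction */
		break;
	}
	return cr;
}

int main(void)
{
	static const char *const asce[] = { "user", "kernel", "gmap" };
	static const char *const ctxname[] = {
		"user space", "kernel", "kernel uaccess", "kvm sie"
	};
	enum context ctx;

	for (ctx = CTX_USER; ctx <= CTX_KVM_SIE; ctx++) {
		struct cr_state cr = pick_crs(ctx);
		printf("%-15s cr1=%-7s cr7=%-7s cr13=%s\n", ctxname[ctx],
		       asce[cr.cr1], asce[cr.cr7], asce[cr.cr13]);
	}
	return 0;
}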
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/fault.c	49
-rw-r--r--	arch/s390/mm/pgtable.c	6
2 files changed, 17 insertions, 38 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 750565f72e06..f93e6c2d4ba5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -105,21 +105,24 @@ void bust_spinlocks(int yes)
  * Returns the address space associated with the fault.
  * Returns 0 for kernel space and 1 for user space.
  */
-static inline int user_space_fault(unsigned long trans_exc_code)
+static inline int user_space_fault(struct pt_regs *regs)
 {
+	unsigned long trans_exc_code;
+
 	/*
 	 * The lowest two bits of the translation exception
 	 * identification indicate which paging table was used.
 	 */
-	trans_exc_code &= 3;
-	if (trans_exc_code == 2)
-		/* Access via secondary space, set_fs setting decides */
+	trans_exc_code = regs->int_parm_long & 3;
+	if (trans_exc_code == 3) /* home space -> kernel */
+		return 0;
+	if (user_mode(regs))
+		return 1;
+	if (trans_exc_code == 2) /* secondary space -> set_fs */
 		return current->thread.mm_segment.ar4;
-	/*
-	 * Access via primary space or access register is from user space
-	 * and access via home space is from the kernel.
-	 */
-	return trans_exc_code != 3;
+	if (current->flags & PF_VCPU)
+		return 1;
+	return 0;
 }
 
 static inline void report_user_fault(struct pt_regs *regs, long signr)
@@ -171,7 +174,7 @@ static noinline void do_no_context(struct pt_regs *regs)
 	 * terminate things with extreme prejudice.
 	 */
 	address = regs->int_parm_long & __FAIL_ADDR_MASK;
-	if (!user_space_fault(regs->int_parm_long))
+	if (!user_space_fault(regs))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
@@ -291,7 +294,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	 * user context.
 	 */
 	fault = VM_FAULT_BADCONTEXT;
-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -423,30 +426,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
 	do_fault_error(regs, fault);
 }
 
-int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
-{
-	struct pt_regs regs;
-	int access, fault;
-
-	/* Emulate a uaccess fault from kernel mode. */
-	regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
-	if (!irqs_disabled())
-		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
-	regs.psw.addr = (unsigned long) __builtin_return_address(0);
-	regs.psw.addr |= PSW_ADDR_AMODE;
-	regs.int_code = pgm_int_code;
-	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
-	access = write ? VM_WRITE : VM_READ;
-	fault = do_exception(&regs, access);
-	/*
-	 * Since the fault happened in kernel mode while performing a uaccess
-	 * all we need to do now is emulating a fixup in case "fault" is not
-	 * zero.
-	 * For the calling uaccess functions this results always in -EFAULT.
-	 */
-	return fault ? -EFAULT : 0;
-}
-
 #ifdef CONFIG_PFAULT
 /*
  * 'pfault' pseudo page faults routines.
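[Editor's note] As a reading aid for the reworked user_space_fault() in the hunk above, here is a standalone sketch that mirrors its decision order with plain booleans. struct fault_ctx, classify_fault and the field names are illustrative stand-ins for pt_regs, user_mode(), PF_VCPU and the mm_segment/set_fs state; they are not actual kernel interfaces.

/* Standalone model of the fault classification order, not kernel code. */
#include <assert.h>
#include <stdbool.h>

struct fault_ctx {
	unsigned long trans_exc_code;	/* lowest two bits of int_parm_long */
	bool user_mode;			/* fault happened in user (problem) state */
	bool vcpu;			/* task was running a KVM guest (PF_VCPU) */
	bool set_fs_user;		/* set_fs(USER_DS) active at fault time */
};

/* Returns 1 for a user space fault, 0 for a kernel space fault. */
static int classify_fault(const struct fault_ctx *c)
{
	unsigned long space = c->trans_exc_code & 3;

	if (space == 3)			/* home space -> kernel */
		return 0;
	if (c->user_mode)		/* user mode -> user */
		return 1;
	if (space == 2)			/* secondary space -> set_fs decides */
		return c->set_fs_user;
	if (c->vcpu)			/* sie guest access counts as user */
		return 1;
	return 0;
}

int main(void)
{
	struct fault_ctx home = { .trans_exc_code = 3, .user_mode = true };
	struct fault_ctx guest = { .trans_exc_code = 0, .vcpu = true };

	assert(classify_fault(&home) == 0);	/* home space is always kernel */
	assert(classify_fault(&guest) == 1);	/* gmap fault treated as user */
	return 0;
}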
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c57c63380184..b5745dc9c6b5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
 	struct mm_struct *mm = arg;
 
 	if (current->active_mm == mm)
-		update_user_asce(mm);
+		update_user_asce(mm, 1);
 	__tlb_flush_local();
 }
 
@@ -108,7 +108,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	pgd_t *pgd;
 
 	if (current->active_mm == mm) {
-		clear_user_asce(mm);
+		clear_user_asce(mm, 1);
 		__tlb_flush_mm(mm);
 	}
 	while (mm->context.asce_limit > limit) {
@@ -134,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	if (current->active_mm == mm)
-		update_user_asce(mm);
+		update_user_asce(mm, 1);
 }
 #endif
 