author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-04-14 09:11:26 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 02:58:46 -0400
commit		beef560b4cdfafb2211a856e1d722540f5151933 (patch)
tree		7ded8bd3d03266371d68d74d2d96ce8728c2390d
parent		f4192bf2dc5ae3b24ffb004e771397e737ef01e0 (diff)
s390/uaccess: simplify control register updates
Always switch to the kernel ASCE in switch_mm. Load the secondary space
ASCE in finish_arch_post_lock_switch after checking that any pending
page table operations have completed. The primary ASCE is loaded in
entry[64].S.

With this the update_primary_asce call can be removed from the
switch_to macro and from the start of the switch_mm function. Remove
the load_primary argument from update_user_asce/clear_user_asce, rename
update_user_asce to set_user_asce, and rename update_primary_asce to
load_kernel_asce.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
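In outline, the flow described above looks like the following simplified sketch (illustrative only, condensed from the mmu_context.h hunks below; the cpumask and attach_count bookkeeping is omitted):

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev == next)
		return;
	/* Run on the kernel ASCE until the new mm is known to be safe. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* primary (CR1) */
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);	/* secondary (CR7) */
	/* Defer the user ASCE; TIF_ASCE tells entry[64].S to reload CR1. */
	set_thread_flag(TIF_ASCE);
}

static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	/* Wait until pending page table operations have completed ... */
	while (atomic_read(&mm->context.attach_count) >> 16)
		cpu_relax();
	/* ... then install the user ASCE for the secondary space. */
	set_user_asce(mm);
}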
-rw-r--r--	arch/s390/include/asm/futex.h		|  4
-rw-r--r--	arch/s390/include/asm/mmu_context.h	| 43
-rw-r--r--	arch/s390/include/asm/switch_to.h	|  1
-rw-r--r--	arch/s390/include/asm/thread_info.h	|  4
-rw-r--r--	arch/s390/kernel/entry.S		|  2
-rw-r--r--	arch/s390/kernel/entry64.S		|  2
-rw-r--r--	arch/s390/lib/uaccess.c			| 10
-rw-r--r--	arch/s390/mm/pgtable.c			| 10
8 files changed, 33 insertions(+), 43 deletions(-)
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 69cf5b5eddc9..a4811aa0304d 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -29,7 +29,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, newval, ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
@@ -79,7 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	asm volatile(
 		" sacf 256\n"
 		"0: cs %1,%4,0(%5)\n"
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71be346d0e3c..93ec0c8e4c83 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -30,33 +30,31 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-static inline void update_user_asce(struct mm_struct *mm, int load_primary)
+static inline void set_user_asce(struct mm_struct *mm)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
+	set_thread_flag(TIF_ASCE);
 }
 
-static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+static inline void clear_user_asce(void)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
 
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 1, 1);
 	__ctl_load(S390_lowcore.user_asce, 7, 7);
 }
 
-static inline void update_primary_asce(struct task_struct *tsk)
+static inline void load_kernel_asce(void)
 {
 	unsigned long asce;
 
 	__ctl_store(asce, 1, 1);
 	if (asce != S390_lowcore.kernel_asce)
 		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	set_tsk_thread_flag(tsk, TIF_ASCE);
+	set_thread_flag(TIF_ASCE);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -64,25 +62,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	update_primary_asce(tsk);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_user_asce until all TLB flushes are done. */
-		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
-		/* Clear old ASCE by loading the kernel ASCE. */
-		clear_user_asce(next, 0);
-	} else {
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_user_asce(next, 0);
-		if (next->context.flush_mm)
-			/* Flush pending TLBs */
-			__tlb_flush_mm(next);
-	}
+	/* Clear old ASCE by loading the kernel ASCE. */
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
+	set_thread_flag(TIF_ASCE);
+	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
-	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
@@ -93,15 +83,14 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+	if (!mm)
 		return;
 	preempt_disable();
-	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
 	while (atomic_read(&mm->context.attach_count) >> 16)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_user_asce(mm, 0);
+	set_user_asce(mm);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
@@ -113,7 +102,9 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void activate_mm(struct mm_struct *prev,
                                struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+	set_user_asce(next);
 }
 
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index e759181357fc..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,7 +132,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 	update_cr_regs(next);						\
 	}								\
 	prev = __switch_to(prev,next);					\
-	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					\
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 50630e6a35de..f3e5cad93b26 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -81,8 +81,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
-#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
+#define TIF_ASCE		5	/* user asce needs fixup / uaccess */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
 #define _TIF_ASCE		(1<<TIF_ASCE)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1662038516c0..7006bfdf5c52 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -43,7 +43,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
-_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5963e43618bb..d15e7bf6a863 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -48,7 +48,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
-_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE)
 
 #define BASED(name) name-system_call(%r13)
 
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 7416efe8eae4..53dd5d7a0c96 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -76,7 +76,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 {
 	unsigned long tmp1, tmp2;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	tmp1 = -256UL;
 	asm volatile(
 		" sacf 0\n"
@@ -159,7 +159,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 {
 	unsigned long tmp1, tmp2;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	tmp1 = -256UL;
 	asm volatile(
 		" sacf 0\n"
@@ -225,7 +225,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
 {
 	unsigned long tmp1;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	asm volatile(
 		" sacf 256\n"
 		" "AHI" %0,-1\n"
@@ -292,7 +292,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
 {
 	unsigned long tmp1, tmp2;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	asm volatile(
 		" sacf 256\n"
 		" "AHI" %0,-1\n"
@@ -358,7 +358,7 @@ unsigned long __strnlen_user(const char __user *src, unsigned long size)
 {
 	if (unlikely(!size))
 		return 0;
-	update_primary_asce(current);
+	load_kernel_asce();
 	return strnlen_user_srst(src, size);
 }
 EXPORT_SYMBOL(__strnlen_user);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d7cfd57815fb..7881d4eb8b6b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -53,8 +53,10 @@ static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
 
-	if (current->active_mm == mm)
-		update_user_asce(mm, 1);
+	if (current->active_mm == mm) {
+		clear_user_asce();
+		set_user_asce(mm);
+	}
 	__tlb_flush_local();
 }
 
@@ -108,7 +110,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	pgd_t *pgd;
 
 	if (current->active_mm == mm) {
-		clear_user_asce(mm, 1);
+		clear_user_asce();
 		__tlb_flush_mm(mm);
 	}
 	while (mm->context.asce_limit > limit) {
@@ -134,7 +136,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	if (current->active_mm == mm)
-		update_user_asce(mm, 1);
+		set_user_asce(mm);
 }
 #endif
 