author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-06-02 08:53:57 -0400
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-06-10 04:48:28 -0400
commit    | f8b13505607823bb9a212eb9410669ecf2bc2615 (patch)
tree      | f7f02832e582cda3afa8ef9aabc3145b25620f94 /arch/s390/include/asm/mmu_context.h
parent    | c1a42f49b20e9498c7abd47c01b04c6312af13c4 (diff)
s390/uaccess: always load the kernel ASCE after task switch
This patch fixes a problem introduced with git commit beef560b4cdfafb2
"s390/uaccess: simplify control register updates".
The switch_mm function is not called if the next process is a kernel
thread without an attached mm, and it is a nop if the mm does not change.
But CR1 still needs to be loaded with the kernel ASCE in case the
code returns to a uaccess function that uses the secondary space mode.
In addition, move the set_fs call from finish_arch_switch to
finish_arch_post_lock_switch, and then remove finish_arch_switch.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
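
For reference, the reworked finish_arch_post_lock_switch below calls load_kernel_asce(), which is defined outside the hunks shown in this diff. A minimal sketch of that helper, assuming the definition introduced by the earlier "simplify control register updates" commit (reconstructed here for context, not part of this patch):

```c
/*
 * Sketch only, not part of this patch: reload CR1 with the kernel ASCE
 * if it does not already hold it, and set CIF_ASCE so that the user
 * ASCE is re-established on the way back to user space.
 */
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);	/* read the current CR1 contents */
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}
```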
Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c28f32a45af5..3815bfea1b2d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	pgd_t *pgd = mm->pgd;
-
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	set_fs(current->thread.mm_segment);
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	if (current->thread.mm_segment.ar4)
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
-	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
-	set_cpu_flag(CIF_ASCE);
 	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!mm)
-		return;
-	preempt_disable();
-	while (atomic_read(&mm->context.attach_count) >> 16)
-		cpu_relax();
-
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	set_user_asce(mm);
-	if (mm->context.flush_mm)
-		__tlb_flush_mm(mm);
-	preempt_enable();
+	load_kernel_asce();
+	if (mm) {
+		preempt_disable();
+		while (atomic_read(&mm->context.attach_count) >> 16)
+			cpu_relax();
+
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		if (mm->context.flush_mm)
+			__tlb_flush_mm(mm);
+		preempt_enable();
+	}
+	set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
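
Note that set_user_asce now reloads CR7 only when the task runs with a user segment (mm_segment.ar4 set), and finish_arch_post_lock_switch ends with a set_fs call so that CR7 again matches the saved segment. For context, a rough sketch of the s390 set_fs of this era (it lives in arch/s390/include/asm/uaccess.h; reconstructed from memory, not part of this diff) which selects the ASCE loaded into CR7:

```c
/*
 * Rough sketch of the era's s390 set_fs(), for context only: pick the
 * user or kernel ASCE for CR7 depending on the requested segment, so
 * that secondary-space uaccess addresses the intended address space.
 */
#define set_fs(x)							\
({									\
	unsigned long __asce;						\
	current->thread.mm_segment = (x);				\
	__asce = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__asce, 7, 7);					\
})
```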