 arch/s390/include/asm/mmu_context.h | 33 ++++++++++++++++-----------------
 arch/s390/include/asm/switch_to.h   |  4 ----
 2 files changed, 16 insertions(+), 21 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c28f32a45af5..3815bfea1b2d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	pgd_t *pgd = mm->pgd;
-
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	set_fs(current->thread.mm_segment);
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	if (current->thread.mm_segment.ar4)
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
 }
 
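Note: set_user_asce() now derives the ASCE from mm->pgd directly and replaces the set_fs() call with an explicit CR7 reload, done only when the task runs with the user address-space mode selected (mm_segment.ar4 set); CIF_ASCE still defers the remaining control-register update to the exit-to-user path. For context, the s390 set_fs() of this era picks the CR7 ASCE from that same ar4 bit — roughly the following, as a sketch rather than the verbatim asm/uaccess.h code:

	typedef struct {
		unsigned long ar4;		/* non-zero for USER_DS */
	} mm_segment_t;

	#define set_fs(x)						\
	({								\
		unsigned long __asce;					\
		current->thread.mm_segment = (x);			\
		__asce = current->thread.mm_segment.ar4 ?		\
			S390_lowcore.user_asce :			\
			S390_lowcore.kernel_asce;			\
		__ctl_load(__asce, 7, 7);	/* secondary ASCE */	\
	})

So for USER_DS callers the new code matches the old set_fs() path, while KERNEL_DS callers no longer touch CR7 here at all.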
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
-	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
-	set_cpu_flag(CIF_ASCE);
 	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
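Note: switch_mm() no longer sets CIF_ASCE to delay the switch; it publishes the next mm's ASCE into lowcore immediately, while CR1/CR7 still hold the kernel ASCE loaded two lines above, so user-space translations cannot go through a half-attached mm. The actual control-register load happens later, in finish_arch_post_lock_switch() below. For reference, __ctl_load() wraps the load-control instruction; on 64-bit s390 it is approximately this (a sketch of asm/ctl_reg.h, details vary by kernel version):

	#define __ctl_load(array, low, high) {				\
		typedef struct { char _[sizeof(array)]; } addrtype;	\
									\
		asm volatile(						\
			"	lctlg	%1,%2,%0\n"			\
			: : "Q" (*(addrtype *)(&array)),		\
			    "i" (low), "i" (high));			\
	}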
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!mm)
-		return;
-	preempt_disable();
-	while (atomic_read(&mm->context.attach_count) >> 16)
-		cpu_relax();
-
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	set_user_asce(mm);
-	if (mm->context.flush_mm)
-		__tlb_flush_mm(mm);
-	preempt_enable();
+	load_kernel_asce();
+	if (mm) {
+		preempt_disable();
+		while (atomic_read(&mm->context.attach_count) >> 16)
+			cpu_relax();
+
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		if (mm->context.flush_mm)
+			__tlb_flush_mm(mm);
+		preempt_enable();
+	}
+	set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
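Note: load_kernel_asce() is not visible in this diff; it is presumably defined alongside set_user_asce() in this same header. A plausible shape, based on the kernel sources of this era (a sketch, not verbatim): reload CR1 with the kernel ASCE only if it does not already hold it, and set CIF_ASCE so the user ASCE is reinstalled on the way back to user space:

	static inline void load_kernel_asce(void)
	{
		unsigned long asce;

		__ctl_store(asce, 1, 1);	/* read current CR1 */
		if (asce != S390_lowcore.kernel_asce)
			__ctl_load(S390_lowcore.kernel_asce, 1, 1);
		set_cpu_flag(CIF_ASCE);
	}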
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..df38c70cd59e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);				\
 } while (0)
 
-#define finish_arch_switch(prev) do {				     \
-	set_fs(current->thread.mm_segment);			     \
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */
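Note: finish_arch_switch() only did the set_fs(), which now lives at the end of finish_arch_post_lock_switch(), so the hook can be deleted outright. The two hooks differ in when the generic scheduler invokes them; a simplified sketch of finish_task_switch() in kernel/sched/core.c of this era (not verbatim):

	/* ... */
	finish_arch_switch(prev);		/* rq->lock still held */
	finish_lock_switch(rq, prev);		/* releases rq->lock */
	finish_arch_post_lock_switch();		/* now does the s390 set_fs() */
	/* ... */

Doing the CR7 update after the runqueue lock is dropped keeps it together with the attach_count handshake that the s390 post-lock hook already performs.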
