author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-04-03 07:54:59 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-04-03 08:30:55 -0400
commit		02a8f3abb708919149cb657a5202f4603f0c38e2 (patch)
tree		2bf430c528af833f0544f575512f7e49faf81f57 /arch
parent		1dad093b66fdd4fd5d7d2692169dc1bafd794628 (diff)
s390/mm,tlb: safeguard against speculative TLB creation
The Principles of Operation states that the CPU is allowed to create
TLB entries for an address space anytime while an ASCE is loaded to
the control register. This is true even if the CPU is running in the
kernel and the user address space is not (actively) accessed. In
theory this can affect two aspects of the TLB flush logic.

For full-mm flushes the ASCE of the dying process is still attached.
The approach to flush first with IDTE and then just free all page
tables can in theory lead to stale TLB entries. Use the batched free
of page tables for the full-mm flushes as well.

For operations that can have a stale ASCE in the control register,
e.g. a delayed update_user_asce in switch_mm, load the kernel ASCE
to prevent invalid TLBs from being created.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
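The core of the safeguard is the new clear_user_asce() helper added by
this patch. An annotated copy of it follows (64-bit variant, where
LCTL_OPCODE expands to "lctlg"; the comments are explanatory additions,
not part of the patch):

static inline void clear_user_asce(struct mm_struct *mm)
{
	/* Alias the per-CPU user ASCE to the kernel ASCE ... */
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	/* ... and load it into CR1 (primary ASCE) and CR7 (secondary
	 * ASCE). With no user ASCE left in any control register the
	 * CPU has no user address space to walk, so it cannot
	 * speculatively create TLB entries for the old one. */
	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
	asm volatile("lctlg 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
}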
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/include/asm/mmu_context.h	17
-rw-r--r--	arch/s390/include/asm/tlb.h		14
-rw-r--r--	arch/s390/mm/pgtable.c			8
3 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 38149b63dc44..7abf318b1522 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,7 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #define LCTL_OPCODE "lctlg"
 #endif
 
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm)
 {
 	pgd_t *pgd = mm->pgd;
 
@@ -45,6 +45,13 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	set_fs(current->thread.mm_segment);
 }
 
+static inline void clear_user_asce(struct mm_struct *mm)
+{
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
@@ -53,11 +60,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev == next)
 		return;
 	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_mm until all TLB flushes are done. */
+		/* Delay update_user_asce until all TLB flushes are done. */
 		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+		/* Clear old ASCE by loading the kernel ASCE. */
+		clear_user_asce(next);
 	} else {
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_mm(next, tsk);
+		update_user_asce(next);
 		if (next->context.flush_mm)
 			/* Flush pending TLBs */
 			__tlb_flush_mm(next);
@@ -80,7 +89,7 @@ static inline void finish_arch_post_lock_switch(void)
 			cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_mm(mm, tsk);
+	update_user_asce(mm);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
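The switch_mm() hunk above covers the second case from the commit
message. A condensed view of the delayed-attach branch (the code is
taken from the patch; the comments are explanatory additions) shows
why the kernel ASCE is loaded right away:

	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* Another CPU is still flushing; attaching 'next' must
		 * wait until finish_arch_post_lock_switch(). */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
		/* Meanwhile the control registers still hold the
		 * previous ASCE, under which the CPU could
		 * speculatively create TLB entries. Loading the kernel
		 * ASCE closes that window. */
		clear_user_asce(next);
	}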
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2cb846c4b37f..c544b6f05d95 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->end = end;
 	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
-	if (tlb->fullmm)
-		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm)
-		return page_table_free_rcu(tlb, (unsigned long *) pte);
-	page_table_free(tlb->mm, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte);
 }
 
 /*
@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pmd);
-	crst_table_free(tlb->mm, (unsigned long *) pmd);
+	tlb_remove_table(tlb, pmd);
 #endif
 }
 
@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pud);
-	crst_table_free(tlb->mm, (unsigned long *) pud);
+	tlb_remove_table(tlb, pud);
 #endif
 }
 
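With the fullmm shortcuts removed above, tearing down a dying mm uses
the same flush-before-free ordering as a partial unmap. A rough sketch
of that ordering, assuming the generic mmu_gather flow of this kernel
version (teardown_sketch is a hypothetical illustration, not kernel
code):

	void teardown_sketch(struct mm_struct *mm)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, 0, -1);	/* fullmm gather */
		/* unmap_vmas()/free_pgtables() run in between: page
		 * tables are only *queued* via pte_free_tlb(),
		 * pmd_free_tlb() and pud_free_tlb() above, never freed
		 * directly, because the dying ASCE is still attached
		 * and the CPU may create TLB entries from it at any
		 * time. */
		tlb_finish_mmu(&tlb, 0, -1);	/* flush TLB, then free */
	}

The patch thus trades the cheaper direct free for correctness on the
full-mm path, as stated in the commit message.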
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 796c9320c709..24c62900b532 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
 	struct mm_struct *mm = arg;
 
 	if (current->active_mm == mm)
-		update_mm(mm, current);
+		update_user_asce(mm);
 	__tlb_flush_local();
 }
 
@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (current->active_mm == mm)
+	if (current->active_mm == mm) {
+		clear_user_asce(mm);
 		__tlb_flush_mm(mm);
+	}
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	if (current->active_mm == mm)
-		update_mm(mm, current);
+		update_user_asce(mm);
 }
 #endif