aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2014-04-03 07:54:59 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2014-04-03 08:30:55 -0400
commit02a8f3abb708919149cb657a5202f4603f0c38e2 (patch)
tree2bf430c528af833f0544f575512f7e49faf81f57 /arch/s390/mm
parent1dad093b66fdd4fd5d7d2692169dc1bafd794628 (diff)
s390/mm,tlb: safeguard against speculative TLB creation
The Principles of Operation states that the CPU is allowed to create TLB entries for an address space anytime while an ASCE is loaded to the control register. This is true even if the CPU is running in the kernel and the user address space is not (actively) accessed. In theory this can affect two aspects of the TLB flush logic. For full-mm flushes the ASCE of the dying process is still attached. The approach to flush first with IDTE and then just free all page tables can in theory lead to stale TLB entries. Use the batched free of page tables for the full-mm flushes as well. For operations that can have a stale ASCE in the control register, e.g. a delayed update_user_asce in switch_mm, load the kernel ASCE to prevent invalid TLBs from being created. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/pgtable.c8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 796c9320c709..24c62900b532 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
 	struct mm_struct *mm = arg;
 
 	if (current->active_mm == mm)
-		update_mm(mm, current);
+		update_user_asce(mm);
 	__tlb_flush_local();
 }
 
@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (current->active_mm == mm)
+	if (current->active_mm == mm) {
+		clear_user_asce(mm);
 		__tlb_flush_mm(mm);
+	}
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	if (current->active_mm == mm)
-		update_mm(mm, current);
+		update_user_asce(mm);
 }
 #endif
 