about summary refs log tree commit diff stats
path: root/arch/s390/mm/pgtable.c
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2013-10-28 09:48:30 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-11-04 07:51:47 -0500
commit106078641f32a6a10d9759f809f809725695cb09 (patch)
treea65201fd31994348a067c63e820530ad15387476 /arch/s390/mm/pgtable.c
parentbe39f1968e33ca641af120a2d659421ad2225dea (diff)
s390/mm,tlb: correct tlb flush on page table upgrade
The IDTE instruction used to flush TLB entries for a specific address space uses the address-space-control element (ASCE) to identify affected TLB entries. The upgrade of a page table adds a new top level page table which changes the ASCE. The TLB entries associated with the old ASCE need to be flushed and the ASCE for the address space needs to be replaced synchronously on all CPUs which currently use it. The concept of a lazy ASCE update with an exception handler is broken. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--arch/s390/mm/pgtable.c18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a9be08899b0c..0a2e5e086749 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -48,12 +48,23 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
48} 48}
49 49
50#ifdef CONFIG_64BIT 50#ifdef CONFIG_64BIT
51static void __crst_table_upgrade(void *arg)
52{
53 struct mm_struct *mm = arg;
54
55 if (current->active_mm == mm)
56 update_mm(mm, current);
57 __tlb_flush_local();
58}
59
51int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 60int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
52{ 61{
53 unsigned long *table, *pgd; 62 unsigned long *table, *pgd;
54 unsigned long entry; 63 unsigned long entry;
64 int flush;
55 65
56 BUG_ON(limit > (1UL << 53)); 66 BUG_ON(limit > (1UL << 53));
67 flush = 0;
57repeat: 68repeat:
58 table = crst_table_alloc(mm); 69 table = crst_table_alloc(mm);
59 if (!table) 70 if (!table)
@@ -79,12 +90,15 @@ repeat:
79 mm->pgd = (pgd_t *) table; 90 mm->pgd = (pgd_t *) table;
80 mm->task_size = mm->context.asce_limit; 91 mm->task_size = mm->context.asce_limit;
81 table = NULL; 92 table = NULL;
93 flush = 1;
82 } 94 }
83 spin_unlock_bh(&mm->page_table_lock); 95 spin_unlock_bh(&mm->page_table_lock);
84 if (table) 96 if (table)
85 crst_table_free(mm, table); 97 crst_table_free(mm, table);
86 if (mm->context.asce_limit < limit) 98 if (mm->context.asce_limit < limit)
87 goto repeat; 99 goto repeat;
100 if (flush)
101 on_each_cpu(__crst_table_upgrade, mm, 0);
88 return 0; 102 return 0;
89} 103}
90 104
@@ -92,6 +106,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
92{ 106{
93 pgd_t *pgd; 107 pgd_t *pgd;
94 108
109 if (current->active_mm == mm)
110 __tlb_flush_mm(mm);
95 while (mm->context.asce_limit > limit) { 111 while (mm->context.asce_limit > limit) {
96 pgd = mm->pgd; 112 pgd = mm->pgd;
97 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 113 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -114,6 +130,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
114 mm->task_size = mm->context.asce_limit; 130 mm->task_size = mm->context.asce_limit;
115 crst_table_free(mm, (unsigned long *) pgd); 131 crst_table_free(mm, (unsigned long *) pgd);
116 } 132 }
133 if (current->active_mm == mm)
134 update_mm(mm, current);
117} 135}
118#endif 136#endif
119 137