author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-06-13 04:36:00 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-08-24 03:23:55 -0400
commit    44b6cc8130e80e673ba8b3baf8e41891fe484786
tree      08427794ef7fd6898a965addc41f672e544557b9
parent    d5dcafee5f183e9aedddb147a89cb46ab038f26b
s390/mm,kvm: flush gmap address space with IDTE
The __tlb_flush_mm() helper uses a global flush if the mm struct has a
gmap structure attached to it. Replace the global flush with two
individual flushes by means of the IDTE instruction if only a single
gmap is attached to the mm.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 arch/s390/include/asm/mmu.h         |  1 +
 arch/s390/include/asm/mmu_context.h |  1 +
 arch/s390/include/asm/tlbflush.h    | 40 ++++++++++++---------
 arch/s390/mm/gmap.c                 | 15 ++++++++
 4 files changed, 34 insertions(+), 23 deletions(-)
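The patch encodes the gmap state in mm->context.gmap_asce: 0 means no gmap is attached, -1UL means more than one gmap is attached, and any other value is the ASCE of the single attached gmap. Below is a minimal user-space sketch (illustrative only, not kernel code; machine_has_idte and the flush_* stubs stand in for MACHINE_HAS_IDTE and the kernel's flush primitives) of the decision the new __tlb_flush_mm() makes:

/* gmap_asce_sketch.c - simplified model of the gmap_asce encoding */
#include <stdio.h>

#define GMAP_ASCE_NONE	0UL	/* no gmap attached to the mm */
#define GMAP_ASCE_MANY	(-1UL)	/* more than one gmap attached */

static int machine_has_idte = 1;	/* assume the IDTE facility is present */

static void flush_idte(unsigned long asce)
{
	printf("IDTE flush of ASCE %#lx\n", asce);
}

static void flush_full(void)
{
	printf("full TLB flush on all CPUs\n");
}

/* Mirrors the decision made by the new __tlb_flush_mm(). */
static void flush_mm(unsigned long mm_asce, unsigned long gmap_asce)
{
	if (machine_has_idte && gmap_asce != GMAP_ASCE_MANY) {
		if (gmap_asce)
			flush_idte(gmap_asce);	/* the single attached gmap */
		flush_idte(mm_asce);		/* the mm's own ASCE */
	} else {
		flush_full();	/* no IDTE, or more than one gmap */
	}
}

int main(void)
{
	flush_mm(0x10000, GMAP_ASCE_NONE);	/* one IDTE flush */
	flush_mm(0x10000, 0x20000);		/* two IDTE flushes */
	flush_mm(0x10000, GMAP_ASCE_MANY);	/* falls back to a full flush */
	return 0;
}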
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 6d39329c894b..bea785d7f853 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	spinlock_t gmap_lock;
 	struct list_head gmap_list;
+	unsigned long gmap_asce;
 	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c6a088c91aee..515fea5a3fc4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
+	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 490014c48b13..39846100682a 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -60,18 +60,25 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 	preempt_enable();
 }
 
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
+	unsigned long gmap_asce;
+
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte(asce);
-	else
-		__tlb_flush_global();
+	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+		if (gmap_asce)
+			__tlb_flush_idte(gmap_asce);
+		__tlb_flush_idte(mm->context.asce);
+	} else {
+		__tlb_flush_full(mm);
+	}
 	/* Reset TLB flush mask */
 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
@@ -92,7 +99,7 @@ static inline void __tlb_flush_kernel(void)
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	__tlb_flush_local();
 }
@@ -103,19 +110,6 @@ static inline void __tlb_flush_kernel(void)
103} 110}
104#endif 111#endif
105 112
106static inline void __tlb_flush_mm(struct mm_struct * mm)
107{
108 /*
109 * If the machine has IDTE we prefer to do a per mm flush
110 * on all cpus instead of doing a local flush if the mm
111 * only ran on the local cpu.
112 */
113 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
114 __tlb_flush_asce(mm, mm->context.asce);
115 else
116 __tlb_flush_full(mm);
117}
118
119static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) 113static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
120{ 114{
121 if (mm->context.flush_mm) { 115 if (mm->context.flush_mm) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2ce6bb3bab32..3ba622702ce4 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -94,6 +94,7 @@ out:
 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 {
 	struct gmap *gmap;
+	unsigned long gmap_asce;
 
 	gmap = gmap_alloc(limit);
 	if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	gmap->mm = mm;
 	spin_lock(&mm->context.gmap_lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
+	if (list_is_singular(&mm->context.gmap_list))
+		gmap_asce = gmap->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&mm->context.gmap_lock);
 	return gmap;
 }
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
 void gmap_remove(struct gmap *gmap)
 {
 	struct gmap *sg, *next;
+	unsigned long gmap_asce;
 
 	/* Remove all shadow gmaps linked to this gmap */
 	if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
 	/* Remove gmap from the pre-mm list */
 	spin_lock(&gmap->mm->context.gmap_lock);
 	list_del_rcu(&gmap->list);
+	if (list_empty(&gmap->mm->context.gmap_list))
+		gmap_asce = 0;
+	else if (list_is_singular(&gmap->mm->context.gmap_list))
+		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
+					     struct gmap, list)->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&gmap->mm->context.gmap_lock);
 	synchronize_rcu();
 	/* Put reference */
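Taken together, the gmap.c bookkeeping keeps mm->context.gmap_asce in one of three states that __tlb_flush_mm() can act on without taking gmap_lock: 0 when the gmap list is empty, the ASCE of the sole gmap when the list is singular, and -1UL otherwise, which forces the fall-back to __tlb_flush_full(). The WRITE_ONCE() updates under the lock pair with the READ_ONCE() in the flush path to keep that unlocked read coherent.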