author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-03-08 05:54:14 -0500
committer Christian Borntraeger <borntraeger@de.ibm.com>  2016-06-20 03:46:49 -0400
commit    8ecb1a59d6c6674bc98e4eee0c2482490748e21a (patch)
tree      c39abe49adf4ef52fd94342b459c7c011cbd6da2
parent    414d3b07496604a4372466a6b474ca24291a143c (diff)
s390/mm: use RCU for gmap notifier list and the per-mm gmap list
The gmap notifier list and the gmap list in the mm_struct change rarely.
Use RCU to optimize the reader of these lists.

Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--  arch/s390/include/asm/gmap.h         |  1
-rw-r--r--  arch/s390/include/asm/mmu.h          | 11
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  3
-rw-r--r--  arch/s390/mm/gmap.c                  | 39
-rw-r--r--  arch/s390/mm/pgalloc.c               | 16
5 files changed, 41 insertions(+), 29 deletions(-)
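
The pattern applied throughout this patch is the standard RCU-protected list: the rare writers serialize on a spinlock and manipulate the list with the _rcu helpers, while the frequent readers traverse it lock-free inside an RCU read-side critical section. What follows is a minimal, kernel-style sketch of that pattern; the demo_* names are illustrative only and not part of the patch.

/*
 * Illustrative sketch of the RCU list pattern used by this patch.
 * None of the demo_* identifiers appear in the patch itself.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head list;
	int payload;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

/* Writer: rare insertion, serialized by the spinlock. */
static void demo_add(struct demo_entry *e)
{
	spin_lock(&demo_lock);
	list_add_rcu(&e->list, &demo_list);
	spin_unlock(&demo_lock);
}

/* Writer: removal must wait out a grace period before freeing. */
static void demo_del(struct demo_entry *e)
{
	spin_lock(&demo_lock);
	list_del_rcu(&e->list);
	spin_unlock(&demo_lock);
	synchronize_rcu();	/* all pre-existing readers are done */
	kfree(e);
}

/* Reader: frequent, lock-free traversal; must not sleep. */
static int demo_sum(void)
{
	struct demo_entry *e;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, list)
		sum += e->payload;
	rcu_read_unlock();
	return sum;
}

This is exactly the shape of the changes below: gmap_alloc()/gmap_free() and the notifier register/unregister paths are the writers (gmap_free() and gmap_unregister_ipte_notifier() gain a synchronize_rcu() before the object can be freed or reused), while gmap_unlink() and ptep_notify() become lock-free RCU readers.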
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index bc0eadf9ed8e..2cf49624af99 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -39,6 +39,7 @@ struct gmap {
  */
 struct gmap_notifier {
 	struct list_head list;
+	struct rcu_head rcu;
 	void (*notifier_call)(struct gmap *gmap, unsigned long start,
 			      unsigned long end);
 };
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad99d73..b941528cc49e 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -8,8 +8,9 @@ typedef struct {
 	cpumask_t cpu_attach_mask;
 	atomic_t attach_count;
 	unsigned int flush_mm;
-	spinlock_t list_lock;
+	spinlock_t pgtable_lock;
 	struct list_head pgtable_list;
+	spinlock_t gmap_lock;
 	struct list_head gmap_list;
 	unsigned long asce;
 	unsigned long asce_limit;
@@ -22,9 +23,11 @@ typedef struct {
 	unsigned int use_skey:1;
 } mm_context_t;

 #define INIT_MM_CONTEXT(name) \
-	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
-	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+	.context.pgtable_lock = \
+		__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
+	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+	.context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),

 static inline int tprot(unsigned long addr)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c837b79b455d..3ce3854b7a41 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,8 +15,9 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	spin_lock_init(&mm->context.list_lock);
+	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	spin_lock_init(&mm->context.gmap_lock);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b5820bf47ec6..8b56423a8297 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -70,9 +70,9 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
 	gmap->asce = atype | _ASCE_TABLE_LENGTH |
 		_ASCE_USER_BITS | __pa(table);
 	gmap->asce_end = limit;
-	down_write(&mm->mmap_sem);
-	list_add(&gmap->list, &mm->context.gmap_list);
-	up_write(&mm->mmap_sem);
+	spin_lock(&mm->context.gmap_lock);
+	list_add_rcu(&gmap->list, &mm->context.gmap_list);
+	spin_unlock(&mm->context.gmap_lock);
 	return gmap;

 out_free:
@@ -128,14 +128,16 @@ void gmap_free(struct gmap *gmap)
 	else
 		__tlb_flush_global();

+	spin_lock(&gmap->mm->context.gmap_lock);
+	list_del_rcu(&gmap->list);
+	spin_unlock(&gmap->mm->context.gmap_lock);
+	synchronize_rcu();
+
 	/* Free all segment & region tables. */
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
 		__free_pages(page, 2);
 	gmap_radix_tree_free(&gmap->guest_to_host);
 	gmap_radix_tree_free(&gmap->host_to_guest);
-	down_write(&gmap->mm->mmap_sem);
-	list_del(&gmap->list);
-	up_write(&gmap->mm->mmap_sem);
 	kfree(gmap);
 }
 EXPORT_SYMBOL_GPL(gmap_free);
@@ -369,11 +371,13 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
 	struct gmap *gmap;
 	int flush;

-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
 		if (flush)
 			gmap_flush_tlb(gmap);
 	}
+	rcu_read_unlock();
 }

 /**
@@ -555,7 +559,7 @@ static DEFINE_SPINLOCK(gmap_notifier_lock);
 void gmap_register_ipte_notifier(struct gmap_notifier *nb)
 {
 	spin_lock(&gmap_notifier_lock);
-	list_add(&nb->list, &gmap_notifier_list);
+	list_add_rcu(&nb->list, &gmap_notifier_list);
 	spin_unlock(&gmap_notifier_lock);
 }
 EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
@@ -567,8 +571,9 @@ EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
 {
 	spin_lock(&gmap_notifier_lock);
-	list_del_init(&nb->list);
+	list_del_rcu(&nb->list);
 	spin_unlock(&gmap_notifier_lock);
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

@@ -662,16 +667,18 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)

 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
 	offset = offset * (4096 / sizeof(pte_t));
-	spin_lock(&gmap_notifier_lock);
-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+		spin_lock(&gmap->guest_table_lock);
 		table = radix_tree_lookup(&gmap->host_to_guest,
 					  vmaddr >> PMD_SHIFT);
-		if (!table)
-			continue;
-		gaddr = __gmap_segment_gaddr(table) + offset;
-		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
+		if (table)
+			gaddr = __gmap_segment_gaddr(table) + offset;
+		spin_unlock(&gmap->guest_table_lock);
+		if (table)
+			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
 	}
-	spin_unlock(&gmap_notifier_lock);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ptep_notify);

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e8b5962ac12a..7be1f94f70a8 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -149,7 +149,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	/* Try to get a fragment of a 4K page as a 2K page table */
 	if (!mm_alloc_pgste(mm)) {
 		table = NULL;
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
@@ -164,7 +164,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				list_del(&page->lru);
 			}
 		}
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 		if (table)
 			return table;
 	}
@@ -187,9 +187,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 	}
 	return table;
 }
@@ -203,13 +203,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	if (!mm_alloc_pgste(mm)) {
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 		if (mask != 0)
 			return;
 	}
@@ -235,13 +235,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 		return;
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.list_lock);
+	spin_lock_bh(&mm->context.pgtable_lock);
 	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
 		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
+	spin_unlock_bh(&mm->context.pgtable_lock);
 	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }