author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2012-04-11 08:28:07 -0400
committer  Luis Henriques <luis.henriques@canonical.com>  2012-05-01 06:00:11 -0400
commit     543258d0d88f3068cbd802d6ccdfc53ff9f2c16a (patch)
tree       c44d12150b1d5a5bba2294e62580bfc84d68023a /arch/s390
parent     ba93ad52137a1726b2dcf81ed60b81eeed3c24a2 (diff)
S390: fix tlb flushing for page table pages
BugLink: http://bugs.launchpad.net/bugs/987283

commit cd94154cc6a28dd9dc271042c1a59c08d26da886 upstream.

Git commit 36409f6353fc2d7b6516e631415f938eadd92ffa "use generic RCU
page-table freeing code" introduced a tlb flushing bug. Partially revert
the above git commit and go back to s390 specific page table flush code.

For s390 the TLB can contain three types of entries: "normal" TLB
page-table entries, TLB combined region-and-segment-table (CRST) entries,
and real-space entries. Linux does not use real-space entries, which
leaves normal TLB entries and CRST entries. The CRST entries are
intermediate steps in the page-table translation called translation
paths. For example, a 4K page access in a three-level page table setup
will create two CRST TLB entries and one page-table TLB entry. The
advantage of that approach is that a page access next to the previous one
can reuse the CRST entries and needs just a single read from memory to
create the page-table TLB entry. The disadvantage is that the TLB
flushing rules are more complicated: before any page table may be freed,
the TLB needs to be flushed.

In short: the generic RCU page-table freeing code is incorrect for the
CRST entries, in particular the check for mm_users < 2 is troublesome.

This is applicable to 3.0+ kernels.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                |  1
-rw-r--r--  arch/s390/include/asm/pgalloc.h  |  3
-rw-r--r--  arch/s390/include/asm/tlb.h      | 22
-rw-r--r--  arch/s390/mm/pgtable.c           | 63
4 files changed, 61 insertions(+), 28 deletions(-)
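For orientation only (not part of the patch): below is a minimal userspace sketch of the ordering rule the commit message describes: flush the TLB for the mm before a page-table page is queued for freeing, then defer the actual free. The helper names flush_tlb_for_mm, rcu_deferred_free, really_free_table and remove_page_table are hypothetical stand-ins for __tlb_flush_mm(), call_rcu_sched(), __tlb_remove_table() and the pte_free_tlb() path, and the grace period is collapsed to an immediate call.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel primitives used by the patch. */
static void flush_tlb_for_mm(void)
{
        /* Plays the role of __tlb_flush_mm(): drops page-table and CRST entries. */
        puts("TLB flushed for the whole mm");
}

static void really_free_table(void *table)
{
        /* Plays the role of __tlb_remove_table() handing the page back. */
        printf("page-table page %p freed\n", table);
        free(table);
}

static void rcu_deferred_free(void (*fn)(void *), void *table)
{
        /* Plays the role of call_rcu_sched(); the grace period is immediate here. */
        fn(table);
}

/*
 * The rule the patch restores: because CRST entries cache intermediate
 * translation steps, the TLB must be flushed before a page-table page is
 * queued for freeing, unconditionally (not only when mm_users < 2).
 */
static void remove_page_table(void *table)
{
        flush_tlb_for_mm();
        rcu_deferred_free(really_free_table, table);
}

int main(void)
{
        void *table = malloc(4096); /* stand-in for one 4K page-table page */

        remove_page_table(table);
        return 0;
}

In the patch itself this ordering is visible in tlb_table_flush() and in the allocation-failure path of tlb_remove_table(), where __tlb_flush_mm() runs before the table is handed off.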
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9b922b12e9f..c395f713ce3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -89,7 +89,6 @@ config S390
 	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c..e4b6609fe92 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
-void __tlb_remove_table(void *_table);
-#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c687a2c8346..775a5eea8f9 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -30,14 +30,10 @@
 
 struct mmu_gather {
 	struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch *batch;
-#endif
 	unsigned int fullmm;
-	unsigned int need_flush;
 };
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 struct mmu_table_batch {
 	struct rcu_head rcu;
 	unsigned int nr;
@@ -49,7 +45,6 @@ struct mmu_table_batch {
 
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 {
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
-	tlb->need_flush = 0;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
-#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	__tlb_flush_mm(tlb->mm);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
-#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb);
+	tlb_table_flush(tlb);
 }
 
 /*
@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return page_table_free_rcu(tlb, (unsigned long *) pte);
-#endif
 	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pmd);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pud);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 458893f5f6b..51b80b9d1f6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -243,8 +243,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	}
 }
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
 static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
@@ -301,7 +299,66 @@ void __tlb_remove_table(void *_table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely
+	 * on IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		__tlb_flush_mm(tlb->mm);
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)
+			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			__tlb_flush_mm(tlb->mm);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
 
 /*
  * switch on pgstes for its userspace process (for kvm)