-rw-r--r--  arch/powerpc/include/asm/pgalloc.h  39
-rw-r--r--  arch/powerpc/include/asm/tlb.h      38
-rw-r--r--  arch/powerpc/mm/pgtable.c           10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c         3
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c        15
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c         8
6 files changed, 67 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 1730e5e298d..34b080671f0 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -4,6 +4,15 @@
 
 #include <linux/mm.h>
 
+#ifdef CONFIG_PPC_BOOK3E
+extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
+#else /* CONFIG_PPC_BOOK3E */
+static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
+				     unsigned long address)
+{
+}
+#endif /* !CONFIG_PPC_BOOK3E */
+
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 	free_page((unsigned long)pte);
@@ -35,19 +44,27 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 #include <asm/pgalloc-32.h>
 #endif
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage,address)	\
-do { \
-	pgtable_page_dtor(ptepage); \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
-#else
-#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
-#endif
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pte_free_finish(void);
+#else /* CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+	pgtable_free(pgf);
+}
+static inline void pte_free_finish(void) { }
+#endif /* !CONFIG_SMP */
 
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+				  unsigned long address)
+{
+	pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
+						PTE_NONCACHE_NUM,
+						PTE_TABLE_SIZE-1);
+	tlb_flush_pgtable(tlb, address);
+	pgtable_page_dtor(ptepage);
+	pgtable_free_tlb(tlb, pgf);
+}
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e20ff7541f3..e2b428b0f7b 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -25,57 +25,25 @@
 
 #include <linux/pagemap.h>
 
-struct mmu_gather;
-
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 
-#if !defined(CONFIG_PPC_STD_MMU)
-
-#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)
-
-#elif defined(__powerpc64__)
-
-extern void pte_free_finish(void);
-
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
-	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
-
-	/* If there's a TLB batch pending, then we must flush it because the
-	 * pages are going to be freed and we really don't want to have a CPU
-	 * access a freed page because it has a stale TLB
-	 */
-	if (tlbbatch->index)
-		__flush_tlb_pending(tlbbatch);
-
-	pte_free_finish();
-}
-
-#else
-
 extern void tlb_flush(struct mmu_gather *tlb);
 
-#endif
-
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
 
-#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
-
-#else
 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
 			     unsigned long address);
 
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
+#ifdef CONFIG_PPC_STD_MMU_32
 	if (pte_val(*ptep) & _PAGE_HASHPTE)
 		flush_hash_entry(tlb->mm, ptep, address);
+#endif
 }
 
-#endif
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 627767d6169..a65979a5f75 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,14 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
+#ifdef CONFIG_SMP
+
+/*
+ * Handle batching of page table freeing on SMP. Page tables are
+ * queued up and send to be freed later by RCU in order to avoid
+ * freeing a page table page that is being walked without locks
+ */
+
 static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
@@ -116,6 +124,8 @@ void pte_free_finish(void)
 	*batchp = NULL;
 }
 
+#endif /* CONFIG_SMP */
+
 /*
  * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
  */
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 65190587a36..8aaa8b7eb32 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -71,6 +71,9 @@ void tlb_flush(struct mmu_gather *tlb)
 		 */
 		_tlbia();
 	}
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
 }
 
 /*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 937eb90677d..8e35a606693 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -154,6 +154,21 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
+void tlb_flush(struct mmu_gather *tlb)
+{
+	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+
+	/* If there's a TLB batch pending, then we must flush it because the
+	 * pages are going to be freed and we really don't want to have a CPU
+	 * access a freed page because it has a stale TLB
+	 */
+	if (tlbbatch->index)
+		__flush_tlb_pending(tlbbatch);
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
+}
+
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 761e8882416..6b43fc49f10 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -233,3 +233,11 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	flush_tlb_mm(vma->vm_mm);
 }
 EXPORT_SYMBOL(flush_tlb_range);
+
+void tlb_flush(struct mmu_gather *tlb)
+{
+	flush_tlb_mm(tlb->mm);
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
+}