author    Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:28 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:24:56 -0400
commit    c7cc58a1ad8dfe3c199d3b6ce50412b86dd3edaf (patch)
tree      1d1ded72de81743ddd1306677d64757136972402 /arch/powerpc/mm
parent    cf54dc7cd4f9aab55cd3e1794b0b74c3c88cd1a0 (diff)
powerpc/mm: Rework & cleanup page table freeing code path
This patch started out as just adding a hook to the page table flushing path, but pulling on that string surfaced a whole bunch of issues, so it now does that and more:
- The RCU batching of page table freeing is now SMP-only, as I believe
was intended initially, and a few more things now compile to nothing
on !CONFIG_SMP. (A sketch of the batching scheme follows the sign-off
below.)
- Some macros are turned into functions, though that forced me to move
a few things out of line due to unsolvable include dependencies. It's
probably better that way anyway; this is not -that- critical a code
path.
- 32-bit didn't call pte_free_finish() from tlb_flush(), which means
it wouldn't push the batch out to RCU for delayed freeing once a bunch
of page tables had been freed; they would just sit there until the
batch got full.
64-bit BookE will use that hook to maintain the virtually linear
page tables or the indirect entries in the TLB when using the
HW loader.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
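For readers who want the shape of the batching scheme the first bullet refers to, here is a minimal sketch. It is illustrative, not the exact pgtable.c code: the real implementation frees through a pgtable_free_t cookie and has a forced-free fallback for when no batch page can be allocated; names and types below are simplified accordingly.

/* Sketch of the SMP page table freeing batch (simplified types). */
struct pte_freelist_batch {
	struct rcu_head	rcu;
	unsigned int	index;
	unsigned long	tables[0];	/* rest of the batch page */
};

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);

/* RCU callback: runs after a grace period, i.e. once no CPU can still
 * be in the middle of a lockless walk of the queued tables. */
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);	/* arch free helper */
	free_page((unsigned long)batch);
}

/* Called from the arch tlb_flush() hooks below: hand the current CPU's
 * batch to RCU so the deferred freeing actually happens. */
void pte_free_finish(void)
{
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	call_rcu(&(*batchp)->rcu, pte_free_rcu_callback);
	*batchp = NULL;
}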
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/pgtable.c    | 10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c |  3
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 15
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c |  8
4 files changed, 36 insertions, 0 deletions
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 627767d6169b..a65979a5f75b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,14 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
+#ifdef CONFIG_SMP
+
+/*
+ * Handle batching of page table freeing on SMP. Page tables are
+ * queued up and sent to be freed later by RCU in order to avoid
+ * freeing a page table page that is being walked without locks.
+ */
+
 static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
@@ -116,6 +124,8 @@ void pte_free_finish(void)
 	*batchp = NULL;
 }
 
+#endif /* CONFIG_SMP */
+
 /*
  * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
  */
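The matching !CONFIG_SMP side of the #ifdef added above lives in the pgalloc headers rather than in this diffstat (which is limited to arch/powerpc/mm). The shape is roughly the following sketch, assuming the same entry-point names as the SMP side; on UP there is no other CPU that can be walking the tables, so the batching entry points collapse into a direct free and an empty stub:

#ifdef CONFIG_SMP
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
extern void pte_free_finish(void);
#else /* !CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    pgtable_free_t pgf)
{
	pgtable_free(pgf);	/* no lockless walker on another CPU: free now */
}
static inline void pte_free_finish(void) { }	/* nothing batched on UP */
#endif /* CONFIG_SMP */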
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 65190587a365..8aaa8b7eb324 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -71,6 +71,9 @@ void tlb_flush(struct mmu_gather *tlb)
 		 */
 		_tlbia();
 	}
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
 }
 
 /*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 937eb90677d9..8e35a6066938 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -154,6 +154,21 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
+void tlb_flush(struct mmu_gather *tlb)
+{
+	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+
+	/* If there's a TLB batch pending, then we must flush it because the
+	 * pages are going to be freed and we really don't want to have a CPU
+	 * access a freed page because it has a stale TLB
+	 */
+	if (tlbbatch->index)
+		__flush_tlb_pending(tlbbatch);
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
+}
+
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 761e8882416f..6b43fc49f103 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -233,3 +233,11 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	flush_tlb_mm(vma->vm_mm);
 }
 EXPORT_SYMBOL(flush_tlb_range);
+
+void tlb_flush(struct mmu_gather *tlb)
+{
+	flush_tlb_mm(tlb->mm);
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
+}
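Finally, why RCU and not just the TLB flush: the walkers being protected traverse page tables without taking page table locks, so a table page must not be reused while such a walk may still dereference it. Schematically (illustration only: lockless_walk_pte() and use_pte() are made-up names, and real powerpc walkers of this era rely on interrupt disabling rather than an explicit rcu_read_lock(), but the lifetime rule is the same):

void lockless_reader(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	rcu_read_lock();			/* begin lockless walk */
	pte = lockless_walk_pte(mm, addr);	/* hypothetical walk helper */
	if (pte)
		use_pte(pte);			/* table page can't be freed yet */
	rcu_read_unlock();			/* queued batches may now run */
}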