Diffstat (limited to 'arch/powerpc/mm/tlb_hash64.c')
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 937eb90677d9..2b2f35f6985e 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -33,11 +33,6 @@
 
 DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
-/* This is declared as we are using the more or less generic
- * arch/powerpc/include/asm/tlb.h file -- tgall
- */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * A linux PTE was changed and the corresponding hash table entry
  * needs to be flushed. This function will either perform the flush
@@ -154,6 +149,21 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
+void tlb_flush(struct mmu_gather *tlb)
+{
+	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+
+	/* If there's a TLB batch pending, then we must flush it because the
+	 * pages are going to be freed and we really don't want to have a CPU
+	 * access a freed page because it has a stale TLB
+	 */
+	if (tlbbatch->index)
+		__flush_tlb_pending(tlbbatch);
+
+	/* Push out batch of freed page tables */
+	pte_free_finish();
+}
+
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps
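
The new tlb_flush() above enforces a simple ordering rule: any invalidations still queued in this CPU's ppc64_tlb_batch must be pushed out before the gathered pages are handed back to the allocator, otherwise another CPU could keep using a stale translation to a page that has already been reused. Because __get_cpu_var(ppc64_tlb_batch) returns the current CPU's private batch, no locking is needed to inspect batch->index. The following user-space C sketch models just that batching pattern; all names in it (tlb_batch, flush_pending, tlb_flush_gather, BATCH_MAX) are hypothetical stand-ins for illustration, not kernel APIs.

    #include <stdio.h>

    #define BATCH_MAX 16   /* hypothetical capacity; the point is that the batch is bounded */

    /* Toy stand-in for struct ppc64_tlb_batch: a bounded list of pending
     * virtual addresses whose TLB entries still need invalidation. */
    struct tlb_batch {
            unsigned long pending[BATCH_MAX];
            unsigned int index;        /* number of queued invalidations */
    };

    /* Stand-in for __flush_tlb_pending(): invalidate everything queued,
     * then mark the batch empty. */
    static void flush_pending(struct tlb_batch *batch)
    {
            for (unsigned int i = 0; i < batch->index; i++)
                    printf("invalidate VA 0x%lx\n", batch->pending[i]);
            batch->index = 0;
    }

    /* Stand-in for tlb_flush(): called before the gathered pages are freed.
     * If anything is still queued, it must go out now; the kernel version
     * also calls pte_free_finish() at this point to push out the batch of
     * freed page tables. */
    static void tlb_flush_gather(struct tlb_batch *batch)
    {
            if (batch->index)
                    flush_pending(batch);
    }

    int main(void)
    {
            struct tlb_batch batch = { .index = 0 };

            batch.pending[batch.index++] = 0x10000000UL;
            batch.pending[batch.index++] = 0x10001000UL;

            tlb_flush_gather(&batch);  /* flushes both queued entries */
            tlb_flush_gather(&batch);  /* no-op: nothing pending */
            return 0;
    }

Running the sketch prints the two queued invalidations on the first call and nothing on the second, mirroring the index check that makes the kernel's tlb_flush() a no-op when the batch is already empty.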