author      Peter Zijlstra <a.p.zijlstra@chello.nl>        2011-05-24 20:11:48 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org> 2011-05-25 11:39:13 -0400
commit      d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd (patch)
tree        777e98ebcbf207ea8442e977bd93053bb23a8df8 /arch/powerpc/mm/pgtable.c
parent      d16dfc550f5326a4000f3322582a7c05dec91d7a (diff)
powerpc: mmu_gather rework
Fix up powerpc to the new mmu_gather stuff.

PPC has an extra batching queue to RCU free the actual pagetable
allocations; use the ARCH extensions for that for now.

For the ppc64_tlb_batch, which tracks the vaddrs to unhash from the
hardware hash-table, keep using per-cpu arrays but flush on context
switch and use a TLF bit to track the lazy_mmu state.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
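The new tlb->arch.batch references in the hunks below depend on the
generic struct mmu_gather gaining an arch-private member (the ARCH
extension mentioned above). A minimal sketch of what the powerpc side
of that extension could look like; the HAVE_ARCH_MMU_GATHER and
ARCH_MMU_GATHER_INIT names are assumed from convention, not quoted
from this page:

/*
 * Sketch of the arch-private mmu_gather extension implied by the
 * tlb->arch.batch references in the diff below.  The generic
 * struct mmu_gather is assumed to embed struct arch_mmu_gather when
 * HAVE_ARCH_MMU_GATHER is defined; the macro names here follow
 * convention and are illustrative.
 */
#define HAVE_ARCH_MMU_GATHER 1

struct pte_freelist_batch;

struct arch_mmu_gather {
	struct pte_freelist_batch *batch; /* page tables queued for RCU free */
};

#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }

Moving the batch pointer from per-cpu data into the gather itself is
what lets the new mmu_gather code run preemptibly: the queue now
travels with the unmap operation instead of being pinned to a CPU.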
Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r--  arch/powerpc/mm/pgtable.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6a3997f98dfb..6e72788598f8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 	unsigned long pgf;
 
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
 		pgtable_free(table, shift);
 		return;
 	}
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 	}
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
 	if (*batchp == NULL)
 		return;
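For readers without the full file at hand: the batch that
tlb->arch.batch now carries is drained through RCU so that lockless
page-table walkers (the walks the comment above alludes to) can finish
before the tables are actually freed. A condensed sketch of that
machinery, assuming the pointer/shift packing suggested by the pgf
variable above and an RCU flavor typical of this era; both details
vary by kernel version and are illustrative here:

/*
 * Sketch of the RCU-freeing scheme that pgtable_free_tlb() feeds and
 * pte_free_finish() drains.  The struct layout mirrors the
 * pte_freelist_batch code visible in the diff; the encoding mask and
 * the choice of call_rcu() variant are assumptions.
 */
struct pte_freelist_batch {
	struct rcu_head rcu;
	unsigned int	index;
	unsigned long	tables[0];	/* page-table pointer | size shift */
};

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++) {
		/* low bits carry the allocation shift, the rest the pointer */
		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;

		pgtable_free(table, shift);
	}
	free_page((unsigned long)batch);
}

/* Queue the batch; concurrent lockless walkers drain before the free. */
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

The mm_users < 2 fast path in pgtable_free_tlb() skips all of this:
with at most one user of the mm there can be no concurrent lockless
walker, so the table can be freed immediately. Note the dropped
cpumask_equal() check: with the gather now preemptible, the current
CPU is no longer a stable proxy for "no other walkers".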