path: root/arch/sparc/mm/tlb.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:11:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:13 -0400
commit	90f08e399d054d017c0e2c5089a0f44a76418271 (patch)
tree	771d8a9ff675e3fe5deda53d0701e41f1f15e9a1 /arch/sparc/mm/tlb.c
parent	d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd (diff)
sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:
 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs in
per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
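The diff below only touches arch/sparc/mm/tlb.c; the per-cpu batch it manipulates is declared in a header changed elsewhere in this commit. As a rough sketch only (field names are inferred from their uses in tlb.c; the exact header contents and the batch size are not shown in this diff and are assumptions), the structure looks something like:

#define TLB_BATCH_NR	192	/* assumed batch size, not visible in this diff */

struct tlb_batch {
	struct mm_struct *mm;			/* address space the queued vaddrs belong to */
	unsigned long tlb_nr;			/* number of virtual addresses queued so far */
	unsigned long vaddrs[TLB_BATCH_NR];	/* vaddrs still to be unhashed/TLB-flushed */
};

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);	/* one batch per CPU, as in the diff below */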
Diffstat (limited to 'arch/sparc/mm/tlb.c')
-rw-r--r--	arch/sparc/mm/tlb.c	43
1 files changed, 24 insertions, 19 deletions
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
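For readers who want the control flow without the diff markers, here is a minimal standalone sketch of the batching pattern the reworked tlb_batch_add()/flush_tlb_pending() pair implements: queue virtual addresses per address space, flush when the batch fills or the mm changes, and skip queueing entirely when the whole address space is going away. All names and the tiny batch size are illustrative stand-ins, not the kernel types:

#include <stdio.h>

#define BATCH_NR 4				/* tiny batch for the demo; the kernel batch is larger */

struct demo_mm { int id; };			/* stand-in for struct mm_struct */

struct demo_batch {
	struct demo_mm *mm;
	unsigned long nr;
	unsigned long vaddrs[BATCH_NR];
};

static struct demo_batch tb;			/* per-CPU data in the real code */

static void flush_pending(void)
{
	if (!tb.nr)
		return;
	printf("flush %lu vaddrs for mm %d\n", tb.nr, tb.mm->id);
	tb.nr = 0;				/* TSB/TLB flush would happen here */
}

static void batch_add(struct demo_mm *mm, unsigned long vaddr, int fullmm)
{
	if (fullmm)				/* whole address space is being torn down:  */
		return;				/* no point queueing per-address flushes    */

	if (tb.nr && tb.mm != mm)		/* batch belongs to another mm: drain first */
		flush_pending();

	if (tb.nr == 0)
		tb.mm = mm;

	tb.vaddrs[tb.nr++] = vaddr;
	if (tb.nr >= BATCH_NR)			/* batch full: flush eagerly */
		flush_pending();
}

int main(void)
{
	struct demo_mm a = { 1 }, b = { 2 };

	for (unsigned long v = 0; v < 6; v++)
		batch_add(&a, v << 12, 0);	/* fills and flushes one batch of 4, leaves 2 queued */
	batch_add(&b, 0x1000, 0);		/* switching mm drains a's remaining entries first */
	flush_pending();			/* the kernel does this on context switch */
	return 0;
}

The get_cpu_var()/put_cpu_var() pairing in the real code exists because the batch is per-CPU data and preemption must stay disabled while it is being filled; the sketch above leaves that out.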