author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-05-24 20:11:50 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 11:39:13 -0400
commit    90f08e399d054d017c0e2c5089a0f44a76418271
tree      771d8a9ff675e3fe5deda53d0701e41f1f15e9a1 /arch/sparc/mm
parent    d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd
sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:
 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs in
per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
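[Editor's note] The hunks below manipulate the new per-cpu batch through tb->mm, tb->tlb_nr and tb->vaddrs[]. The definition of struct tlb_batch itself lives in the sparc headers and is not part of this diff, so the following is only a sketch inferred from those uses:

/* Sketch of the batch structure implied by the hunks below; the real
 * definition sits in the sparc headers outside this diff, so field
 * order and any extra members are assumptions.
 */
struct tlb_batch {
	struct mm_struct *mm;               /* mm owning the batched vaddrs */
	unsigned long tlb_nr;               /* number of vaddrs queued so far */
	unsigned long vaddrs[TLB_BATCH_NR]; /* vaddrs to unhash and flush */
};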
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/tlb.c  43
-rw-r--r--  arch/sparc/mm/tsb.c  15
2 files changed, 32 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
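[Editor's note] The subtle point in this hunk: the old tlb_batch_add() reached the per-cpu state with __get_cpu_var() and never released anything, whereas the new code brackets the whole update with get_cpu_var()/put_cpu_var(), disabling preemption so the task cannot migrate to another CPU mid-batch. Every exit path must then drop the reference, which is why the fullmm early return gains a put_cpu_var(). A minimal sketch of that pairing, with a hypothetical function name:

/* Minimal sketch of the get_cpu_var()/put_cpu_var() pattern used above.
 * example_queue_vaddr() is illustrative only, not code from this patch.
 */
static DEFINE_PER_CPU(struct tlb_batch, example_batch);

static void example_queue_vaddr(struct mm_struct *mm, unsigned long vaddr)
{
	struct tlb_batch *tb = &get_cpu_var(example_batch); /* preempt off */

	tb->mm = mm;
	tb->vaddrs[tb->tlb_nr++] = vaddr;
	if (tb->tlb_nr >= TLB_BATCH_NR)
		flush_tlb_pending();	/* drain this CPU's batch */

	put_cpu_var(example_batch);	/* preempt back on */
}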
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c82870b..948461513499 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);