Diffstat (limited to 'arch/sparc/mm')

 arch/sparc/mm/tlb.c | 43 ++++++++++++++++++++++++-------------------
 arch/sparc/mm/tsb.c | 15 ++++++++-------
 2 files changed, 32 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 
 /* Heavily inspired by the ppc64 code. */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
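
For context: the change moves the flush batch out of the generic struct mmu_gather into a private per-cpu struct tlb_batch, passes fullmm as an explicit parameter, and pairs every exit path with put_cpu_var() (which is why the early-return path grows one). Below is a minimal, single-threaded user-space sketch of the batching logic as it stands after this diff. All names in main() are illustrative stand-ins, the per-cpu get_cpu_var()/put_cpu_var() pairing is elided, the TLB_BATCH_NR value is assumed, and the real TLB/TSB flushes are replaced by a printf().

	/* Single-threaded sketch of the tlb_batch pattern; not kernel code. */
	#include <stdio.h>

	#define TLB_BATCH_NR 8			/* assumed; arch-defined in reality */

	struct tlb_batch {
		void *mm;			/* address space owning the batch */
		unsigned long tlb_nr;		/* number of queued vaddrs */
		unsigned long vaddrs[TLB_BATCH_NR];
	};

	static struct tlb_batch tlb_batch;	/* one instance per CPU in the kernel */

	static void flush_tlb_pending(void)
	{
		struct tlb_batch *tb = &tlb_batch;

		if (tb->tlb_nr) {
			printf("flush %lu vaddr(s) for mm %p\n", tb->tlb_nr, tb->mm);
			tb->tlb_nr = 0;
		}
	}

	static void tlb_batch_add(void *mm, unsigned long vaddr, int fullmm)
	{
		struct tlb_batch *tb = &tlb_batch;
		unsigned long nr;

		if (fullmm)			/* whole mm is being torn down: skip batching */
			return;

		nr = tb->tlb_nr;
		if (nr != 0 && mm != tb->mm) {	/* batch belongs to another mm: drain it */
			flush_tlb_pending();
			nr = 0;
		}
		if (nr == 0)
			tb->mm = mm;

		tb->vaddrs[nr] = vaddr;
		tb->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR)		/* batch full: drain */
			flush_tlb_pending();
	}

	int main(void)
	{
		int mm_a, mm_b;			/* stand-ins for two struct mm_struct */
		unsigned long v;

		for (v = 0; v < 10; v++)	/* drains once at 8 entries, keeps 2 */
			tlb_batch_add(&mm_a, v << 13, 0);
		tlb_batch_add(&mm_b, 0x2000, 0); /* different mm: drains mm_a's 2 first */
		flush_tlb_pending();		/* final drain of mm_b's single entry */
		return 0;
	}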
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c82870b..948461513499 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
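
The __flush_tsb_one() loop touched above hashes each batched virtual address into its TSB bucket and invalidates the matching tag, using PAGE_SHIFT for the base TSB and HPAGE_SHIFT for the hugepage TSB. The index computation is essentially the following sketch (modeled on sparc64's tsb_hash(); nentries is a power of two, so the mask acts as a cheap modulo):

	/* Sketch of the TSB index computation behind __flush_tsb_one(). */
	static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
				      unsigned long nentries)
	{
		vaddr >>= hash_shift;		/* drop page-offset bits (PAGE_SHIFT or HPAGE_SHIFT) */
		return vaddr & (nentries - 1);	/* index into the power-of-two TSB */
	}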