Diffstat (limited to 'arch/sparc/mm/tsb.c')
-rw-r--r--  arch/sparc/mm/tsb.c | 57
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 428982b9becf..2cc3bce5ee91 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-			    unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+				  unsigned long hash_shift,
+				  unsigned long nentries)
 {
-	unsigned long i;
+	unsigned long tag, ent, hash;
 
-	for (i = 0; i < tb->tlb_nr; i++) {
-		unsigned long v = tb->vaddrs[i];
-		unsigned long tag, ent, hash;
+	v &= ~0x1UL;
+	hash = tsb_hash(v, hash_shift, nentries);
+	ent = tsb + (hash * sizeof(struct tsb));
+	tag = (v >> 22UL);
 
-		v &= ~0x1UL;
+	tsb_flush(ent, tag);
+}
 
-		hash = tsb_hash(v, hash_shift, nentries);
-		ent = tsb + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
+{
+	unsigned long i;
 
-		tsb_flush(ent, tag);
-	}
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+	}
+#endif
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
 
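For context, a minimal standalone sketch (not kernel code, and not part of this patch) of the per-address index and tag computation that the factored-out __flush_tsb_one_entry() helper performs. struct tsb, tsb_hash() and all sample values below are simplified, hypothetical stand-ins for the real <asm/tsb.h> definitions; the real helper passes the computed entry address and tag to tsb_flush(), and the new flush_tsb_user_page() applies it to the base TSB and, when huge pages are configured, to the huge-page TSB, under mm->context.lock.

/*
 * Standalone sketch of __flush_tsb_one_entry()'s index/tag math.
 * Types, tsb_hash() and the sample values are simplified stand-ins,
 * not the kernel definitions.
 */
#include <stdio.h>

struct tsb {				/* simplified: real entries hold tag + pte */
	unsigned long tag;
	unsigned long pte;
};

/* Index into the TSB: drop the page offset, mask to a power-of-two size. */
static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
			      unsigned long nentries)
{
	return (vaddr >> hash_shift) & (nentries - 1);
}

int main(void)
{
	unsigned long tsb_base = 0x100000UL;	/* hypothetical TSB base address */
	unsigned long nentries = 512;		/* hypothetical number of entries */
	unsigned long v = 0x7f0000201000UL;	/* hypothetical user virtual address */
	unsigned long hash, ent, tag;

	v &= ~0x1UL;				/* clear the low marker bit, as the helper does */
	hash = tsb_hash(v, 13, nentries);	/* 13 == PAGE_SHIFT for 8K base pages */
	ent = tsb_base + (hash * sizeof(struct tsb));
	tag = v >> 22UL;			/* tag format expected by tsb_flush() */

	printf("TSB entry at %#lx, tag %#lx\n", ent, tag);
	return 0;
}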