Diffstat (limited to 'arch/sparc64/mm/tsb.c')
-rw-r--r--	arch/sparc64/mm/tsb.c	| 95 +++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 65 insertions(+), 30 deletions(-)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 1c4e5c2dfc53..787533f01049 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,12 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
 {
-	if (context == ~0UL)
-		return 1;
-
-	return (entry->tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == ((vaddr >> 22) | (context << 48)));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -41,7 +38,7 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent, v, 0)) {
+		if (tag_compare(ent->tag, v, 0)) {
 			ent->tag = 0UL;
 			membar_storeload_storestore();
 		}
@@ -52,24 +49,31 @@ void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
-	unsigned long ctx = ~0UL;
 	unsigned long nentries = mm->context.tsb_nentries;
+	unsigned long ctx, base;
 	int i;
 
-	if (CTX_VALID(mm->context))
-		ctx = CTX_HWBITS(mm->context);
+	if (unlikely(!CTX_VALID(mm->context)))
+		return;
+
+	ctx = CTX_HWBITS(mm->context);
 
+	if (tlb_type == cheetah_plus)
+		base = __pa(tsb);
+	else
+		base = (unsigned long) tsb;
+
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
-		struct tsb *ent;
+		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		ent = &tsb[tsb_hash(v, nentries)];
-		if (tag_compare(ent, v, ctx)) {
-			ent->tag = 0UL;
-			membar_storeload_storestore();
-		}
+		hash = tsb_hash(v, nentries);
+		ent = base + (hash * sizeof(struct tsb));
+		tag = (v >> 22UL) | (ctx << 48UL);
+
+		tsb_flush(ent, tag);
 	}
 }
 
@@ -84,6 +88,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
 	       _PAGE_CV | _PAGE_P | _PAGE_W);
 	tsb_paddr = __pa(mm->context.tsb);
+	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
 	 * in one TLB entry.
@@ -144,13 +149,23 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	tsb_reg |= base;
-	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
-	tte |= (tsb_paddr & ~(page_sz - 1UL));
+	if (tlb_type == cheetah_plus) {
+		/* Physical mapping, no locked TLB entry for TSB.  */
+		tsb_reg |= tsb_paddr;
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = 0;
+		mm->context.tsb_map_pte = 0;
+	} else {
+		tsb_reg |= base;
+		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+		tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = base;
+		mm->context.tsb_map_pte = tte;
+	}
 
-	mm->context.tsb_reg_val = tsb_reg;
-	mm->context.tsb_map_vaddr = base;
-	mm->context.tsb_map_pte = tte;
 }
 
 /* The page tables are locked against modifications while this
@@ -168,13 +183,21 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 	for (i = 0; i < old_nentries; i++) {
 		register unsigned long tag asm("o4");
 		register unsigned long pte asm("o5");
-		unsigned long v;
-		unsigned int hash;
-
-		__asm__ __volatile__(
-			"ldda [%2] %3, %0"
-			: "=r" (tag), "=r" (pte)
-			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
+		unsigned long v, hash;
+
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (__pa(&old_tsb[i])),
+				  "i" (ASI_QUAD_LDD_PHYS));
+		} else {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (&old_tsb[i]),
+				  "i" (ASI_NUCLEUS_QUAD_LDD));
+		}
 
 		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
 			continue;
@@ -198,8 +221,20 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 		v |= (i & (512UL - 1UL)) << 13UL;
 
 		hash = tsb_hash(v, new_nentries);
-		new_tsb[hash].tag = tag;
-		new_tsb[hash].pte = pte;
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"stxa %0, [%1] %2\n\t"
+				"stxa %3, [%4] %2"
+				: /* no outputs */
+				: "r" (tag),
+				  "r" (__pa(&new_tsb[hash].tag)),
+				  "i" (ASI_PHYS_USE_EC),
+				  "r" (pte),
+				  "r" (__pa(&new_tsb[hash].pte)));
+		} else {
+			new_tsb[hash].tag = tag;
+			new_tsb[hash].pte = pte;
+		}
 	}
 }
 
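
For reference, a minimal standalone sketch (not part of the patch) of the entry-address and tag arithmetic that the new flush_tsb_user() path performs before calling tsb_flush(), assuming 8K pages (PAGE_SHIFT == 13), a 16-byte struct tsb, and made-up base/vaddr/context values:

/* Userspace illustration only; the base, vaddr, and context values
 * below are hypothetical and not taken from the patch. */
#include <stdio.h>

struct tsb {
	unsigned long tag;
	unsigned long pte;
};

static unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= 13;			/* PAGE_SHIFT for 8K pages */
	return vaddr & (nentries - 1);	/* nentries is a power of two */
}

int main(void)
{
	unsigned long base = 0x40000000UL;	/* hypothetical TSB base (physical on cheetah_plus) */
	unsigned long nentries = 512UL;
	unsigned long v = 0x7f0000042000UL;	/* hypothetical user virtual address */
	unsigned long ctx = 0x123UL;		/* hypothetical hardware context number */

	unsigned long hash = tsb_hash(v, nentries);
	unsigned long ent = base + (hash * sizeof(struct tsb));
	unsigned long tag = (v >> 22UL) | (ctx << 48UL);

	/* ent and tag correspond to the two arguments handed to tsb_flush(). */
	printf("ent = %#lx, tag = %#lx\n", ent, tag);
	return 0;
}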