Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/fault.c          15
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c    28
-rw-r--r--  arch/sparc64/mm/init.c           21
-rw-r--r--  arch/sparc64/mm/tsb.c           234
4 files changed, 197 insertions, 101 deletions
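
For orientation, the hunks below stop treating the per-address-space TSB as a single set of fields in mm->context and instead index everything through an array of TSB configuration blocks, one for base pages (MM_TSB_BASE) and one for huge pages (MM_TSB_HUGE). The sketch below is an illustrative reconstruction of that layout, inferred purely from the fields this diff dereferences (tsb_block[], tsb_descr[], huge_pte_count); the real definitions live in the sparc64 mmu headers, which are outside this diffstat.

/* Illustrative sketch only, inferred from the accesses in this diff;
 * not copied from the real include/asm-sparc64 headers.
 */
struct tsb;                             /* one 16-byte entry: tag + TTE */

#define MM_TSB_BASE     0               /* TSB for PAGE_SIZE mappings */
#define MM_TSB_HUGE     1               /* TSB for HPAGE_SIZE mappings */
#define MM_NUM_TSBS     2

struct tsb_config {
        struct tsb      *tsb;           /* the table itself (NULL until grown) */
        unsigned long   tsb_rss_limit;  /* grow when RSS crosses this */
        unsigned long   tsb_nentries;   /* number of entries, a power of two */
        unsigned long   tsb_reg_val;    /* value programmed into the TSB register */
        unsigned long   tsb_map_vaddr;  /* locked-TLB mapping of the table, if used */
        unsigned long   tsb_map_pte;
};

/* mm->context then carries tsb_block[MM_NUM_TSBS], a matching array of
 * hypervisor TSB descriptors, and a huge_pte_count used to size the huge TSB.
 */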
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0cd5d5..d21ff3230c02 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
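
The RSS split in the hunk above is easier to read with concrete numbers: each huge PTE stands in for HPAGE_SIZE / PAGE_SIZE base pages of resident memory, so that share is subtracted before checking the base-page TSB, while the huge TSB is checked against huge_pte_count alone. A minimal sketch, assuming the common sparc64 configuration of 8KB base pages and 4MB huge pages (the constants are assumptions, not taken from this patch):

/* Hypothetical illustration of the split RSS accounting in the hunk above.
 * Assumes PAGE_SIZE = 8KB and HPAGE_SIZE = 4MB, so each huge PTE covers
 * 4MB / 8KB = 512 base pages.
 */
static unsigned long base_tsb_rss(unsigned long total_rss,
                                  unsigned long huge_pte_count)
{
        return total_rss - huge_pte_count * 512;        /* base pages only */
}

static unsigned long huge_tsb_rss(unsigned long huge_pte_count)
{
        return huge_pte_count;          /* one TSB entry per huge page */
}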
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index a7a24869d045..0a1d4cd24cda 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -302,6 +303,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index c2b556106fc1..16d231703d6a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
 
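
Both TSBs are direct-mapped: the entry for a virtual address is chosen by shifting the address down by the page-size shift and masking with the power-of-two entry count, and the stored tag keeps only the high address bits. A minimal standalone sketch of that math, mirroring the hunk above (illustrative helpers, not part of the patch):

/* Mirrors the lookup in update_mmu_cache(): hash_shift is PAGE_SHIFT for
 * the base TSB and HPAGE_SHIFT for the huge TSB; nentries is a power of two.
 */
static unsigned long tsb_entry_index(unsigned long vaddr,
                                     unsigned long hash_shift,
                                     unsigned long nentries)
{
        return (vaddr >> hash_shift) & (nentries - 1UL);
}

static unsigned long tsb_entry_tag(unsigned long vaddr)
{
        return vaddr >> 22UL;   /* same "tag = (address >> 22UL)" as above */
}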
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2a44d6..beaa02810f0e 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
 
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
+	unsigned long i;
 
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
-
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
 
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
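
The #if/#elif ladders introduced above just freeze the Kconfig page-size choices into the two hypervisor descriptor constants that setup_tsb_params() uses. As a concrete, hypothetical instance, a kernel configured with CONFIG_SPARC64_PAGE_SIZE_8KB and CONFIG_HUGETLB_PAGE_SIZE_4MB ends up with the equivalent of:

/* What the preprocessor selection above reduces to for an 8KB-base,
 * 4MB-hugepage configuration (one possible outcome, shown for clarity).
 */
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB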
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
  *
  * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
  * will not trigger any longer.
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
  * the number of entries that the current TSB can hold at once.  Currently,
  * we trigger when the RSS hits 3/4 of the TSB capacity.
  */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
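
To make the comment above concrete: TSB entries are 16 bytes (hp->num_ttes = tsb_bytes / 16 elsewhere in this file), so an 8KB TSB holds 512 entries, the 1MB maximum holds 65536, and a grow is requested once RSS crosses 3/4 of whichever capacity is current. A minimal sketch of that rule, under the assumption that the limit computation inside tsb_grow() follows the comment (the body of tsb_grow() is not shown in this hunk):

/* Hypothetical helper expressing the 3/4-capacity rule described above.
 * At the maximum TSB size the limit becomes ~0UL so the fault-path checks
 * never ask for another grow.
 */
static unsigned long tsb_rss_limit_for(unsigned long tsb_bytes,
                                       unsigned long max_tsb_bytes)
{
        unsigned long nentries = tsb_bytes / 16;        /* sizeof(struct tsb) */

        if (tsb_bytes >= max_tsb_bytes)
                return ~0UL;
        return (nentries * 3) / 4;      /* 8KB TSB: 512 entries -> limit 384 */
}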
@@ -297,7 +328,8 @@ retry_tsb_alloc:
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && new_cache_index > 0) {
+		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+		    new_cache_index > 0) {
 			new_cache_index = 0;
 			new_size = 8192;
 			new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ retry_tsb_alloc:
 		/* If we failed on a TSB grow, we are under serious
 		 * memory pressure so don't try to grow any more.
 		 */
-		if (mm->context.tsb != NULL)
-			mm->context.tsb_rss_limit = ~0UL;
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 		return;
 	}
 
@@ -339,23 +371,26 @@ retry_tsb_alloc:
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ retry_tsb_alloc:
 		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 
@@ -394,40 +429,65 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
 {
-	unsigned long flags, cache_index;
+	unsigned long cache_index;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
 
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 