author	David S. Miller <davem@davemloft.net>	2013-02-19 16:20:08 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-20 12:46:08 -0500
commit	bcd896bae0166b4443503482a26ecf84d9ba60ab (patch)
tree	9674d3bf3999833f9e9ee562bd6113509b94f684
parent	a55ee1ff751f88252207160087d8197bb7538d4c (diff)
sparc64: Handle hugepage TSB being NULL.
Accommodate the possibility that the TSB might be NULL at the point that update_mmu_cache() is invoked. This is necessary because we will sometimes need to defer the TSB allocation to the first fault that happens in the 'mm'.

Separate out the hugepage PTE test into a separate function so that the logic is clearer.

Signed-off-by: David S. Miller <davem@davemloft.net>
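The NULL check added below is the insert-side half of a lazy-allocation scheme: the TSB does not exist until the first fault in the 'mm' allocates it, so any insert that races ahead of that must quietly do nothing. Here is a minimal, self-contained sketch of that pattern; all names (toy_tsb, toy_mm, toy_insert, toy_first_fault) and constants are illustrative, not the sparc64 kernel's identifiers.

/*
 * Toy model of a lazily allocated translation cache: inserts must
 * tolerate a NULL table, exactly like the guard this patch adds to
 * __update_mmu_tsb_insert().
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_NENTRIES 16	/* must be a power of two for the mask below */

struct toy_tsb {
	unsigned long tags[TOY_NENTRIES];
};

struct toy_mm {
	struct toy_tsb *tsb;	/* NULL until the first fault allocates it */
};

/* Mirrors the new guard: bail out if the table is not there yet. */
static void toy_insert(struct toy_mm *mm, unsigned long address)
{
	struct toy_tsb *tsb = mm->tsb;

	if (!tsb)		/* allocation deferred; nothing to update */
		return;

	tsb->tags[(address >> 12) & (TOY_NENTRIES - 1)] = address >> 22;
}

/* Deferred allocation: the table only comes into existence here. */
static void toy_first_fault(struct toy_mm *mm)
{
	if (!mm->tsb)
		mm->tsb = calloc(1, sizeof(*mm->tsb));
}

int main(void)
{
	struct toy_mm mm = { .tsb = NULL };

	toy_insert(&mm, 0xdead000);	/* safe no-op: table not allocated yet */
	toy_first_fault(&mm);
	toy_insert(&mm, 0xdead000);	/* now lands in the table */
	printf("tag stored: %lx\n",
	       mm.tsb->tags[(0xdead000UL >> 12) & (TOY_NENTRIES - 1)]);
	free(mm.tsb);
	return 0;
}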
-rw-r--r--	arch/sparc/mm/init_64.c	38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c846..0d0bc392c35f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
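A side note on the bucket computation kept by the first hunk: `tsb += (address >> tsb_hash_shift) & (nentries - 1UL)` is a power-of-two hash, so masking with `nentries - 1` only selects a valid slot if the entry count is a power of two (which the mask in the kernel code implies). A worked sketch of that indexing with made-up values; the address, shift, and entry count here are hypothetical, not taken from the patch.

/*
 * Worked sketch of the TSB bucket computation: the shift drops the
 * in-page offset, the mask keeps the low bits of the page number.
 * Assumes a power-of-two entry count, as the mask requires.
 */
#include <stdio.h>

int main(void)
{
	unsigned long address = 0x123456789000UL;	/* hypothetical VA */
	unsigned long hash_shift = 13;			/* e.g. PAGE_SHIFT on sparc64 (8K pages) */
	unsigned long nentries = 512;			/* power of two */

	unsigned long bucket = (address >> hash_shift) & (nentries - 1UL);
	unsigned long tag = address >> 22UL;	/* tag stored beside the TTE */

	printf("bucket = %lu, tag = 0x%lx\n", bucket, tag);
	return 0;
}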