diff options
author | David S. Miller <davem@davemloft.net> | 2014-08-04 19:34:01 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-08-04 19:34:01 -0400 |
commit | 18f38132528c3e603c66ea464727b29e9bbcb91b (patch) | |
tree | 9600ee863f2b3fc4898ad218a1273cf77fe020d7 | |
parent | 31dab719fa50cf56d56d3dc25980fecd336f6ca8 (diff) |
sparc64: Do not insert non-valid PTEs into the TSB hash table.
The assumption was that update_mmu_cache() (and the equivalent for PMDs) would
only be called when the PTE being installed will be accessible by the user.
This is not true for code paths originating from remove_migration_pte().
There are dire consequences for placing a non-valid PTE into the TSB. The TLB
miss framework assumes that when a TSB entry matches we can just load it into
the TLB and return from the TLB miss trap.
So if a non-valid PTE is in there, we will deadlock taking the TLB miss over
and over, never satisfying the miss.
Just exit early from update_mmu_cache() and friends in this situation.
Based upon a report and patch from Christopher Alexander Tobias Schulze.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | arch/sparc/mm/init_64.c | 8 |
1 files changed, 8 insertions, 0 deletions
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 16b58ff11e65..db5ddde0b335 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -351,6 +351,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * | |||
351 | 351 | ||
352 | mm = vma->vm_mm; | 352 | mm = vma->vm_mm; |
353 | 353 | ||
354 | /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ | ||
355 | if (!pte_accessible(mm, pte)) | ||
356 | return; | ||
357 | |||
354 | spin_lock_irqsave(&mm->context.lock, flags); | 358 | spin_lock_irqsave(&mm->context.lock, flags); |
355 | 359 | ||
356 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | 360 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
@@ -2619,6 +2623,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
2619 | 2623 | ||
2620 | pte = pmd_val(entry); | 2624 | pte = pmd_val(entry); |
2621 | 2625 | ||
2626 | /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ | ||
2627 | if (!(pte & _PAGE_VALID)) | ||
2628 | return; | ||
2629 | |||
2622 | /* We are fabricating 8MB pages using 4MB real hw pages. */ | 2630 | /* We are fabricating 8MB pages using 4MB real hw pages. */ |
2623 | pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); | 2631 | pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); |
2624 | 2632 | ||