author	John David Anglin <dave.anglin@bell.net>	2016-12-06 21:47:04 -0500
committer	Helge Deller <deller@gmx.de>	2016-12-07 02:56:40 -0500
commit	c78e710c1c9fbeff43dddc0aa3d0ff458e70b0cc (patch)
tree	981210b6bafc21ba73f0ea3b2f5ac00f8b60a6b9
parent	bc3913a5378cd0ddefd1dfec6917cc12eb23a946 (diff)
parisc: Purge TLB before setting PTE
The attached change interchanges the order of purging the TLB and setting the
corresponding page table entry. TLB purges are strongly ordered. It occurred to
me one night that setting the PTE first might have subtle ordering issues on
SMP machines and cause random memory corruption.

A TLB lock guards the insertion of user TLB entries. So after the TLB is
purged, a new entry can't be inserted until the lock is released. This ensures
that the new PTE value is used when the lock is released.

Since making this change, no random segmentation faults have been observed on
the Debian hppa buildd servers.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: <stable@vger.kernel.org>	# v3.16+
Signed-off-by: Helge Deller <deller@gmx.de>
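For clarity, the ordering the patch enforces can be sketched as a plain
function (illustrative only: the real code lives in the macros and inline
functions of arch/parisc/include/asm/pgtable.h, and the wrapper name
parisc_update_pte is hypothetical; pa_tlb_lock, pte_inserted(),
purge_tlb_entries() and set_pte() are taken from the patched file):

/*
 * Sketch of the corrected ordering: purge the TLB while holding the
 * lock that also guards user TLB insertions, and only then publish
 * the new PTE.  Not the exact kernel code.
 */
static inline void parisc_update_pte(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(&pa_tlb_lock, flags);	/* blocks new user TLB inserts */
	old_pte = *ptep;
	if (pte_inserted(old_pte))
		purge_tlb_entries(mm, addr);	/* purge stale translation first */
	set_pte(ptep, pteval);			/* then store the new PTE */
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
}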
-rw-r--r--	arch/parisc/include/asm/pgtable.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c2c43f714684..3a4ed9f91d57 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
-		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
 
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 