author:    John David Anglin <dave.anglin@bell.net>  2013-04-23 16:42:07 -0400
committer: Helge Deller <deller@gmx.de>  2013-04-25 16:37:00 -0400
commit:    bda079d336cd8183e1d844a265ea87ae3e1bbe78 (patch)
tree:      0e2a80cf2dacd44299e55baa485dd7dd4a2ff3d9 /arch/parisc/include/asm
parent:    cf71130d630d773ef1861adbd8a034d3ac806f3e (diff)
parisc: use spin_lock_irqsave/spin_unlock_irqrestore for PTE updates
User applications running on SMP kernels have long suffered from instability
and random segmentation faults. This patch improves the situation, although
there is more work to be done.

One of the problems is that the various routines in pgtable.h that update
page table entries use different locking mechanisms, or no lock at all
(set_pte_at). This change modifies the routines to all use the same lock,
pa_dbit_lock. This lock is used for dirty bit updates in the interruption
code. The patch also purges the TLB entries associated with the PTE, to
ensure that inconsistent values are not used after the page table entry is
updated. The UP and SMP code paths are now identical.

The change also includes a minor update to the purge_tlb_entries function
in cache.c to improve its efficiency.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: Helge Deller <deller@gmx.de>
Signed-off-by: Helge Deller <deller@gmx.de>
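In outline, every PTE updater in pgtable.h now follows a single pattern:
take pa_dbit_lock with interrupts disabled (the same lock the TLB
interruption code takes for dirty-bit updates, hence the irqsave variant),
write the entry, and purge the matching TLB entry before unlocking. A
minimal sketch of that pattern, built from the identifiers in the diff
below; the helper name pte_update_sketch is hypothetical and not part of
the patch:

static inline void pte_update_sketch(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pte_t pteval)
{
	unsigned long flags;

	/* Serialize against the dirty-bit interruption handler and all
	 * other PTE updaters; disabling interrupts prevents the handler
	 * from deadlocking against a lock holder on the same CPU. */
	spin_lock_irqsave(&pa_dbit_lock, flags);
	set_pte(ptep, pteval);		/* install the new entry */
	purge_tlb_entries(mm, addr);	/* drop any stale TLB copy */
	spin_unlock_irqrestore(&pa_dbit_lock, flags);
}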
Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 47
1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f9..1e40d7f86be3 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+extern spinlock_t pa_dbit_lock;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 
 #define set_pte_at(mm, addr, ptep, pteval)			\
 	do {							\
+		unsigned long flags;				\
+		spin_lock_irqsave(&pa_dbit_lock, flags);	\
 		set_pte(ptep, pteval);				\
 		purge_tlb_entries(mm, addr);			\
+		spin_unlock_irqrestore(&pa_dbit_lock, flags);	\
 	} while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
+	pte_t pte;
+	unsigned long flags;
+
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
-	pte_t pte = *ptep;
-	if (!pte_young(pte))
+
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	pte = *ptep;
+	if (!pte_young(pte)) {
+		spin_unlock_irqrestore(&pa_dbit_lock, flags);
 		return 0;
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	}
+	set_pte(ptep, pte_mkold(pte));
+	purge_tlb_entries(vma->vm_mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 	return 1;
-#endif
 }
 
-extern spinlock_t pa_dbit_lock;
-
 struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
+	unsigned long flags;
 
-	spin_lock(&pa_dbit_lock);
+	spin_lock_irqsave(&pa_dbit_lock, flags);
 	old_pte = *ptep;
 	pte_clear(mm,addr,ptep);
-	spin_unlock(&pa_dbit_lock);
+	purge_tlb_entries(mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 
 	return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
-	unsigned long new, old;
-
-	do {
-		old = pte_val(*ptep);
-		new = pte_val(pte_wrprotect(__pte (old)));
-	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+	unsigned long flags;
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-#else
-	pte_t old_pte = *ptep;
-	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 }
 
 #define pte_same(A,B) (pte_val(A) == pte_val(B))