author	Kumar Gala <galak@freescale.com>	2005-04-16 18:24:20 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:24:20 -0400
commit	7a1e335085ead05da08f791340f58b493126894d (patch)
tree	5b1a763061d68caef26ec85d55404d868bab7a74	/include/asm-ppc/pgtable.h
parent	0c541b4406a68e74d94ddb667c69d9e03bce8681 (diff)
[PATCH] ppc32: Fix pte_update for 64-bit PTEs
While the existing pte_update code handled atomically modifying a 64-bit PTE, it did not return all 64 bits of the PTE as it was before the modification. This causes problems in places that expect the full old PTE to be returned, such as ptep_get_and_clear().

Created a new pte_update function that is conditional on CONFIG_PTE_64BIT. It atomically reads and updates the low PTE word, in which all PTE flag bits are required to be, and returns the full 64-bit PTE from before the modification.

Since we now have an explicit 64-bit PTE version of pte_update, we can also remove the hack that existed to get at the low PTE word regardless of PTE size.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
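As a rough illustration of why the full width matters (the helper name and the all-ones clear mask below are hypothetical, not taken from this patch), a caller in the style of ptep_get_and_clear() hands the value returned by pte_update() straight back to __pte(), so with 64-bit PTEs the high word holding the physical page number has to survive the round trip:

/*
 * Hypothetical sketch only -- not part of the patch.  Shows the shape of a
 * caller that depends on pte_update() returning the complete old PTE: if
 * the return value were truncated to the low 32 bits, the physical page
 * number kept in the high word of a 64-bit PTE would be lost.
 */
static inline pte_t get_and_clear_sketch(pte_t *ptep)
{
	unsigned long long old = pte_update(ptep, ~0UL, 0);	/* clear all flag bits */

	return __pte(old);		/* needs both words of the old PTE */
}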
Diffstat (limited to 'include/asm-ppc/pgtable.h')
-rw-r--r--	include/asm-ppc/pgtable.h | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 19dfb7abaa21..2e88cd9feffe 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -526,10 +526,10 @@ extern void add_hash_page(unsigned context, unsigned long va,
  * Atomic PTE updates.
  *
  * pte_update clears and sets bit atomically, and returns
- * the old pte value.
- * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
- * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ * the old pte value.  In the 64-bit PTE case we lock around the
+ * low PTE word since we expect ALL flag bits to be there
  */
+#ifndef CONFIG_PTE_64BIT
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 				       unsigned long set)
 {
@@ -543,10 +543,31 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 "	stwcx.	%1,0,%3\n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "r" (p), "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
 	return old;
 }
+#else
+static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
+				       unsigned long set)
+{
+	unsigned long long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__("\
+1:	lwarx	%L0,0,%4\n\
+	lwzx	%0,0,%3\n\
+	andc	%1,%L0,%5\n\
+	or	%1,%1,%6\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%1,0,%4\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+	return old;
+}
+#endif
 
 /*
  * set_pte stores a linux PTE into the linux page table.
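For readers not fluent in PowerPC inline assembly, here is a rough C-only sketch of what the new CONFIG_PTE_64BIT variant above does. Assumptions: 32-bit big-endian PowerPC layout, and the lwarx/stwcx. reservation loop replaced by a plain read-modify-write, so this version provides no atomicity guarantee and is illustrative only.

/*
 * Illustration only, not from the patch.  The flag-carrying low word of a
 * 64-bit PTE sits at the higher address on big-endian ppc32, i.e. at
 * (char *)p + 4 -- the address the asm passes as operand %4 to lwarx/stwcx.
 */
static unsigned long long pte_update_sketch(unsigned long long *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long *flags = (unsigned long *)((char *)p + 4);
	unsigned long long old;

	old  = *flags;						/* lwarx %L0: old low word  */
	old |= (unsigned long long)*(unsigned long *)p << 32;	/* lwzx  %0:  old high word */
	*flags = (*flags & ~clr) | set;		/* andc/or; stwcx. retries the loop if the
						   reservation on the low word was lost */
	return old;
}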