author    Paul Mackerras <paulus@samba.org>    2009-08-17 00:36:32 -0400
committer Paul Mackerras <paulus@samba.org>    2009-08-18 00:48:39 -0400
commit    1660e9d3d04b6c636b7171bf6c08ac7b82a7de79
tree      9a402e3cbe5613ecb6d8f1ee762dd760ae57a38a
parent    64f1607ffbbc772685733ea63e6f7f4183df1b16
powerpc/32: Always order writes to halves of 64-bit PTEs
On 32-bit systems with 64-bit PTEs, the PTEs have to be written in two
32-bit halves.  On SMP we write the higher-order half and then the
lower-order half, with a write barrier between the two halves, but on
UP there was no particular ordering of the writes to the two halves.

This extends the ordering that we already do on SMP to the UP case as
well.  The reason is that with the perf_counter subsystem potentially
accessing user memory at interrupt time to get stack traces, we have to
be careful not to create an incorrect but apparently valid PTE even on
UP.

Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode.  In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
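
For illustration, the ordering discipline the commit message describes
can be sketched in plain C.  This is an editor's sketch, not code from
the patch: pte64_t, store_barrier(), and set_pte_two_halves() are
hypothetical names, and the real __set_pte_at() implements the sequence
with inline assembly (stw; eieio; stw).  The union layout assumes
big-endian word order, as on powerpc, with the valid bits in the
low-order word.

#include <stdint.h>

/* Hypothetical 64-bit PTE split into its two 32-bit halves
 * (big-endian word order: high-order word first in memory). */
typedef union {
	uint64_t whole;
	struct {
		uint32_t hi;	/* high-order word; carries no valid bit */
		uint32_t lo;	/* low-order word; holds the valid bits */
	} half;
} pte64_t;

static inline void store_barrier(void)
{
	/* Stand-in for powerpc's eieio store barrier.  This is only a
	 * compiler barrier, which suffices for the UP case the patch
	 * extends the ordering to; SMP needs a hardware barrier. */
	__asm__ __volatile__("" ::: "memory");
}

static void set_pte_two_halves(volatile pte64_t *ptep, pte64_t pte)
{
	/* Write the half that cannot make the PTE look valid first... */
	ptep->half.hi = pte.half.hi;
	store_barrier();
	/* ...then the half containing the valid bits, so a walker at
	 * interrupt time (e.g. perf_counter taking a user stack trace)
	 * never sees a half-written but apparently valid PTE. */
	ptep->half.lo = pte.half.lo;
}

With the two stores ordered this way, the worst an interrupt-time
reader can observe is a not-yet-valid PTE, which it simply treats as
unmapped.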