aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/include
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2014-08-13 03:02:03 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-08-13 04:20:41 -0400
commit85c1fafd7262e68ad821ee1808686b1392b1167d (patch)
tree9b454eb7a172d0b583f7132527fd3d5098f2defa /arch/powerpc/include
parent7e467245bf5226db34c4b12d3cbacfa2f7a15a8b (diff)
powerpc/mm: Use read barrier when creating real_pte
On ppc64 we support 4K hash pte with 64K page size. That requires us to track the hash pte slot information on a per 4k basis. We do that by storing the slot details in the second half of pte page. The pte bit _PAGE_COMBO is used to indicate whether the second half need to be looked while building real_pte. We need to use read memory barrier while doing that so that load of hidx is not reordered w.r.t _PAGE_COMBO check. On the store side we already do a lwsync in __hash_page_4K CC: <stable@vger.kernel.org> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index b6d2d42f84b5..4f4ec2ab45c9 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -46,11 +46,31 @@
  * in order to deal with 64K made of 4K HW pages. Thus we override the
  * generic accessors and iterators here
  */
-#define __real_pte(e,p) 	((real_pte_t) { \
-	(e), (pte_val(e) & _PAGE_COMBO) ? \
-	(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
-#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
-	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+	real_pte_t rpte;
+
+	rpte.pte = pte;
+	rpte.hidx = 0;
+	if (pte_val(pte) & _PAGE_COMBO) {
+		/*
+		 * Make sure we order the hidx load against the _PAGE_COMBO
+		 * check. The store side ordering is done in __hash_page_4K
+		 */
+		smp_rmb();
+		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+	}
+	return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+	if ((pte_val(rpte.pte) & _PAGE_COMBO))
+		return (rpte.hidx >> (index<<2)) & 0xf;
+	return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_sub_valid(rpte, index) \
 	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))