aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2014-08-13 03:01:59 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-08-13 04:20:39 -0400
commit629149fae478f0ac6bf705a535708b192e9c6b59 (patch)
tree2ffdc967eac8e67df163da7c53c15799697455e1 /arch/powerpc
parentfa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d (diff)
powerpc/thp: Invalidate old 64K based hash page mapping before insert of 4k pte
If we changed base page size of the segment, either via sub_page_protect or via remap_4k_pfn, we do a demote_segment which doesn't flush the hash table entries. We do a lazy hash page table flush for all mapped pages in the demoted segment. This happens when we handle hash page fault for these pages. We use _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a pte is backed by 4K hash pte. If we find _PAGE_COMBO not set on the pte, that implies that we could possibly have older 64K hash pte entries in the hash page table and we need to invalidate those entries. Handle this correctly for 16M pages. CC: <stable@vger.kernel.org> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c79
1 files changed, 70 insertions, 9 deletions
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 11f9a37ca2c6..1fb609dcc49b 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,6 +18,57 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <asm/machdep.h> 19#include <asm/machdep.h>
20 20
21static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
22 pmd_t *pmdp, unsigned int psize, int ssize)
23{
24 int i, max_hpte_count, valid;
25 unsigned long s_addr;
26 unsigned char *hpte_slot_array;
27 unsigned long hidx, shift, vpn, hash, slot;
28
29 s_addr = addr & HPAGE_PMD_MASK;
30 hpte_slot_array = get_hpte_slot_array(pmdp);
31 /*
32 * If we try to do a HUGE PTE update after a withdraw is done,
33 * we will find the below NULL. This happens when we do
34 * split_huge_page_pmd
35 */
36 if (!hpte_slot_array)
37 return;
38
39 if (ppc_md.hugepage_invalidate)
40 return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
41 psize, ssize);
42 /*
43 * No bulk hpte removal support, invalidate each entry
44 */
45 shift = mmu_psize_defs[psize].shift;
46 max_hpte_count = HPAGE_PMD_SIZE >> shift;
47 for (i = 0; i < max_hpte_count; i++) {
48 /*
49 * 8 bits for each hpte entry
50 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
51 */
52 valid = hpte_valid(hpte_slot_array, i);
53 if (!valid)
54 continue;
55 hidx = hpte_hash_index(hpte_slot_array, i);
56
57 /* get the vpn */
58 addr = s_addr + (i * (1ul << shift));
59 vpn = hpt_vpn(addr, vsid, ssize);
60 hash = hpt_hash(vpn, shift, ssize);
61 if (hidx & _PTEIDX_SECONDARY)
62 hash = ~hash;
63
64 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
65 slot += hidx & _PTEIDX_GROUP_IX;
66 ppc_md.hpte_invalidate(slot, vpn, psize,
67 MMU_PAGE_16M, ssize, 0);
68 }
69}
70
71
21int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, 72int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
22 pmd_t *pmdp, unsigned long trap, int local, int ssize, 73 pmd_t *pmdp, unsigned long trap, int local, int ssize,
23 unsigned int psize) 74 unsigned int psize)
@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
85 vpn = hpt_vpn(ea, vsid, ssize); 136 vpn = hpt_vpn(ea, vsid, ssize);
86 hash = hpt_hash(vpn, shift, ssize); 137 hash = hpt_hash(vpn, shift, ssize);
87 hpte_slot_array = get_hpte_slot_array(pmdp); 138 hpte_slot_array = get_hpte_slot_array(pmdp);
139 if (psize == MMU_PAGE_4K) {
140 /*
141 * invalidate the old hpte entry if we have that mapped via 64K
142 * base page size. This is because demote_segment won't flush
143 * hash page table entries.
144 */
145 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
146 invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
147 }
88 148
89 valid = hpte_valid(hpte_slot_array, index); 149 valid = hpte_valid(hpte_slot_array, index);
90 if (valid) { 150 if (valid) {
@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
107 * safely update this here. 167 * safely update this here.
108 */ 168 */
109 valid = 0; 169 valid = 0;
110 new_pmd &= ~_PAGE_HPTEFLAGS;
111 hpte_slot_array[index] = 0; 170 hpte_slot_array[index] = 0;
112 } else 171 }
113 /* clear the busy bits and set the hash pte bits */
114 new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
115 } 172 }
116 173
117 if (!valid) { 174 if (!valid) {
@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
119 176
120 /* insert new entry */ 177 /* insert new entry */
121 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; 178 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
122repeat: 179 new_pmd |= _PAGE_HASHPTE;
123 hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
124
125 /* clear the busy bits and set the hash pte bits */
126 new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
127 180
128 /* Add in WIMG bits */ 181 /* Add in WIMG bits */
129 rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | 182 rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
@@ -132,6 +185,8 @@ repeat:
132 * enable the memory coherence always 185 * enable the memory coherence always
133 */ 186 */
134 rflags |= HPTE_R_M; 187 rflags |= HPTE_R_M;
188repeat:
189 hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
135 190
136 /* Insert into the hash table, primary slot */ 191 /* Insert into the hash table, primary slot */
137 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, 192 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -172,6 +227,12 @@ repeat:
172 mark_hpte_slot_valid(hpte_slot_array, index, slot); 227 mark_hpte_slot_valid(hpte_slot_array, index, slot);
173 } 228 }
174 /* 229 /*
230 * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
231 * base page size 4k.
232 */
233 if (psize == MMU_PAGE_4K)
234 new_pmd |= _PAGE_COMBO;
235 /*
175 * The hpte valid is stored in the pgtable whose address is in the 236 * The hpte valid is stored in the pgtable whose address is in the
176 * second half of the PMD. Order this against clearing of the busy bit in 237 * second half of the PMD. Order this against clearing of the busy bit in
177 * huge pmd. 238 * huge pmd.