Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c   114
1 file changed, 98 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d5339a3b9945..e56a307bc676 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -989,7 +989,9 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
  * -1 - critical hash insertion error
  * -2 - access not permitted by subpage protection mechanism
  */
-int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap)
+int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+		 unsigned long access, unsigned long trap,
+		 unsigned long flags)
 {
 	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
@@ -997,7 +999,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
 	pte_t *ptep;
 	unsigned hugeshift;
 	const struct cpumask *tmp;
-	int rc, user_region = 0, local = 0;
+	int rc, user_region = 0;
 	int psize, ssize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
@@ -1049,7 +1051,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
 	/* Check CPU locality */
 	tmp = cpumask_of(smp_processor_id());
 	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
-		local = 1;
+		flags |= HPTE_LOCAL_UPDATE;
 
 #ifndef CONFIG_PPC_64K_PAGES
 	/* If we use 4K pages and our psize is not 4K, then we might
@@ -1086,11 +1088,11 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
 	if (hugeshift) {
 		if (pmd_trans_huge(*(pmd_t *)ptep))
 			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
-					     trap, local, ssize, psize);
+					     trap, flags, ssize, psize);
 #ifdef CONFIG_HUGETLB_PAGE
 		else
 			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
-					      local, ssize, hugeshift, psize);
+					      flags, ssize, hugeshift, psize);
 #else
 		else {
 			/*
@@ -1149,7 +1151,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
+				     flags, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
 	{
@@ -1158,7 +1161,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
 			rc = -2;
 		else
 			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
-					    local, ssize, spp);
+					    flags, ssize, spp);
 	}
 
 	/* Dump some info in case of hash insertion failure, they should
@@ -1181,14 +1184,19 @@ bail:
 }
 EXPORT_SYMBOL_GPL(hash_page_mm);
 
-int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+	      unsigned long dsisr)
 {
+	unsigned long flags = 0;
 	struct mm_struct *mm = current->mm;
 
 	if (REGION_ID(ea) == VMALLOC_REGION_ID)
 		mm = &init_mm;
 
-	return hash_page_mm(mm, ea, access, trap);
+	if (dsisr & DSISR_NOHPTE)
+		flags |= HPTE_NOHPTE_UPDATE;
+
+	return hash_page_mm(mm, ea, access, trap, flags);
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
@@ -1200,7 +1208,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	pgd_t *pgdir;
 	pte_t *ptep;
 	unsigned long flags;
-	int rc, ssize, local = 0;
+	int rc, ssize, update_flags = 0;
 
 	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -1251,16 +1259,17 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
 	/* Is that local to this CPU ? */
 	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
-		local = 1;
+		update_flags |= HPTE_LOCAL_UPDATE;
 
 	/* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
+				     update_flags, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
-				    subpage_protection(mm, ea));
+		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
+				    ssize, subpage_protection(mm, ea));
 
 	/* Dump some info in case of hash insertion failure, they should
 	 * never happen so it is really useful to know if/when they do
@@ -1278,9 +1287,10 @@ out_exit:
  * do not forget to update the assembly call site !
  */
 void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
-		     int local)
+		     unsigned long flags)
 {
 	unsigned long hash, index, shift, hidx, slot;
+	int local = flags & HPTE_LOCAL_UPDATE;
 
 	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
 	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
@@ -1315,6 +1325,78 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 #endif
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+			 pmd_t *pmdp, unsigned int psize, int ssize,
+			 unsigned long flags)
+{
+	int i, max_hpte_count, valid;
+	unsigned long s_addr;
+	unsigned char *hpte_slot_array;
+	unsigned long hidx, shift, vpn, hash, slot;
+	int local = flags & HPTE_LOCAL_UPDATE;
+
+	s_addr = addr & HPAGE_PMD_MASK;
+	hpte_slot_array = get_hpte_slot_array(pmdp);
+	/*
+	 * IF we try to do a HUGE PTE update after a withdraw is done.
+	 * we will find the below NULL. This happens when we do
+	 * split_huge_page_pmd
+	 */
+	if (!hpte_slot_array)
+		return;
+
+	if (ppc_md.hugepage_invalidate) {
+		ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+					   psize, ssize, local);
+		goto tm_abort;
+	}
+	/*
+	 * No bluk hpte removal support, invalidate each entry
+	 */
+	shift = mmu_psize_defs[psize].shift;
+	max_hpte_count = HPAGE_PMD_SIZE >> shift;
+	for (i = 0; i < max_hpte_count; i++) {
+		/*
+		 * 8 bits per each hpte entries
+		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+		 */
+		valid = hpte_valid(hpte_slot_array, i);
+		if (!valid)
+			continue;
+		hidx = hpte_hash_index(hpte_slot_array, i);
+
+		/* get the vpn */
+		addr = s_addr + (i * (1ul << shift));
+		vpn = hpt_vpn(addr, vsid, ssize);
+		hash = hpt_hash(vpn, shift, ssize);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+		ppc_md.hpte_invalidate(slot, vpn, psize,
+				       MMU_PAGE_16M, ssize, local);
+	}
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Transactions are not aborted by tlbiel, only tlbie.
+	 * Without, syncing a page back to a block device w/ PIO could pick up
+	 * transactional data (bad!) so we force an abort here. Before the
+	 * sync the page will be made read-only, which will flush_hash_page.
+	 * BIG ISSUE here: if the kernel uses a page from userspace without
+	 * unmapping it first, it may see the speculated version.
+	 */
+	if (local && cpu_has_feature(CPU_FTR_TM) &&
+	    current->thread.regs &&
+	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
+		tm_enable();
+		tm_abort(TM_CAUSE_TLBI);
+	}
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range)
@@ -1322,7 +1404,7 @@ void flush_hash_range(unsigned long number, int local)
 	else {
 		int i;
 		struct ppc64_tlb_batch *batch =
-			&__get_cpu_var(ppc64_tlb_batch);
+			this_cpu_ptr(&ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vpn[i], batch->pte[i],