Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 626e93db28ba..6817b0350c71 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
+	bool was_writable;
 	int flags = 0;
 
 	/* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		flags |= TNF_FAULT_LOCAL;
 	}
 
-	/*
-	 * Avoid grouping on DSO/COW pages in specific and RO pages
-	 * in general, RO pages shouldn't hurt as much anyway since
-	 * they can be in shared cache state.
-	 *
-	 * FIXME! This checks "pmd_dirty()" as an approximation of
-	 * "is this a read-only page", since checking "pmd_write()"
-	 * is even more broken. We haven't actually turned this into
-	 * a writable page, so pmd_write() will always be false.
-	 */
-	if (!pmd_dirty(pmd))
+	/* See similar comment in do_numa_page for explanation */
+	if (!(vma->vm_flags & VM_WRITE))
 		flags |= TNF_NO_GROUP;
 
 	/*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
-	}
+	} else
+		flags |= TNF_MIGRATE_FAIL;
 
 	goto out;
 clear_pmdnuma:
 	BUG_ON(!PageLocked(page));
+	was_writable = pmd_write(pmd);
 	pmd = pmd_modify(pmd, vma->vm_page_prot);
+	pmd = pmd_mkyoung(pmd);
+	if (was_writable)
+		pmd = pmd_mkwrite(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
+		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
 
 		/*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		if (!prot_numa || !pmd_protnone(*pmd)) {
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 			entry = pmd_modify(entry, newprot);
+			if (preserve_write)
+				entry = pmd_mkwrite(entry);
 			ret = HPAGE_PMD_NR;
 			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(pmd_write(entry));
+			BUG_ON(!preserve_write && pmd_write(entry));
 		}
 		spin_unlock(ptl);
 	}