Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 411144f977b1..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
+	bool was_writable = pte_write(pte);
 	int flags = 0;
 
 	/* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Make it present again */
 	pte = pte_modify(pte, vma->vm_page_prot);
 	pte = pte_mkyoung(pte);
+	if (was_writable)
+		pte = pte_mkwrite(pte);
 	set_pte_at(mm, addr, ptep, pte);
 	update_mmu_cache(vma, addr, ptep);
 
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * Avoid grouping on DSO/COW pages in specific and RO pages
-	 * in general, RO pages shouldn't hurt as much anyway since
-	 * they can be in shared cache state.
-	 *
-	 * FIXME! This checks "pmd_dirty()" as an approximation of
-	 * "is this a read-only page", since checking "pmd_write()"
-	 * is even more broken. We haven't actually turned this into
-	 * a writable page, so pmd_write() will always be false.
+	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+	 * much anyway since they can be in shared cache state. This misses
+	 * the case where a mapping is writable but the process never writes
+	 * to it but pte_write gets cleared during protection updates and
+	 * pte_dirty has unpredictable behaviour between PTE scan updates,
+	 * background writeback, dirty balancing and application behaviour.
 	 */
-	if (!pte_dirty(pte))
+	if (!(vma->vm_flags & VM_WRITE))
 		flags |= TNF_NO_GROUP;
 
 	/*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (migrated) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
-	}
+	} else
+		flags |= TNF_MIGRATE_FAIL;
 
 out:
 	if (page_nid != -1)
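The hunk above replaces the pte_dirty() heuristic with a check on the VMA's write permission when deciding whether to suppress NUMA task grouping. The userspace sketch below is not part of the patch; it only illustrates that decision with simplified stand-in types and flag values (fake_vma, fake_pte, and the constants are illustrative, not the kernel's definitions).

/*
 * Illustrative userspace sketch, not kernel code: models the grouping
 * decision changed by this patch. All types and constants here are
 * simplified stand-ins for the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE	0x1	/* stand-in for the kernel's VM_WRITE vma flag */
#define TNF_NO_GROUP	0x2	/* stand-in for the kernel's TNF_NO_GROUP */

struct fake_vma { unsigned long vm_flags; };
struct fake_pte { bool dirty; };

/* Old heuristic: treat a clean PTE as read-only and skip grouping. */
static int flags_old(struct fake_pte pte)
{
	return pte.dirty ? 0 : TNF_NO_GROUP;
}

/* New check: rely on the VMA's write permission instead of pte_dirty(). */
static int flags_new(struct fake_vma vma)
{
	return (vma.vm_flags & VM_WRITE) ? 0 : TNF_NO_GROUP;
}

int main(void)
{
	/*
	 * Writable mapping whose pages have not been dirtied yet: the old
	 * pte_dirty() check would suppress grouping, the new VM_WRITE
	 * check does not.
	 */
	struct fake_vma vma = { .vm_flags = VM_WRITE };
	struct fake_pte pte = { .dirty = false };

	printf("old: %s grouping\n", flags_old(pte) & TNF_NO_GROUP ? "skip" : "allow");
	printf("new: %s grouping\n", flags_new(vma) & TNF_NO_GROUP ? "skip" : "allow");
	return 0;
}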