author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-06-24 17:49:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-06-24 20:23:52 -0400
commit		315d09bf30c2b436a1fdac86d31c24380cd56c4f
tree		fa85cc65d0850806f589d6fc525c6036afb308b4	/mm/memory.c
parent		1f08fe266560fc2d1383fd9c8c08fdd432ea302b
Revert "mm: make faultaround produce old ptes"
This reverts commit 5c0a85fad949212b3e059692deecdeed74ae7ec7.

The commit causes a ~6% regression in unixbench. Let's revert it for now
and consider another solution to the reclaim problem later.

Link: http://lkml.kernel.org/r/1465893750-44080-2-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
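For context on what is being reverted: the original commit made faultaround install its extra mappings with the accessed ("young") bit cleared, so that page reclaim could tell speculatively mapped pages from pages the process actually touched. The toy userspace sketch below models that old-vs-young bookkeeping; it is illustrative only, with an invented toy_pte_t, since the real accessed bit lives in the hardware page-table entry and its layout is architecture-specific.

/*
 * Toy model of the "old vs. young" PTE idea behind the reverted commit.
 * Userspace sketch, not kernel code: toy_pte_t and TOY_PTE_ACCESSED are
 * made-up stand-ins for the hardware PTE and its accessed bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_PTE_ACCESSED 0x1u	/* stand-in for the hardware accessed bit */

typedef unsigned int toy_pte_t;

static toy_pte_t toy_pte_mkold(toy_pte_t pte)	{ return pte & ~TOY_PTE_ACCESSED; }
static toy_pte_t toy_pte_mkyoung(toy_pte_t pte)	{ return pte | TOY_PTE_ACCESSED; }
static bool toy_pte_young(toy_pte_t pte)	{ return pte & TOY_PTE_ACCESSED; }

int main(void)
{
	toy_pte_t pte = toy_pte_mkyoung(0);

	/* The reverted commit installed faultaround PTEs as "old" ... */
	pte = toy_pte_mkold(pte);
	printf("after faultaround: young=%d\n", toy_pte_young(pte));

	/* ... so reclaim could tell speculative mappings from used pages. */
	pte = toy_pte_mkyoung(pte);
	printf("after real access: young=%d\n", toy_pte_young(pte));
	return 0;
}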
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	23
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..61fe7e7b56bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old)
+		struct page *page, pte_t *pte, bool write, bool anon)
 {
 	pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (old)
-		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
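The two hunks above restore do_set_pte() to always install a young entry: the old parameter and the pte_mkold() call are gone. For reference, on x86 the helpers involved just toggle _PAGE_ACCESSED; the sketch below is paraphrased from arch/x86/include/asm/pgtable.h of this era and is not part of this patch (other architectures implement these differently).

/* Sketch, paraphrased from arch/x86/include/asm/pgtable.h; not part of this patch. */
static inline pte_t pte_mkold(pte_t pte)
{
	/* clear the accessed bit: the entry looks untouched to reclaim */
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* set the accessed bit: the entry counts as recently used */
	return pte_set_flags(pte, _PAGE_ACCESSED);
}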
@@ -3032,20 +3030,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_same(*pte, orig_pte))
-			goto unlock_out;
 		do_fault_around(vma, address, pte, pgoff, flags);
-		/* Check if the fault is handled by faultaround */
-		if (!pte_same(*pte, orig_pte)) {
-			/*
-			 * Faultaround produce old pte, but the pte we've
-			 * handler fault for should be young.
-			 */
-			pte_t entry = pte_mkyoung(*pte);
-			if (ptep_set_access_flags(vma, address, pte, entry, 0))
-				update_mmu_cache(vma, address, pte);
+		if (!pte_same(*pte, orig_pte))
 			goto unlock_out;
-		}
 		pte_unmap_unlock(pte, ptl);
 	}
 
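With young faultaround PTEs restored, a single pte_same() check after do_fault_around() suffices again: if the entry no longer matches the orig_pte snapshot taken when the fault was raised, faultaround already installed a usable mapping and the handler can bail out, with no pte_mkyoung() fixup needed. Below is a minimal userspace model of that snapshot-and-recheck pattern; every name in it is invented for illustration, and a plain variable stands in for the PTE slot that the kernel accesses under the page-table lock.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long toy_pte_t;

static toy_pte_t page_table_slot;	/* stands in for *pte under ptl */

static bool toy_pte_same(toy_pte_t a, toy_pte_t b) { return a == b; }

/* faultaround may populate the faulting slot itself as a side effect */
static void toy_fault_around(void) { page_table_slot = 0x1000; }

int main(void)
{
	toy_pte_t orig_pte = page_table_slot;	/* snapshot taken at fault time */

	/* the page-table lock would be held from here on */
	toy_fault_around();
	if (!toy_pte_same(page_table_slot, orig_pte)) {
		/* corresponds to "goto unlock_out": fault already handled */
		printf("handled by faultaround\n");
		return 0;
	}
	printf("fall through to __do_fault()\n");
	return 0;
}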
@@ -3060,7 +3047,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3098,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true, false);
+	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3151,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false, false);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
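For quick reference, the (write, anon) combinations at the three restored do_set_pte() call sites, read straight from the hunks above:

	do_read_fault():   write=false, anon=false  - read-only file-backed page
	do_cow_fault():    write=true,  anon=true   - private COW copy of the page
	do_shared_fault(): write=true,  anon=false  - writable shared file mapping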