author    Linus Torvalds <torvalds@linux-foundation.org>  2013-08-14 13:04:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-08-14 13:04:43 -0400
commit    f1d6e17f540af37bb1891480143669ba7636c4cf (patch)
tree      962d95f43fe425c9a7d4c7f1316c76000bcec370 /mm
parent    28fbc8b6a29c849a3f03a6b05010d4b584055665 (diff)
parent    8c8296223f3abb142be8fc31711b18a704c0e7d8 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge a bunch of fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  fs/proc/task_mmu.c: fix buffer overflow in add_page_map()
  arch: *: Kconfig: add "kernel/Kconfig.freezer" to "arch/*/Kconfig"
  ocfs2: fix null pointer dereference in ocfs2_dir_foreach_blk_id()
  x86 get_unmapped_area(): use proper mmap base for bottom-up direction
  ocfs2: fix NULL pointer dereference in ocfs2_duplicate_clusters_by_page
  ocfs2: Revert 40bd62e to avoid regression in extended allocation
  drivers/rtc/rtc-stmp3xxx.c: provide timeout for potentially endless loop polling a HW bit
  hugetlb: fix lockdep splat caused by pmd sharing
  aoe: adjust ref of head for compound page tails
  microblaze: fix clone syscall
  mm: save soft-dirty bits on file pages
  mm: save soft-dirty bits on swapped pages
  memcg: don't initialize kmem-cache destroying work for root caches
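Three of the merged patches ("mm: save soft-dirty bits on file pages", "mm: save soft-dirty bits on swapped pages", and the related fremap.c/rmap.c hunks below) plug places where the soft-dirty bit was silently dropped once a pte stopped being an ordinary present pte. For orientation: userspace consumes the bit through the documented procfs interface (Documentation/vm/soft-dirty.txt, requires CONFIG_MEM_SOFT_DIRTY). Writing "4" to /proc/pid/clear_refs resets it, and bit 55 of each /proc/pid/pagemap entry reports it. A minimal sketch of that ABI, not part of this commit:

/*
 * Hypothetical demo of the soft-dirty ABI these patches fix up; not
 * part of the merged diff.  Bit 55 of a pagemap entry is the
 * soft-dirty flag (Documentation/vm/soft-dirty.txt).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_SOFT_DIRTY	(1ULL << 55)

static int page_soft_dirty(const void *addr)
{
	uint64_t ent = 0;
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &ent, sizeof(ent),
		  (uintptr_t)addr / psize * sizeof(ent)) != sizeof(ent))
		ent = 0;
	close(fd);
	return !!(ent & PM_SOFT_DIRTY);
}

int main(void)
{
	static char buf[4096];
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	buf[0] = 1;				/* fault the page in */
	if (fd >= 0) {
		if (write(fd, "4", 1) != 1)	/* "4" resets soft-dirty bits */
			return 1;
		close(fd);
	}
	printf("after clear: %d\n", page_soft_dirty(buf));	/* 0 */
	buf[0] = 2;				/* write fault sets it again */
	printf("after write: %d\n", page_soft_dirty(buf));	/* 1 */
	return 0;
}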
Diffstat (limited to 'mm')
-rw-r--r--  mm/fremap.c      11
-rw-r--r--  mm/memcontrol.c   4
-rw-r--r--  mm/memory.c      13
-rw-r--r--  mm/rmap.c        14
-rw-r--r--  mm/swapfile.c    19
5 files changed, 48 insertions(+), 13 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
-	pte_t *pte;
+	pte_t *pte, ptfile;
 	spinlock_t *ptl;
 
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte))
+	ptfile = pgoff_to_pte(pgoff);
+
+	if (!pte_none(*pte)) {
+		if (pte_present(*pte) && pte_soft_dirty(*pte))
+			pte_file_mksoft_dirty(ptfile);
 		zap_pte(mm, vma, addr, pte);
+	}
 
-	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+	set_pte_at(mm, addr, pte, ptfile);
 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
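This hunk and several below lean on a small family of soft-dirty pte helpers. For reference, a paraphrase of their approximate x86 shape in this era (arch/x86/include/asm/pgtable.h; the exact flag names, and the reuse of the same bit for file ptes, are assumptions here):

/*
 * Paraphrased 3.11-era x86 helpers, for reference only.  Each one
 * returns a new pte value; the argument is passed by value and is
 * not modified in place.
 */
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

/* File ptes are not present, so the soft-dirty bit can be reused. */
static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

Because the result comes back by value, the bit only sticks where a call site assigns it, as the rmap.c and memory.c hunks below do with swp_pte and pte.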
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c290a1cf3862..c5792a5d87ce 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
-	INIT_WORK(&s->memcg_params->destroy,
-			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 	} else
 		s->memcg_params->is_root_cache = true;
 
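The memcontrol.c hunk is "memcg: don't initialize kmem-cache destroying work for root caches". Moving INIT_WORK() under the memcg branch matters because memcg_params is a union: for a root cache the same storage holds the memcg_caches[] pointer array, so initializing the (never used) destroy work there would scribble over that array. A paraphrase of the era's layout from include/linux/slab.h (the field list is an assumption, shown for illustration):

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];	/* root caches */
		struct {				/* child caches */
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};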
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..40268410732a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1141,9 +1141,12 @@ again:
 				continue;
 			if (unlikely(details) && details->nonlinear_vma
 			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index)
-				set_pte_at(mm, addr, pte,
-					   pgoff_to_pte(page->index));
+						addr) != page->index) {
+				pte_t ptfile = pgoff_to_pte(page->index);
+				if (pte_soft_dirty(ptent))
+					pte_file_mksoft_dirty(ptfile);
+				set_pte_at(mm, addr, pte, ptfile);
+			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -3115,6 +3118,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
+	if (pte_swp_soft_dirty(orig_pte))
+		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3413,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+			pte_mksoft_dirty(entry);
 		if (anon) {
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 			page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..b2e29acd7e3d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			   swp_entry_to_pte(make_hwpoison_entry(page)));
 	} else if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
+		pte_t swp_pte;
 
 		if (PageSwapCache(page)) {
 			/*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
-		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
+		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
 		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
-		if (page->index != linear_page_index(vma, address))
-			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+		if (page->index != linear_page_index(vma, address)) {
+			pte_t ptfile = pgoff_to_pte(page->index);
+			if (pte_soft_dirty(pteval))
+				pte_file_mksoft_dirty(ptfile);
+			set_pte_at(mm, address, pte, ptfile);
+		}
 
 		/* Move the dirty bit to the physical page now the pte is gone. */
 		if (pte_dirty(pteval))
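Taken together with the memory.c hunks above, the rmap.c changes close the loop for anonymous pages. A sketch of the intended round trip (a reading of the patches, not text from the commit):

/*
 * 1. Task writes a page      -> the write fault marks the pte dirty,
 *                               and on x86 pte_mkdirty() also sets
 *                               the soft-dirty bit.
 * 2. try_to_unmap_one()      -> the pte becomes a swap pte; the bit
 *                               is carried over by pte_swp_mksoft_dirty().
 * 3. do_swap_page()          -> the pte is present again; the bit is
 *                               restored by pte_mksoft_dirty() when
 *                               pte_swp_soft_dirty(orig_pte) is set.
 *
 * The nonlinear-mapping hunks do the same dance for file ptes via
 * pte_file_mksoft_dirty()/pte_file_soft_dirty().
 */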
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	/*
+	 * When the pte keeps the soft dirty bit, the pte generated
+	 * from the swap entry does not have it, but it is still the
+	 * same pte from a logical point of view.
+	 */
+	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+	return pte_same(pte, swp_pte);
+#endif
+}
+
 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
-		if (unlikely(pte_same(*pte, swp_pte))) {
+		if (unlikely(maybe_same_pte(*pte, swp_pte))) {
 			pte_unmap(pte);
 			ret = unuse_pte(vma, pmd, addr, entry, page);
 			if (ret)
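The new swapfile.c helper exists because, after the rmap.c change, a swap pte sitting in a page table may carry the swap soft-dirty bit while the pte freshly generated from the swap entry never does, so the strict pte_same() checks in swapoff would stop matching. An illustrative fragment (assumes the helper sketch above; not runnable on its own):

	/* Two encodings of one swap entry that swapoff must treat as equal. */
	pte_t clean = swp_entry_to_pte(entry);
	pte_t dirty = pte_swp_mksoft_dirty(clean);

	/*
	 * pte_same(clean, dirty) is false, yet both name the same entry,
	 * which is why unuse_pte()/unuse_pte_range() now compare with
	 * maybe_same_pte() instead.
	 */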