author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-08-18 23:40:33 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-08-18 23:40:33 -0400
commit		5bc0b123dcb2bb65b0b1ec57e591459dcf583d3d (patch)
tree		6ee79d18fd716755d49d18c465c1b25fabc43597 /mm
parent		eefbc594abbb1b7e6e7eeadb65ae7c7538474210 (diff)
parent		b36f4be3de1b123d8601de062e7dbfc904f305fb (diff)
Merge 3.11-rc6 into char-misc-next
We want these fixes in this tree.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/fremap.c      | 11
-rw-r--r--	mm/huge_memory.c |  4
-rw-r--r--	mm/hugetlb.c     |  2
-rw-r--r--	mm/memcontrol.c  |  5
-rw-r--r--	mm/memory.c      | 49
-rw-r--r--	mm/mempolicy.c   |  6
-rw-r--r--	mm/mmap.c        |  6
-rw-r--r--	mm/rmap.c        | 14
-rw-r--r--	mm/shmem.c       |  3
-rw-r--r--	mm/slub.c        |  3
-rw-r--r--	mm/swap.c        | 29
-rw-r--r--	mm/swapfile.c    | 19
-rw-r--r--	mm/vmpressure.c  | 28
-rw-r--r--	mm/zbud.c        |  2
14 files changed, 111 insertions(+), 70 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
-	pte_t *pte;
+	pte_t *pte, ptfile;
 	spinlock_t *ptl;
 
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte))
+	ptfile = pgoff_to_pte(pgoff);
+
+	if (!pte_none(*pte)) {
+		if (pte_present(*pte) && pte_soft_dirty(*pte))
+			pte_file_mksoft_dirty(ptfile);
 		zap_pte(mm, vma, addr, pte);
+	}
 
-	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+	set_pte_at(mm, addr, pte, ptfile);
 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
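The fremap change above carries the soft-dirty tracking bit from a present pte over into the file pte that replaces it, so the soft-dirty tracker still sees the page as touched. A minimal userspace sketch of that idea, using a made-up pte encoding and stand-in helper names (not the kernel's pte accessors):

```c
#include <stdio.h>

/* Toy pte: a plain bitmask, for illustration only. */
typedef unsigned long pte_t;

#define PTE_PRESENT	(1UL << 0)
#define PTE_FILE	(1UL << 1)
#define PTE_SOFT_DIRTY	(1UL << 2)
#define PTE_PGOFF_SHIFT	3

static pte_t pgoff_to_pte(unsigned long pgoff)
{
	return PTE_FILE | (pgoff << PTE_PGOFF_SHIFT);
}

static int pte_present(pte_t pte)	{ return !!(pte & PTE_PRESENT); }
static int pte_soft_dirty(pte_t pte)	{ return !!(pte & PTE_SOFT_DIRTY); }
static pte_t pte_file_mksoft_dirty(pte_t pte) { return pte | PTE_SOFT_DIRTY; }

int main(void)
{
	pte_t old = PTE_PRESENT | PTE_SOFT_DIRTY;	/* page was written to */
	pte_t ptfile = pgoff_to_pte(42);

	/* Same shape as install_file_pte(): don't lose the tracking bit. */
	if (pte_present(old) && pte_soft_dirty(old))
		ptfile = pte_file_mksoft_dirty(ptfile);

	printf("file pte soft-dirty: %d\n", pte_soft_dirty(ptfile));
	return 0;
}
```

In the sketch the helper returns the updated value; the real helpers operate on architecture-specific pte bits.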
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
 				      ((1L << PG_referenced) |
 				       (1L << PG_swapbacked) |
 				       (1L << PG_mlocked) |
-				       (1L << PG_uptodate)));
+				       (1L << PG_uptodate) |
+				       (1L << PG_active) |
+				       (1L << PG_unevictable)));
 	page_tail->flags |= (1L << PG_dirty);
 
 	/* clear PageTail before overwriting first_page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c1..c5792a5d87ce 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
-	INIT_WORK(&s->memcg_params->destroy,
-			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 	} else
 		s->memcg_params->is_root_cache = true;
 
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
+	vmpressure_cleanup(&memcg->vmpressure);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
diff --git a/mm/memory.c b/mm/memory.c
index 8d9255b69ff0..b3c6bf9a398e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  * tear-down from @mm. The @fullmm argument is used when @mm is without
  * users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = -1UL;
-	tlb->end = 0;
+	tlb->start = start;
+	tlb->end = end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
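tlb_gather_mmu() now takes the address range being torn down instead of a fullmm flag, and derives "full mm" from the range itself; exit_mmap() passes (0, -1) further below. A self-contained sketch of just that detection (illustrative, not kernel code):

```c
#include <stdio.h>

/* Mirrors the new convention: full-mm teardown is the range 0 .. ~0UL. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	/* Is it from 0 to ~0? */
	return !(start | (end + 1));
}

int main(void)
{
	printf("exit_mmap-style (0, -1UL):      fullmm=%d\n", is_fullmm(0, -1UL));
	printf("partial range (0x1000, 0x2000): fullmm=%d\n", is_fullmm(0x1000, 0x2000));
	return 0;
}
```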
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
 				continue;
 			if (unlikely(details) && details->nonlinear_vma
 			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index)
-				set_pte_at(mm, addr, pte,
-					   pgoff_to_pte(page->index));
+						addr) != page->index) {
+				pte_t ptfile = pgoff_to_pte(page->index);
+				if (pte_soft_dirty(ptent))
+					pte_file_mksoft_dirty(ptfile);
+				set_pte_at(mm, addr, pte, ptfile);
+			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1202,17 +1203,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
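When force_flush trips, the hunk above flushes only the segment walked so far and then narrows the tracked range to what remains before looping again. A toy model of that range bookkeeping (no real TLB involved; the struct and function names are made up):

```c
#include <stdio.h>

/* Toy gather state: just the start/end bookkeeping from the hunk above. */
struct gather {
	unsigned long start;
	unsigned long end;
};

static void flush_range(struct gather *tlb)
{
	printf("flush [%#lx, %#lx)\n", tlb->start, tlb->end);
}

/* Walk [start, end), flushing after every 'batch' bytes processed. */
static void zap_range(struct gather *tlb, unsigned long start,
		      unsigned long end, unsigned long batch)
{
	unsigned long addr = start;

	while (addr != end) {
		unsigned long old_end;

		addr += batch;
		if (addr > end)
			addr = end;

		/* Flush just the segment processed so far... */
		old_end = tlb->end;
		tlb->end = addr;
		flush_range(tlb);

		/* ...then shrink the tracked range to what remains. */
		tlb->start = addr;
		tlb->end = old_end;
	}
}

int main(void)
{
	struct gather tlb = { .start = 0x10000, .end = 0x40000 };

	zap_range(&tlb, tlb.start, tlb.end, 0x10000);
	return 0;
}
```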
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
+	if (pte_swp_soft_dirty(orig_pte))
+		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (flags & FAULT_FLAG_WRITE)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+		pte_mksoft_dirty(entry);
 	if (anon) {
 		inc_mm_counter_fast(mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			swp_entry_to_pte(make_hwpoison_entry(page)));
 	} else if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
+		pte_t swp_pte;
 
 		if (PageSwapCache(page)) {
 			/*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
-		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
+		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
 		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
-		if (page->index != linear_page_index(vma, address))
-			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+		if (page->index != linear_page_index(vma, address)) {
+			pte_t ptfile = pgoff_to_pte(page->index);
+			if (pte_soft_dirty(pteval))
+				pte_file_mksoft_dirty(ptfile);
+			set_pte_at(mm, address, pte, ptfile);
+		}
 
 		/* Move the dirty bit to the physical page now the pte is gone. */
 		if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..8335dbd3fc35 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		}
 	}
 
-	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+	if (offset >= 0)
+		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 	mutex_unlock(&inode->i_mutex);
 	return offset;
 }
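At this point in shmem_file_llseek() a negative offset is an error value, so it must be returned unchanged rather than handed to vfs_setpos(). A rough userspace sketch of the same control flow, with setpos() standing in for the VFS helper:

```c
#include <stdio.h>

typedef long long loff_t;

#define MAX_FILESIZE	(1LL << 40)	/* arbitrary cap for the sketch */
#define ERR_INVAL	22

struct file_like { loff_t pos; };

/* Stand-in for vfs_setpos(): validates and stores the new position. */
static loff_t setpos(struct file_like *f, loff_t offset, loff_t maxsize)
{
	if (offset > maxsize)
		return -ERR_INVAL;
	f->pos = offset;
	return offset;
}

static loff_t llseek_like(struct file_like *f, loff_t offset)
{
	/* As in the hunk: a negative offset is an error, pass it through. */
	if (offset >= 0)
		offset = setpos(f, offset, MAX_FILESIZE);
	return offset;
}

int main(void)
{
	struct file_like f = { .pos = 0 };

	printf("seek ok:  %lld\n", llseek_like(&f, 4096));
	printf("seek err: %lld\n", llseek_like(&f, -ERR_INVAL));
	return 0;
}
```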
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	int pages;
 	int pobjects;
 
-	if (!s->cpu_partial)
-		return;
-
 	do {
 		pages = 0;
 		pobjects = 0;
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-	if (PageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
-	} else if (PageUnevictable(page)) {
-		VM_BUG_ON(PageActive(page));
-	}
-
+	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page);
 }
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
 
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
+	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct page *page, struct page *page_tail,
 		       struct lruvec *lruvec, struct list_head *list)
 {
-	int uninitialized_var(active);
-	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	if (!list)
 		SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail)) {
-		if (PageActive(page)) {
-			SetPageActive(page_tail);
-			active = 1;
-			lru = LRU_ACTIVE_ANON;
-		} else {
-			active = 0;
-			lru = LRU_INACTIVE_ANON;
-		}
-	} else {
-		SetPageUnevictable(page_tail);
-		lru = LRU_UNEVICTABLE;
-	}
-
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
 	else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(page_tail, lruvec, lru);
+		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, active);
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
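Together with the huge_memory.c hunk that copies PG_active and PG_unevictable to the tail page, lru_add_page_tail() no longer computes the target list by hand: the tail page's own flags drive a page_lru()-style lookup. A toy model of that selection (flag layout and list names simplified):

```c
#include <stdio.h>

/* Toy page flags / LRU lists, illustration only. */
enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_UNEVICTABLE };

struct page {
	int active;
	int unevictable;
};

/* A page_lru()-style helper: derive the list from the page's own flags. */
static enum lru_list page_lru(const struct page *page)
{
	if (page->unevictable)
		return LRU_UNEVICTABLE;
	return page->active ? LRU_ACTIVE_ANON : LRU_INACTIVE_ANON;
}

static const char *lru_name(enum lru_list lru)
{
	switch (lru) {
	case LRU_ACTIVE_ANON:	return "active";
	case LRU_INACTIVE_ANON:	return "inactive";
	default:		return "unevictable";
	}
}

int main(void)
{
	struct page head = { .active = 1, .unevictable = 0 };
	struct page tail = head;	/* tail inherits the head's flags */

	printf("tail goes to the %s list\n", lru_name(page_lru(&tail)));
	return 0;
}
```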
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	/*
+	 * When pte keeps soft dirty bit the pte generated
+	 * from swap entry does not has it, still it's same
+	 * pte from logical point of view.
+	 */
+	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+	return pte_same(pte, swp_pte);
+#endif
+}
+
 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
-		if (unlikely(pte_same(*pte, swp_pte))) {
+		if (unlikely(maybe_same_pte(*pte, swp_pte))) {
 			pte_unmap(pte);
 			ret = unuse_pte(vma, pmd, addr, entry, page);
 			if (ret)
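maybe_same_pte() above accepts a pte that differs from the expected swap pte only in the soft-dirty bit. A small sketch of that comparison with a fake pte encoding (bit positions are illustrative):

```c
#include <stdio.h>

/* Toy pte encoding, illustration only. */
typedef unsigned long pte_t;

#define PTE_SWP_SOFT_DIRTY (1UL << 0)

static pte_t pte_swp_mksoft_dirty(pte_t pte) { return pte | PTE_SWP_SOFT_DIRTY; }
static int pte_same(pte_t a, pte_t b)        { return a == b; }

/*
 * Same comparison as the hunk above: a pte that only differs from the
 * expected swap pte by the soft-dirty bit still refers to the same swap
 * entry, so treat it as a match.
 */
static int maybe_same_pte(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte, swp_pte) ||
	       pte_same(pte, pte_swp_mksoft_dirty(swp_pte));
}

int main(void)
{
	pte_t swp_pte = 0x1234UL << 1;		/* some swap entry */
	pte_t dirty   = pte_swp_mksoft_dirty(swp_pte);
	pte_t other   = 0x9999UL << 1;

	printf("clean matches: %d\n", maybe_same_pte(swp_pte, swp_pte));
	printf("dirty matches: %d\n", maybe_same_pte(dirty, swp_pte));
	printf("other matches: %d\n", maybe_same_pte(other, swp_pte));
	return 0;
}
```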
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	if (!vmpr->scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
 	do {
 		if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 	if (!scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	vmpr->scanned += scanned;
 	vmpr->reclaimed += reclaimed;
 	scanned = vmpr->scanned;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
-	if (scanned < vmpressure_win || work_pending(&vmpr->work))
+	if (scanned < vmpressure_win)
 		return;
 	schedule_work(&vmpr->work);
 }
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
  */
 void vmpressure_init(struct vmpressure *vmpr)
 {
-	mutex_init(&vmpr->sr_lock);
+	spin_lock_init(&vmpr->sr_lock);
 	mutex_init(&vmpr->events_lock);
 	INIT_LIST_HEAD(&vmpr->events);
 	INIT_WORK(&vmpr->work, vmpressure_work_fn);
 }
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr: Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+	/*
+	 * Make sure there is no pending work before eventfd infrastructure
+	 * goes away.
+	 */
+	flush_work(&vmpr->work);
+}
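vmpressure_cleanup() makes sure no work item is still pending before the structure that embeds it is freed; mem_cgroup_css_offline() calls it in the memcontrol.c hunk above. A toy, single-threaded model of the flush-before-free pattern (the schedule_work()/flush_work() here are stand-ins, not the kernel workqueue API):

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy single-threaded "workqueue": one pending callback per work item. */
struct work_struct {
	void (*fn)(struct work_struct *);
	int pending;
};

struct vmpressure_like {
	struct work_struct work;	/* must stay alive while pending */
	int events;			/* stands in for the event list */
};

static void schedule_work(struct work_struct *w) { w->pending = 1; }

static void flush_work(struct work_struct *w)
{
	/* Run anything still queued so nothing touches freed memory later. */
	if (w->pending) {
		w->pending = 0;
		w->fn(w);
	}
}

static void work_fn(struct work_struct *w)
{
	/* work is the first member, so the cast recovers the container. */
	struct vmpressure_like *v = (struct vmpressure_like *)w;

	printf("work ran, events=%d\n", v->events);
}

int main(void)
{
	struct vmpressure_like *v = calloc(1, sizeof(*v));

	v->work.fn = work_fn;
	v->events = 3;
	schedule_work(&v->work);

	/* Cleanup: flush pending work before the structure goes away. */
	flush_work(&v->work);
	free(v);
	return 0;
}
```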
diff --git a/mm/zbud.c b/mm/zbud.c
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
 
 	if (size <= 0 || gfp & __GFP_HIGHMEM)
 		return -EINVAL;
-	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 		return -ENOSPC;
 	chunks = size_to_chunks(size);
 	spin_lock(&pool->lock);