Diffstat (limited to 'mm')

 mm/backing-dev.c       |  23
 mm/compaction.c        |  24
 mm/filemap.c           |   8
 mm/filemap_xip.c       |   7
 mm/huge_memory.c       |   4
 mm/hugetlb.c           |   9
 mm/kmemleak.c          |   3
 mm/memblock.c          |   7
 mm/memcontrol.c        |  14
 mm/memory.c            |  37
 mm/migrate.c           |   2
 mm/nommu.c             |   9
 mm/page_alloc.c        |  21
 mm/process_vm_access.c |  23
 mm/shmem.c             |  53
 mm/swap.c              |   2
 mm/vmscan.c            | 124
 17 files changed, 224 insertions(+), 146 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
 	if (bdi->wb.task) {
 		trace_writeback_wake_thread(bdi);
 		wake_up_process(bdi->wb.task);
-	} else {
+	} else if (bdi->dev) {
 		/*
 		 * When bdi tasks are inactive for long time, they are killed.
 		 * In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
+	struct task_struct *task;
+
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task)
-		kthread_stop(bdi->wb.task);
+	spin_lock_bh(&bdi->wb_lock);
+	task = bdi->wb.task;
+	bdi->wb.task = NULL;
+	spin_unlock_bh(&bdi->wb_lock);
+
+	if (task)
+		kthread_stop(task);
 }
 
 /*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-	if (bdi->dev) {
+	struct device *dev = bdi->dev;
+
+	if (dev) {
 		bdi_set_min_ratio(bdi, 0);
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
-		device_unregister(bdi->dev);
+
+		spin_lock_bh(&bdi->wb_lock);
 		bdi->dev = NULL;
+		spin_unlock_bh(&bdi->wb_lock);
+
+		device_unregister(dev);
 	}
 }
 EXPORT_SYMBOL(bdi_unregister);
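Note: bdi->wb_lock now serializes shutdown against wakeup_timer_fn(): the task pointer is fetched and cleared under the lock, so a concurrent waker either sees a live task or sees NULL and takes the forker fallback; kthread_stop() then runs outside the lock because it can sleep. A minimal userspace sketch of that fetch-and-clear idiom, using pthreads; all names here are invented stand-ins, not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    struct task { int stopped; };                 /* stand-in for task_struct */

    static void task_stop(struct task *t)         /* stand-in for kthread_stop() */
    {
        t->stopped = 1;
    }

    struct worker {
        pthread_mutex_t lock;                     /* plays the role of bdi->wb_lock */
        struct task *task;                        /* plays the role of bdi->wb.task */
    };

    static void worker_shutdown(struct worker *w)
    {
        struct task *task;

        /* Clear the shared pointer under the lock so a concurrent waker
         * sees NULL and takes its fallback path; stop the thread only
         * after unlocking, since stopping may block. */
        pthread_mutex_lock(&w->lock);
        task = w->task;
        w->task = NULL;
        pthread_mutex_unlock(&w->lock);

        if (task)
            task_stop(task);
    }

    int main(void)
    {
        struct task t = { 0 };
        struct worker w = { PTHREAD_MUTEX_INITIALIZER, &t };

        worker_shutdown(&w);
        printf("stopped=%d task=%p\n", t.stopped, (void *)w.task);
        return 0;
    }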
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..d9ebebe1a2aa 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		} else if (!locked)
 			spin_lock_irq(&zone->lru_lock);
 
+		/*
+		 * migrate_pfn does not necessarily start aligned to a
+		 * pageblock. Ensure that pfn_valid is called when moving
+		 * into a new MAX_ORDER_NR_PAGES range in case of large
+		 * memory holes within the zone
+		 */
+		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+			if (!pfn_valid(low_pfn)) {
+				low_pfn += MAX_ORDER_NR_PAGES - 1;
+				continue;
+			}
+		}
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
 
-		/* Get the page and skip if free */
+		/*
+		 * Get the page and ensure the page is within the same zone.
+		 * See the comment in isolate_freepages about overlapping
+		 * nodes. It is deliberate that the new zone lock is not taken
+		 * as memory compaction should not move pages between nodes.
+		 */
 		page = pfn_to_page(low_pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Skip if free */
 		if (PageBuddy(page))
 			continue;
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 97f49ed35bd2..b66275757c28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	unsigned long seg = 0;
 	size_t count;
 	loff_t *ppos = &iocb->ki_pos;
-	struct blk_plug plug;
 
 	count = 0;
 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 	if (retval)
 		return retval;
 
-	blk_start_plug(&plug);
-
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (filp->f_flags & O_DIRECT) {
 		loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
 			if (!retval) {
+				struct blk_plug plug;
+
+				blk_start_plug(&plug);
 				retval = mapping->a_ops->direct_IO(READ, iocb,
 							iov, pos, nr_segs);
+				blk_finish_plug(&plug);
 			}
 			if (retval > 0) {
 				*ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			break;
 	}
 out:
-	blk_finish_plug(&plug);
 	return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f91b2f687343..a4eb31132229 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -263,7 +263,12 @@ found:
 							xip_pfn);
 		if (err == -ENOMEM)
 			return VM_FAULT_OOM;
-		BUG_ON(err);
+		/*
+		 * err == -EBUSY is fine, we've raced against another thread
+		 * that faulted-in the same page
+		 */
+		if (err != -EBUSY)
+			BUG_ON(err);
 		return VM_FAULT_NOPAGE;
 	} else {
 		int err, ret = VM_FAULT_OOM;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b3ffc21ce801..91d3efb25d15 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2083,7 +2083,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
 
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -2113,7 +2113,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
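Note: the `NR_CPUS != 1` guard exists because on uniprocessor builds spinlocks compile away and spin_is_locked() is constantly 0, so the unguarded assertion would always fire. A toy userspace model of the guarded assertion, assuming UP semantics; the names are invented:

    #include <assert.h>
    #include <stdio.h>

    #define NR_CPUS 1                      /* pretend this is a UP build */

    static int spin_is_locked_model(void)  /* UP spinlocks don't track state */
    {
        return 0;
    }

    int main(void)
    {
        /* Old form -- would always trip on UP:
         *     assert(spin_is_locked_model());
         * Guarded form from the patch: only meaningful when NR_CPUS != 1. */
        assert(!(NR_CPUS != 1 && !spin_is_locked_model()));
        printf("assertion skipped on UP as intended\n");
        return 0;
    }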
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4cd2ae..5f34bd8dda34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ retry:
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ retry:
 				VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@ retry:
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c833addd94d7..45eb6217bf38 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1036,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
 		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
@@ -1757,6 +1757,7 @@ void __init kmemleak_init(void)
 
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
+		atomic_set(&kmemleak_early_log, 0);
 		kmemleak_disable();
 		return;
 	}
diff --git a/mm/memblock.c b/mm/memblock.c
index 2f55f19b7c86..77b5f227e1d8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -106,14 +106,17 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
 
-	/* adjust @start to avoid underflow and allocating the first page */
-	start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+	/* avoid allocating the first page */
+	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
 
 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 
+		if (this_end < size)
+			continue;
+
 		cand = round_down(this_end - size, align);
 		if (cand >= this_start)
 			return cand;
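Note: the `this_end < size` check is what the old max3() trick was really protecting against. With unsigned phys_addr_t, `this_end - size` wraps around when a clamped range is smaller than the request, and round_down() then produces an enormous bogus candidate. A standalone demonstration with made-up values:

    #include <stdio.h>

    typedef unsigned long long phys_addr_t;  /* assumption: 64-bit phys addrs */

    #define round_down(x, y) ((x) & ~((y) - 1))

    int main(void)
    {
        phys_addr_t this_end = 0x1000;  /* candidate free range ends at 4KB */
        phys_addr_t size     = 0x2000;  /* but the allocation wants 8KB */
        phys_addr_t align    = 0x1000;

        /* Unsigned subtraction wraps: 0x1000 - 0x2000 = 0xfffffffffffff000 */
        printf("unguarded candidate: %#llx\n",
               round_down(this_end - size, align));

        /* The added check simply skips ranges too small to hold the request. */
        if (this_end < size)
            printf("guarded: range skipped\n");
        return 0;
    }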
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3dbff4dcde35..228d6461c12a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -379,7 +379,7 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 void sock_update_memcg(struct sock *sk)
 {
-	if (static_branch(&memcg_socket_limit_enabled)) {
+	if (mem_cgroup_sockets_enabled) {
 		struct mem_cgroup *memcg;
 
 		BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -411,7 +411,7 @@ EXPORT_SYMBOL(sock_update_memcg);
 
 void sock_release_memcg(struct sock *sk)
 {
-	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 		struct mem_cgroup *memcg;
 		WARN_ON(!sk->sk_cgrp->memcg);
 		memcg = sk->sk_cgrp->memcg;
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_THRESH))) {
-		bool do_softlimit, do_numainfo;
+		bool do_softlimit;
+		bool do_numainfo __maybe_unused;
 
 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -3247,7 +3248,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
 	return ret;
 }
 
@@ -4413,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	 */
 	BUG_ON(!thresholds);
 
+	if (!thresholds->primary)
+		goto unlock;
+
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before removing */
@@ -4461,7 +4465,7 @@ swap_buffers:
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
-
+unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583c2605..fa2f04e0337c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			}
 			if (likely(!non_swap_entry(entry)))
 				rss[MM_SWAPENTS]++;
-			else if (is_write_migration_entry(entry) &&
-					is_cow_mapping(vm_flags)) {
-				/*
-				 * COW mappings require pages in both parent
-				 * and child to be set to read.
-				 */
-				make_migration_entry_read(&entry);
-				pte = swp_entry_to_pte(entry);
-				set_pte_at(src_mm, addr, src_pte, pte);
+			else if (is_migration_entry(entry)) {
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]++;
+				else
+					rss[MM_FILEPAGES]++;
+
+				if (is_write_migration_entry(entry) &&
+				    is_cow_mapping(vm_flags)) {
+					/*
+					 * COW mappings require pages in both
+					 * parent and child to be set to read.
+					 */
+					make_migration_entry_read(&entry);
+					pte = swp_entry_to_pte(entry);
+					set_pte_at(src_mm, addr, src_pte, pte);
+				}
 			}
 		}
 		goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
 
 			if (!non_swap_entry(entry))
 				rss[MM_SWAPENTS]--;
+			else if (is_migration_entry(entry)) {
+				struct page *page;
+
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]--;
+				else
+					rss[MM_FILEPAGES]--;
+			}
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		}
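Note: the point of the two new branches is symmetry. copy_one_pte() now charges a migration entry to MM_ANONPAGES or MM_FILEPAGES at fork time, so zap_pte_range() must discharge the same bucket at unmap time, otherwise the task's rss drifts. A toy model of that invariant, with invented names:

    #include <assert.h>
    #include <stdbool.h>

    enum { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, NR_MM_COUNTERS };

    static long rss[NR_MM_COUNTERS];

    /* Models the charge copy_one_pte() makes for a migration entry. */
    static void copy_migration_entry(bool page_is_anon)
    {
        rss[page_is_anon ? MM_ANONPAGES : MM_FILEPAGES]++;
    }

    /* Models the matching discharge in zap_pte_range(). */
    static void zap_migration_entry(bool page_is_anon)
    {
        rss[page_is_anon ? MM_ANONPAGES : MM_FILEPAGES]--;
    }

    int main(void)
    {
        copy_migration_entry(true);
        zap_migration_entry(true);
        assert(rss[MM_ANONPAGES] == 0);  /* balanced: no drift after fork+exit */
        return 0;
    }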
diff --git a/mm/migrate.c b/mm/migrate.c
index 9871a56d82c3..df141f60289e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	ClearPageSwapCache(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page->mapping = NULL;
 
 	/*
 	 * If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	} else {
 		if (remap_swapcache)
 			remove_migration_ptes(page, newpage);
+		page->mapping = NULL;
 	}
 
 	unlock_page(newpage);
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	if (vma->vm_next)
 		vma->vm_next->vm_prev = vma->vm_prev;
-
-	vma->vm_mm = NULL;
 }
 
 /*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }
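Note: all three new i_mmap_mutex sites nest strictly inside nommu_region_sem, and the early-return path drops them in reverse order; keeping that one global order is what makes the change deadlock-free. A minimal pthread model of the nesting, with invented names:

    #include <pthread.h>

    static pthread_rwlock_t region_sem = PTHREAD_RWLOCK_INITIALIZER;  /* outer */
    static pthread_mutex_t i_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;  /* inner */

    static int shrink_mappings(void)
    {
        pthread_rwlock_wrlock(&region_sem);   /* 1: outer lock first */
        pthread_mutex_lock(&i_mmap_mutex);    /* 2: inner lock second */

        /* ... walk the mapping tree; any bailout must unlock in
         * reverse order, inner before outer, as the patch does ... */

        pthread_mutex_unlock(&i_mmap_mutex);
        pthread_rwlock_unlock(&region_sem);
        return 0;
    }

    int main(void)
    {
        return shrink_mappings();
    }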
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f4a1bb..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 		do_div(max, bucketsize);
 	}
+	max = min(max, 0x80000000ULL);
 
 	if (numentries > max)
 		numentries = max;
@@ -5413,7 +5414,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	struct zone *zone;
+	unsigned long pfn;
+
+	/*
+	 * We have to be careful here because we are iterating over memory
+	 * sections which are not zone aware so we might end up outside of
+	 * the zone but still within the section.
+	 * We have to take care about the node as well. If the node is offline
+	 * its NODE_DATA will be NULL - see page_zone.
+	 */
+	if (!node_online(page_to_nid(page)))
+		return false;
+
+	zone = page_zone(page);
+	pfn = page_to_pfn(page);
+	if (zone->zone_start_pfn > pfn ||
+			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+		return false;
+
 	return __count_immobile_pages(zone, page, 0);
 }
 
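Note: the span test reduces to a half-open interval check: a pfn belongs to its zone only if it lies in [zone_start_pfn, zone_start_pfn + spanned_pages). A standalone sketch with invented numbers; zone_model mirrors just the two fields used here:

    #include <stdbool.h>
    #include <stdio.h>

    struct zone_model {               /* hypothetical, not the kernel struct */
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    static bool pfn_in_zone_span(const struct zone_model *z, unsigned long pfn)
    {
        return z->zone_start_pfn <= pfn &&
               pfn < z->zone_start_pfn + z->spanned_pages;
    }

    int main(void)
    {
        struct zone_model normal = { .zone_start_pfn = 0x1000,
                                     .spanned_pages = 0x8000 };

        printf("%d\n", pfn_in_zone_span(&normal, 0x0fff));  /* 0: below the zone */
        printf("%d\n", pfn_in_zone_span(&normal, 0x9000));  /* 0: past the span */
        printf("%d\n", pfn_in_zone_span(&normal, 0x4000));  /* 1: inside */
        return 0;
    }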
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e920aa3ce104..c20ff48994c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto free_proc_pages;
 	}
 
-	task_lock(task);
-	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-		task_unlock(task);
-		rc = -EPERM;
-		goto put_task_struct;
-	}
-	mm = task->mm;
-
-	if (!mm || (task->flags & PF_KTHREAD)) {
-		task_unlock(task);
-		rc = -EINVAL;
+	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	if (!mm || IS_ERR(mm)) {
+		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+		/*
+		 * Explicitly map EACCES to EPERM as EPERM is a more
+		 * appropriate error code for process_vm_readv/writev
+		 */
+		if (rc == -EACCES)
+			rc = -EPERM;
 		goto put_task_struct;
 	}
 
-	atomic_inc(&mm->mm_users);
-	task_unlock(task);
-
 	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
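Note: the rc mapping above is driven by mm_access()'s tri-state return: NULL for a task with no mm (e.g. a kernel thread), an error pointer for a permission failure, and a valid mm otherwise. A userspace model of that error-pointer convention, with a hypothetical fake_mm_access(); errno values assume Linux:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *fake_mm_access(int allowed, int has_mm)
    {
        static int mm;                  /* stands in for a real mm_struct */

        if (!allowed)
            return ERR_PTR(-EACCES);    /* permission failure */
        if (!has_mm)
            return NULL;                /* kernel thread: no mm at all */
        return &mm;
    }

    int main(void)
    {
        void *mm = fake_mm_access(0, 1);
        long rc = 0;

        if (!mm || IS_ERR(mm)) {
            rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
            if (rc == -EACCES)
                rc = -EPERM;            /* same mapping as the patch */
        }
        printf("rc = %ld\n", rc);       /* prints rc = -1 (-EPERM) */
        return 0;
    }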
diff --git a/mm/shmem.c b/mm/shmem.c
index feead1943d92..269d049294ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
 	int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
 			pvec->pages[j++] = page;
 	}
 	pvec->nr = j;
-	pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	pgoff_t index = 0;
+
+	pagevec_init(&pvec, 0);
+	/*
+	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+	 */
+	while (!mapping_unevictable(mapping)) {
+		/*
+		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 */
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					PAGEVEC_SIZE, pvec.pages, indices);
+		if (!pvec.nr)
+			break;
+		index = indices[pvec.nr - 1] + 1;
+		shmem_deswap_pagevec(&pvec);
+		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		pagevec_release(&pvec);
+		cond_resched();
+	}
 }
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
 		index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			continue;
 		}
 		if (index == start && indices[0] > end) {
-			shmem_pagevec_release(&pvec);
+			shmem_deswap_pagevec(&pvec);
+			pagevec_release(&pvec);
 			break;
 		}
 		mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
 	}
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 			user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
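Note: shmem_deswap_pagevec() is now a pure in-place filter: it squeezes swap entries out of the pagevec so that check_move_unevictable_pages() only ever sees real pages, and the caller releases the pagevec afterwards. The shuffle in isolation, assuming (as a simplification of shmem's exceptional radix-tree entries) that swap entries are low-bit-tagged pointers:

    #include <assert.h>
    #include <stdint.h>

    /* Assumption: tagged pointers stand in for radix-tree swap entries. */
    static int is_swap_entry(void *p)
    {
        return (uintptr_t)p & 1;
    }

    /* Mirrors the shuffle-up loop: keep pages, drop swap entries. */
    static unsigned deswap(void **pages, unsigned nr)
    {
        unsigned i, j = 0;

        for (i = 0; i < nr; i++)
            if (!is_swap_entry(pages[i]))
                pages[j++] = pages[i];
        return j;                       /* the new pvec->nr */
    }

    int main(void)
    {
        int a, b;
        void *pages[4] = { &a, (void *)(uintptr_t)3, &b, (void *)(uintptr_t)5 };

        assert(deswap(pages, 4) == 2);
        assert(pages[0] == &a && pages[1] == &b);
        return 0;
    }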
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -659,7 +659,7 @@ void lru_add_page_tail(struct zone* zone,
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
 	VM_BUG_ON(PageLRU(page_tail));
-	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
 
 	SetPageLRU(page_tail);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396f7953..c52b23552659 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
 		 * When racing with an mlock or AS_UNEVICTABLE clearing
 		 * (page is unlocked) make sure that if the other thread
 		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_page,
+		 * isolation/check_move_unevictable_pages,
 		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 		 * the page back to the evictable list.
 		 *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages:	array of pages to check
+ * @nr_pages:	number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
 *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
 */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
-
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						   LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
 
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
-
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
 	}
 
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
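Note: check_move_unevictable_pages() batches zone->lru_lock: the lock is held across consecutive pages of the same zone, cycled only on a zone boundary, and the PGSCANNED/PGRESCUED counters are flushed once at the end under the last lock. The lock-batching shape, modeled in userspace with pthreads and invented names:

    #include <pthread.h>
    #include <stdio.h>

    #define NR_ZONES 2

    static pthread_mutex_t zone_lock[NR_ZONES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    int main(void)
    {
        /* zone id of each "page" in submission order */
        int page_zone[] = { 0, 0, 0, 1, 1, 0 };
        int i, zone = -1, lock_switches = 0;

        for (i = 0; i < 6; i++) {
            if (page_zone[i] != zone) {
                if (zone >= 0)
                    pthread_mutex_unlock(&zone_lock[zone]);
                zone = page_zone[i];
                pthread_mutex_lock(&zone_lock[zone]);
                lock_switches++;
            }
            /* ... per-page LRU work under zone_lock[zone] ... */
        }
        if (zone >= 0)
            pthread_mutex_unlock(&zone_lock[zone]);

        printf("lock acquisitions: %d for 6 pages\n", lock_switches);  /* 3 */
        return 0;
    }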