author    Mauro Carvalho Chehab <mchehab@redhat.com>    2012-03-19 12:41:24 -0400
committer Mauro Carvalho Chehab <mchehab@redhat.com>    2012-03-19 12:41:24 -0400
commit    9ce28d827f74d0acdd058bded8bab5309b0f5c8f (patch)
tree      634f22e8df9c7fd3966b3639e3e997436751ca50 /mm
parent    f074ff92b5b26f3a559fab1203c36e140ea8d067 (diff)
parent    c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
Merge tag 'v3.3' into staging/for_v3.4
* tag 'v3.3': (1646 commits)
  Linux 3.3
  Don't limit non-nested epoll paths
  netfilter: ctnetlink: fix race between delete and timeout expiration
  ipv6: Don't dev_hold(dev) in ip6_mc_find_dev_rcu.
  nilfs2: fix NULL pointer dereference in nilfs_load_super_block()
  nilfs2: clamp ns_r_segments_percentage to [1, 99]
  afs: Remote abort can cause BUG in rxrpc code
  afs: Read of file returns EBADMSG
  C6X: remove dead code from entry.S
  wimax/i2400m: fix erroneous NETDEV_TX_BUSY use
  net/hyperv: fix erroneous NETDEV_TX_BUSY use
  net/usbnet: reserve headroom on rx skbs
  bnx2x: fix memory leak in bnx2x_init_firmware()
  bnx2x: fix a crash on corrupt firmware file
  sch_sfq: revert dont put new flow at the end of flows
  ipv6: fix icmp6_dst_alloc()
  MAINTAINERS: Add Serge as maintainer of capabilities
  drivers/video/backlight/s6e63m0.c: fix corruption storing gamma mode
  MAINTAINERS: add entry for exynos mipi display drivers
  MAINTAINERS: fix link to Gustavo Padovans tree
  ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c | 23
-rw-r--r--  mm/compaction.c | 24
-rw-r--r--  mm/filemap.c | 8
-rw-r--r--  mm/filemap_xip.c | 7
-rw-r--r--  mm/huge_memory.c | 10
-rw-r--r--  mm/hugetlb.c | 11
-rw-r--r--  mm/kmemleak.c | 3
-rw-r--r--  mm/ksm.c | 11
-rw-r--r--  mm/memblock.c | 13
-rw-r--r--  mm/memcontrol.c | 167
-rw-r--r--  mm/memory.c | 37
-rw-r--r--  mm/mempolicy.c | 3
-rw-r--r--  mm/migrate.c | 4
-rw-r--r--  mm/mlock.c | 3
-rw-r--r--  mm/mmap.c | 17
-rw-r--r--  mm/mprotect.c | 3
-rw-r--r--  mm/nommu.c | 9
-rw-r--r--  mm/page_alloc.c | 21
-rw-r--r--  mm/page_cgroup.c | 4
-rw-r--r--  mm/percpu-vm.c | 3
-rw-r--r--  mm/process_vm_access.c | 23
-rw-r--r--  mm/shmem.c | 53
-rw-r--r--  mm/swap.c | 10
-rw-r--r--  mm/swap_state.c | 10
-rw-r--r--  mm/vmscan.c | 124
25 files changed, 356 insertions(+), 245 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
 	if (bdi->wb.task) {
 		trace_writeback_wake_thread(bdi);
 		wake_up_process(bdi->wb.task);
-	} else {
+	} else if (bdi->dev) {
 		/*
 		 * When bdi tasks are inactive for long time, they are killed.
 		 * In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
+	struct task_struct *task;
+
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task)
-		kthread_stop(bdi->wb.task);
+	spin_lock_bh(&bdi->wb_lock);
+	task = bdi->wb.task;
+	bdi->wb.task = NULL;
+	spin_unlock_bh(&bdi->wb_lock);
+
+	if (task)
+		kthread_stop(task);
 }
 
 /*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-	if (bdi->dev) {
+	struct device *dev = bdi->dev;
+
+	if (dev) {
 		bdi_set_min_ratio(bdi, 0);
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
-		device_unregister(bdi->dev);
+
+		spin_lock_bh(&bdi->wb_lock);
 		bdi->dev = NULL;
+		spin_unlock_bh(&bdi->wb_lock);
+
+		device_unregister(dev);
 	}
 }
 EXPORT_SYMBOL(bdi_unregister);
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..d9ebebe1a2aa 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		} else if (!locked)
 			spin_lock_irq(&zone->lru_lock);
 
+		/*
+		 * migrate_pfn does not necessarily start aligned to a
+		 * pageblock. Ensure that pfn_valid is called when moving
+		 * into a new MAX_ORDER_NR_PAGES range in case of large
+		 * memory holes within the zone
+		 */
+		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+			if (!pfn_valid(low_pfn)) {
+				low_pfn += MAX_ORDER_NR_PAGES - 1;
+				continue;
+			}
+		}
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
 
-		/* Get the page and skip if free */
+		/*
+		 * Get the page and ensure the page is within the same zone.
+		 * See the comment in isolate_freepages about overlapping
+		 * nodes. It is deliberate that the new zone lock is not taken
+		 * as memory compaction should not move pages between nodes.
+		 */
 		page = pfn_to_page(low_pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Skip if free */
 		if (PageBuddy(page))
 			continue;
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 97f49ed35bd2..b66275757c28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	unsigned long seg = 0;
 	size_t count;
 	loff_t *ppos = &iocb->ki_pos;
-	struct blk_plug plug;
 
 	count = 0;
 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 	if (retval)
 		return retval;
 
-	blk_start_plug(&plug);
-
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (filp->f_flags & O_DIRECT) {
 		loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
 		if (!retval) {
+			struct blk_plug plug;
+
+			blk_start_plug(&plug);
 			retval = mapping->a_ops->direct_IO(READ, iocb,
 							iov, pos, nr_segs);
+			blk_finish_plug(&plug);
 		}
 		if (retval > 0) {
 			*ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			break;
 	}
 out:
-	blk_finish_plug(&plug);
 	return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f91b2f687343..a4eb31132229 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -263,7 +263,12 @@ found:
 							xip_pfn);
 		if (err == -ENOMEM)
 			return VM_FAULT_OOM;
-		BUG_ON(err);
+		/*
+		 * err == -EBUSY is fine, we've raced against another thread
+		 * that faulted-in the same page
+		 */
+		if (err != -EBUSY)
+			BUG_ON(err);
 		return VM_FAULT_NOPAGE;
 	} else {
 		int err, ret = VM_FAULT_OOM;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b3ffc21ce801..8f7fc394f636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 
 	ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		VM_BUG_ON(!PageHead(page));
+		tlb->mm->nr_ptes--;
 		spin_unlock(&tlb->mm->page_table_lock);
 		tlb_remove_page(tlb, page);
 		pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
 		pte_unmap(pte);
 	}
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	/*
 	 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
@@ -2083,7 +2083,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
 
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -2113,7 +2113,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4cd2ae..a876871f6be5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			set_page_dirty(page);
 		list_add(&page->lru, &page_list);
 	}
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		page_remove_rmap(page);
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ retry:
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ retry:
 				VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@ retry:
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c833addd94d7..45eb6217bf38 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1036,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
 		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
@@ -1757,6 +1757,7 @@ void __init kmemleak_init(void)
 
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
+		atomic_set(&kmemleak_early_log, 0);
 		kmemleak_disable();
 		return;
 	}
diff --git a/mm/ksm.c b/mm/ksm.c
index 1925ffbfb27f..310544a379ae 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid. Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 		copy_user_highpage(new_page, page, address, vma);
 
 		SetPageDirty(new_page);
diff --git a/mm/memblock.c b/mm/memblock.c
index 2f55f19b7c86..99f285599501 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -99,21 +99,21 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
 
-	/* adjust @start to avoid underflow and allocating the first page */
-	start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+	/* avoid allocating the first page */
+	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
 
 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 
+		if (this_end < size)
+			continue;
+
 		cand = round_down(this_end - size, align);
 		if (cand >= this_start)
 			return cand;
@@ -728,6 +728,9 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 {
 	phys_addr_t found;
 
+	/* align @size to avoid excessive fragmentation on reserved array */
+	size = round_up(size, align);
+
 	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
 	if (found && !memblock_reserve(found, size))
 		return found;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3dbff4dcde35..58a08fc7414a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -230,10 +230,30 @@ struct mem_cgroup {
 	 * the counter to account for memory usage
 	 */
 	struct res_counter res;
-	/*
-	 * the counter to account for mem+swap usage.
-	 */
-	struct res_counter memsw;
+
+	union {
+		/*
+		 * the counter to account for mem+swap usage.
+		 */
+		struct res_counter memsw;
+
+		/*
+		 * rcu_freeing is used only when freeing struct mem_cgroup,
+		 * so put it into a union to avoid wasting more memory.
+		 * It must be disjoint from the css field. It could be
+		 * in a union with the res field, but res plays a much
+		 * larger part in mem_cgroup life than memsw, and might
+		 * be of interest, even at time of free, when debugging.
+		 * So share rcu_head with the less interesting memsw.
+		 */
+		struct rcu_head rcu_freeing;
+		/*
+		 * But when using vfree(), that cannot be done at
+		 * interrupt time, so we must then queue the work.
+		 */
+		struct work_struct work_freeing;
+	};
+
 	/*
 	 * Per cgroup active and inactive list, similar to the
 	 * per zone LRU lists.
@@ -379,7 +399,7 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 void sock_update_memcg(struct sock *sk)
 {
-	if (static_branch(&memcg_socket_limit_enabled)) {
+	if (mem_cgroup_sockets_enabled) {
 		struct mem_cgroup *memcg;
 
 		BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -411,7 +431,7 @@ EXPORT_SYMBOL(sock_update_memcg);
 
 void sock_release_memcg(struct sock *sk)
 {
-	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 		struct mem_cgroup *memcg;
 		WARN_ON(!sk->sk_cgrp->memcg);
 		memcg = sk->sk_cgrp->memcg;
@@ -776,7 +796,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_THRESH))) {
-		bool do_softlimit, do_numainfo;
+		bool do_softlimit;
+		bool do_numainfo __maybe_unused;
 
 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -1041,6 +1062,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2407,8 +2441,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2419,6 +2457,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU. Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2442,9 +2495,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2642,7 +2704,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2662,35 +2724,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2768,13 +2801,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3026,23 +3062,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3247,7 +3266,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3331,7 +3350,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4413,6 +4432,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	 */
 	BUG_ON(!thresholds);
 
+	if (!thresholds->primary)
+		goto unlock;
+
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before removing */
@@ -4461,7 +4483,7 @@ swap_buffers:
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
-
+unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
 
@@ -4778,6 +4800,27 @@ out_free:
 }
 
 /*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = container_of(work, struct mem_cgroup, work_freeing);
+	vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+	INIT_WORK(&memcg->work_freeing, vfree_work);
+	schedule_work(&memcg->work_freeing);
+}
+
+/*
  * At destroying mem_cgroup, references from swap_cgroup can remain.
  * (scanning all at force_empty is too costly...)
  *
@@ -4800,9 +4843,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
 	free_percpu(memcg->stat);
 	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-		kfree(memcg);
+		kfree_rcu(memcg, rcu_freeing);
 	else
-		vfree(memcg);
+		call_rcu(&memcg->rcu_freeing, vfree_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583c2605..fa2f04e0337c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		}
 		if (likely(!non_swap_entry(entry)))
 			rss[MM_SWAPENTS]++;
-		else if (is_write_migration_entry(entry) &&
-				is_cow_mapping(vm_flags)) {
-			/*
-			 * COW mappings require pages in both parent
-			 * and child to be set to read.
-			 */
-			make_migration_entry_read(&entry);
-			pte = swp_entry_to_pte(entry);
-			set_pte_at(src_mm, addr, src_pte, pte);
+		else if (is_migration_entry(entry)) {
+			page = migration_entry_to_page(entry);
+
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]++;
+			else
+				rss[MM_FILEPAGES]++;
+
+			if (is_write_migration_entry(entry) &&
+			    is_cow_mapping(vm_flags)) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&entry);
+				pte = swp_entry_to_pte(entry);
+				set_pte_at(src_mm, addr, src_pte, pte);
+			}
 		}
 	}
 	goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
 
 			if (!non_swap_entry(entry))
 				rss[MM_SWAPENTS]--;
+			else if (is_migration_entry(entry)) {
+				struct page *page;
+
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]--;
+				else
+					rss[MM_FILEPAGES]--;
+			}
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 06b145fb64ab..47296fee23db 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -640,10 +640,11 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 	unsigned long vmstart;
 	unsigned long vmend;
 
-	vma = find_vma_prev(mm, start, &prev);
+	vma = find_vma(mm, start);
 	if (!vma || vma->vm_start > start)
 		return -EFAULT;
 
+	prev = vma->vm_prev;
 	if (start > vma->vm_start)
 		prev = vma;
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 9871a56d82c3..1503b6b54ecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	ClearPageSwapCache(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page->mapping = NULL;
 
 	/*
 	 * If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	} else {
 		if (remap_swapcache)
 			remove_migration_ptes(page, newpage);
+		page->mapping = NULL;
 	}
 
 	unlock_page(newpage);
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (!newpage)
 		return -ENOMEM;
 
-	mem_cgroup_reset_owner(newpage);
-
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4f4f53bdc65d..ef726e8aa8e9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -385,10 +385,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
 		return -EINVAL;
 	if (end == start)
 		return 0;
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma(current->mm, start);
 	if (!vma || vma->vm_start > start)
 		return -ENOMEM;
 
+	prev = vma->vm_prev;
 	if (start > vma->vm_start)
 		prev = vma;
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7f4c81..da15a79b1441 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@ munmap_back:
 	vma->vm_pgoff = pgoff;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+	error = -EINVAL;	/* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
 	if (file) {
-		error = -EINVAL;
 		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
 			goto free_vma;
 		if (vm_flags & VM_DENYWRITE) {
@@ -1293,6 +1294,8 @@ munmap_back:
 		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
+		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
@@ -1605,7 +1608,6 @@ EXPORT_SYMBOL(find_vma);
 
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
 */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1614,7 +1616,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 	struct vm_area_struct *vma;
 
 	vma = find_vma(mm, addr);
-	*pprev = vma ? vma->vm_prev : NULL;
+	if (vma) {
+		*pprev = vma->vm_prev;
+	} else {
+		struct rb_node *rb_node = mm->mm_rb.rb_node;
+		*pprev = NULL;
+		while (rb_node) {
+			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+			rb_node = rb_node->rb_right;
+		}
+	}
 	return vma;
 }
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5a688a2756be..f437d054c3bf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -262,10 +262,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 
 	down_write(&current->mm->mmap_sem);
 
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma(current->mm, start);
 	error = -ENOMEM;
 	if (!vma)
 		goto out;
+	prev = vma->vm_prev;
 	if (unlikely(grows & PROT_GROWSDOWN)) {
 		if (vma->vm_start >= end)
 			goto out;
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	if (vma->vm_next)
 		vma->vm_next->vm_prev = vma->vm_prev;
-
-	vma->vm_mm = NULL;
 }
 
 /*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f4a1bb..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 		do_div(max, bucketsize);
 	}
+	max = min(max, 0x80000000ULL);
 
 	if (numentries > max)
 		numentries = max;
@@ -5413,7 +5414,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	struct zone *zone;
+	unsigned long pfn;
+
+	/*
+	 * We have to be careful here because we are iterating over memory
+	 * sections which are not zone aware so we might end up outside of
+	 * the zone but still within the section.
+	 * We have to take care about the node as well. If the node is offline
+	 * its NODE_DATA will be NULL - see page_zone.
+	 */
+	if (!node_online(page_to_nid(page)))
+		return false;
+
+	zone = page_zone(page);
+	pfn = page_to_pfn(page);
+	if (zone->zone_start_pfn > pfn ||
+			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+		return false;
+
 	return __count_immobile_pages(zone, page, 0);
 }
 
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index de1616aa9b1e..1ccbd714059c 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
 	pgoff_t offset = swp_offset(ent);
 	struct swap_cgroup_ctrl *ctrl;
 	struct page *mappage;
+	struct swap_cgroup *sc;
 
 	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
 	if (ctrlp)
 		*ctrlp = ctrl;
 
 	mappage = ctrl->map[offset / SC_PER_PAGE];
-	return page_address(mappage) + offset % SC_PER_PAGE;
+	sc = page_address(mappage);
+	return sc + offset % SC_PER_PAGE;
 }
 
 /**
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 12a48a88c0d8..405d331804c3 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
 				       page_end - page_start);
 	}
 
-	for (i = page_start; i < page_end; i++)
-		__clear_bit(i, populated);
+	bitmap_clear(populated, page_start, page_end - page_start);
 }
 
 /**
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e920aa3ce104..c20ff48994c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto free_proc_pages;
 	}
 
-	task_lock(task);
-	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-		task_unlock(task);
-		rc = -EPERM;
-		goto put_task_struct;
-	}
-	mm = task->mm;
-
-	if (!mm || (task->flags & PF_KTHREAD)) {
-		task_unlock(task);
-		rc = -EINVAL;
+	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	if (!mm || IS_ERR(mm)) {
+		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+		/*
+		 * Explicitly map EACCES to EPERM as EPERM is a more a
+		 * appropriate error code for process_vw_readv/writev
+		 */
+		if (rc == -EACCES)
+			rc = -EPERM;
 		goto put_task_struct;
 	}
 
-	atomic_inc(&mm->mm_users);
-	task_unlock(task);
-
 	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
diff --git a/mm/shmem.c b/mm/shmem.c
index feead1943d92..269d049294ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
 	int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
 			pvec->pages[j++] = page;
 	}
 	pvec->nr = j;
-	pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	pgoff_t index = 0;
+
+	pagevec_init(&pvec, 0);
+	/*
+	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+	 */
+	while (!mapping_unevictable(mapping)) {
+		/*
+		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 */
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					PAGEVEC_SIZE, pvec.pages, indices);
+		if (!pvec.nr)
+			break;
+		index = indices[pvec.nr - 1] + 1;
+		shmem_deswap_pagevec(&pvec);
+		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		pagevec_release(&pvec);
+		cond_resched();
+	}
 }
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
 		index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			continue;
 		}
 		if (index == start && indices[0] > end) {
-			shmem_pagevec_release(&pvec);
+			shmem_deswap_pagevec(&pvec);
+			pagevec_release(&pvec);
 			break;
 		}
 		mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
 	}
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
diff --git a/mm/swap.c b/mm/swap.c
index b0f529b38979..14380e9fbe33 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,14 +652,14 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
 		       struct page *page, struct page *page_tail)
 {
-	int active;
+	int uninitialized_var(active);
 	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
 	VM_BUG_ON(PageLRU(page_tail));
-	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
 
 	SetPageLRU(page_tail);
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
 			active = 0;
 			lru = LRU_INACTIVE_ANON;
 		}
-		update_page_reclaim_stat(zone, page_tail, file, active);
 	} else {
 		SetPageUnevictable(page_tail);
 		lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
+
+	if (!PageUnevictable(page))
+		update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
 	add_page_to_lru_list(zone, page, lru);
+	update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a91873..ea6b32d61873 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		new_page = alloc_page_vma(gfp_mask, vma, addr);
 		if (!new_page)
 			break;		/* Out of memory */
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid. Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 	}
 
 	/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396f7953..c52b23552659 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
 	 * When racing with an mlock or AS_UNEVICTABLE clearing
 	 * (page is unlocked) make sure that if the other thread
 	 * does not observe our setting of PG_lru and fails
-	 * isolation/check_move_unevictable_page,
+	 * isolation/check_move_unevictable_pages,
 	 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 	 * the page back to the evictable list.
 	 *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
 *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
 *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
 */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
-
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping. Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
 
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
-
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
 	}
 
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {