author     Arnd Bergmann <arnd@arndb.de>   2012-03-15 11:19:05 -0400
committer  Arnd Bergmann <arnd@arndb.de>   2012-03-15 11:20:07 -0400
commit     f4e2467bad53023589cbff18dd1ab6e0aa3f004c (patch)
tree       8d7abbf418eabd25bbcdc9b6de2f8216d2eaa616 /mm
parent     e3643b77de143c5548ec93abd8aa68f4123295ea (diff)
parent     a6de3df4f172e124280d88e617ee7d29f7af970b (diff)
Merge branch 'ep93xx-for-arm-soc' of git://github.com/RyanMallon/linux-2.6 into next/cleanup
* 'ep93xx-for-arm-soc' of git://github.com/RyanMallon/linux-2.6:
  ep93xx: Remove unnecessary includes of ep93xx-regs.h
  ep93xx: Move EP93XX_SYSCON defines to SoC private header
  ep93xx: Move crunch code to mach-ep93xx directory
  ep93xx: Make syscon access functions private to SoC
  ep93xx: Configure GPIO ports in core code
  ep93xx: Move peripheral defines to local SoC header
  ep93xx: Convert the watchdog driver into a platform device.
  ep93xx: Use ioremap for backlight driver
  ep93xx: Move GPIO defines to gpio-ep93xx.h
  ep93xx: Don't use system controller defines in audio drivers
  ep93xx: Move PHYS_BASE defines to local SoC header file
  (update to v3.3-rc7)

Conflicts:
	arch/arm/mach-s3c2440/common.h
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c     6
-rw-r--r--  mm/hugetlb.c         2
-rw-r--r--  mm/ksm.c            11
-rw-r--r--  mm/memcontrol.c    102
-rw-r--r--  mm/mempolicy.c       3
-rw-r--r--  mm/migrate.c         2
-rw-r--r--  mm/mlock.c           3
-rw-r--r--  mm/mmap.c           17
-rw-r--r--  mm/mprotect.c        3
-rw-r--r--  mm/page_cgroup.c     4
-rw-r--r--  mm/percpu-vm.c       3
-rw-r--r--  mm/swap.c            8
-rw-r--r--  mm/swap_state.c     10
13 files changed, 83 insertions(+), 91 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91d3efb25d15..8f7fc394f636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
         set_pmd_at(mm, haddr, pmd, entry);
         prepare_pmd_huge_pte(pgtable, mm);
         add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+        mm->nr_ptes++;
         spin_unlock(&mm->page_table_lock);
     }
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     pmd = pmd_mkold(pmd_wrprotect(pmd));
     set_pmd_at(dst_mm, addr, dst_pmd, pmd);
     prepare_pmd_huge_pte(pgtable, dst_mm);
+    dst_mm->nr_ptes++;
 
     ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
     }
     kfree(pages);
 
-    mm->nr_ptes++;
     smp_wmb(); /* make pte visible before pmd */
     pmd_populate(mm, pmd, pgtable);
     page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         VM_BUG_ON(page_mapcount(page) < 0);
         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
         VM_BUG_ON(!PageHead(page));
+        tlb->mm->nr_ptes--;
         spin_unlock(&tlb->mm->page_table_lock);
         tlb_remove_page(tlb, page);
         pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
         pte_unmap(pte);
     }
 
-    mm->nr_ptes++;
     smp_wmb(); /* make pte visible before pmd */
     /*
      * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
     set_pmd_at(mm, address, pmd, _pmd);
     update_mmu_cache(vma, address, _pmd);
     prepare_pmd_huge_pte(pgtable, mm);
-    mm->nr_ptes--;
     spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
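Taken together, the huge_memory.c hunks move the mm->nr_ptes accounting off the split and collapse paths and onto the points where the preallocated page table is deposited (THP fault, fork) or released (zap), so the counter follows the pgtable page's lifetime rather than the split event. A toy sketch of that invariant (illustrative names, not the kernel's API):

    /* One nr_ptes count per deposited page-table page: taken when the
     * pte page is stashed behind a huge pmd, dropped when it is freed,
     * unaffected by splitting the pmd later.
     */
    struct toy_mm {
        unsigned long nr_ptes;  /* deposited pte pages */
    };

    static void toy_deposit_pgtable(struct toy_mm *mm)
    {
        mm->nr_ptes++;          /* huge pmd set up, pte page stashed */
    }

    static void toy_release_pgtable(struct toy_mm *mm)
    {
        mm->nr_ptes--;          /* huge pmd zapped, pte page freed */
    }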
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f34bd8dda34..a876871f6be5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
         set_page_dirty(page);
         list_add(&page->lru, &page_list);
     }
-    spin_unlock(&mm->page_table_lock);
     flush_tlb_range(vma, start, end);
+    spin_unlock(&mm->page_table_lock);
     mmu_notifier_invalidate_range_end(mm, start, end);
     list_for_each_entry_safe(page, tmp, &page_list, lru) {
         page_remove_rmap(page);
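The hugetlb.c hunk swaps the order of the unlock and the TLB flush. Flushing while mm->page_table_lock is still held appears intended to close a window in which a racing thread could take the lock and reuse the unmapped range while remote CPUs still hold stale translations. A schematic of the ordering (stub functions, not the kernel's):

    void lock_ptl(void);
    void unlock_ptl(void);
    void clear_huge_ptes(void);
    void flush_tlb_span(void);

    static void unmap_range_sketch(void)
    {
        lock_ptl();
        clear_huge_ptes();      /* tear down the mappings */
        flush_tlb_span();       /* kill stale TLB entries... */
        unlock_ptl();           /* ...before anyone can race past the lock */
    }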
diff --git a/mm/ksm.c b/mm/ksm.c
index 1925ffbfb27f..310544a379ae 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
     new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
     if (new_page) {
-        /*
-         * The memcg-specific accounting when moving
-         * pages around the LRU lists relies on the
-         * page's owner (memcg) to be valid.  Usually,
-         * pages are assigned to a new owner before
-         * being put on the LRU list, but since this
-         * is not the case here, the stale owner from
-         * a previous allocation cycle must be reset.
-         */
-        mem_cgroup_reset_owner(new_page);
         copy_user_highpage(new_page, page, address, vma);
 
         SetPageDirty(new_page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 228d6461c12a..d0e57a3cda18 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
     pc = lookup_page_cgroup(page);
     memcg = pc->mem_cgroup;
+
+    /*
+     * Surreptitiously switch any uncharged page to root:
+     * an uncharged page off lru does nothing to secure
+     * its former mem_cgroup from sudden removal.
+     *
+     * Our caller holds lru_lock, and PageCgroupUsed is updated
+     * under page_cgroup lock: between them, they make all uses
+     * of pc->mem_cgroup safe.
+     */
+    if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+        pc->mem_cgroup = memcg = root_mem_cgroup;
+
     mz = page_cgroup_zoneinfo(memcg, page);
     /* compound_order() is stabilized through lru_lock */
     MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                 struct page *page,
                                 unsigned int nr_pages,
                                 struct page_cgroup *pc,
-                                enum charge_type ctype)
+                                enum charge_type ctype,
+                                bool lrucare)
 {
+    struct zone *uninitialized_var(zone);
+    bool was_on_lru = false;
+
     lock_page_cgroup(pc);
     if (unlikely(PageCgroupUsed(pc))) {
         unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
      * we don't need page_cgroup_lock about tail pages, becase they are not
      * accessed by any other context at this point.
      */
+
+    /*
+     * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+     * may already be on some other mem_cgroup's LRU.  Take care of it.
+     */
+    if (lrucare) {
+        zone = page_zone(page);
+        spin_lock_irq(&zone->lru_lock);
+        if (PageLRU(page)) {
+            ClearPageLRU(page);
+            del_page_from_lru_list(zone, page, page_lru(page));
+            was_on_lru = true;
+        }
+    }
+
     pc->mem_cgroup = memcg;
     /*
      * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         break;
     }
 
+    if (lrucare) {
+        if (was_on_lru) {
+            VM_BUG_ON(PageLRU(page));
+            SetPageLRU(page);
+            add_page_to_lru_list(zone, page, page_lru(page));
+        }
+        spin_unlock_irq(&zone->lru_lock);
+    }
+
     mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
     unlock_page_cgroup(pc);
-    WARN_ON_ONCE(PageLRU(page));
+
     /*
      * "charge_statistics" updated event counter. Then, check it.
      * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
     ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
     if (ret == -ENOMEM)
         return ret;
-    __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+    __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
     return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                   enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-                    enum charge_type ctype)
-{
-    struct page_cgroup *pc = lookup_page_cgroup(page);
-    struct zone *zone = page_zone(page);
-    unsigned long flags;
-    bool removed = false;
-
-    /*
-     * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-     * is already on LRU. It means the page may on some other page_cgroup's
-     * LRU. Take care of it.
-     */
-    spin_lock_irqsave(&zone->lru_lock, flags);
-    if (PageLRU(page)) {
-        del_page_from_lru_list(zone, page, page_lru(page));
-        ClearPageLRU(page);
-        removed = true;
-    }
-    __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-    if (removed) {
-        add_page_to_lru_list(zone, page, page_lru(page));
-        SetPageLRU(page);
-    }
-    spin_unlock_irqrestore(&zone->lru_lock, flags);
-    return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                 gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                   enum charge_type ctype)
 {
+    struct page_cgroup *pc;
+
     if (mem_cgroup_disabled())
         return;
     if (!memcg)
         return;
     cgroup_exclude_rmdir(&memcg->css);
 
-    __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+    pc = lookup_page_cgroup(page);
+    __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
     /*
      * Now swap is on-memory. This means this page may be
      * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
     batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-    struct page_cgroup *pc;
-
-    if (mem_cgroup_disabled())
-        return;
-
-    pc = lookup_page_cgroup(newpage);
-    VM_BUG_ON(PageCgroupUsed(pc));
-    pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
         ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
     else
         ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-    __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+    __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
     return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
      * the newpage may be on LRU(or pagevec for LRU) already. We lock
      * LRU while we overwrite pc->mem_cgroup.
      */
-    __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+    __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
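The memcontrol.c changes fold the standalone __mem_cgroup_commit_charge_lrucare() helper into __mem_cgroup_commit_charge() behind a bool lrucare flag: callers whose page may already sit on another memcg's LRU (swap-in, page-cache replacement) pass true, and the commit path itself takes the page off the LRU, recharges it, and reinserts it under zone->lru_lock. A self-contained toy of that take-off/recharge/reinsert pattern (illustrative types, not the kernel's):

    #include <stdbool.h>

    /* Toy stand-ins for struct page / zone / mem_cgroup. */
    struct toy_page {
        bool on_lru;
        int owner;              /* which group's LRU the page sits on */
    };

    struct toy_zone {
        int lru_lock;           /* stand-in for zone->lru_lock */
    };

    static void toy_lock(struct toy_zone *z)   { z->lru_lock = 1; }
    static void toy_unlock(struct toy_zone *z) { z->lru_lock = 0; }

    /* Commit a charge; with lrucare, fix up LRU placement around it. */
    static void toy_commit_charge(struct toy_zone *zone, struct toy_page *page,
                                  int new_owner, bool lrucare)
    {
        bool was_on_lru = false;

        if (lrucare) {
            toy_lock(zone);
            if (page->on_lru) {         /* may be on the old owner's LRU */
                page->on_lru = false;
                was_on_lru = true;
            }
        }

        page->owner = new_owner;        /* the actual "charge" */

        if (lrucare) {
            if (was_on_lru)
                page->on_lru = true;    /* now on the new owner's LRU */
            toy_unlock(zone);
        }
    }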
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 06b145fb64ab..47296fee23db 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -640,10 +640,11 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
     unsigned long vmstart;
     unsigned long vmend;
 
-    vma = find_vma_prev(mm, start, &prev);
+    vma = find_vma(mm, start);
     if (!vma || vma->vm_start > start)
         return -EFAULT;
 
+    prev = vma->vm_prev;
     if (start > vma->vm_start)
         prev = vma;
 
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f60289e..1503b6b54ecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
     if (!newpage)
         return -ENOMEM;
 
-    mem_cgroup_reset_owner(newpage);
-
     if (page_count(page) == 1) {
         /* page was freed from under us. So we are done. */
         goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4f4f53bdc65d..ef726e8aa8e9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -385,10 +385,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
         return -EINVAL;
     if (end == start)
         return 0;
-    vma = find_vma_prev(current->mm, start, &prev);
+    vma = find_vma(current->mm, start);
     if (!vma || vma->vm_start > start)
         return -ENOMEM;
 
+    prev = vma->vm_prev;
     if (start > vma->vm_start)
         prev = vma;
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7f4c81..da15a79b1441 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@ munmap_back:
     vma->vm_pgoff = pgoff;
     INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+    error = -EINVAL;    /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
     if (file) {
-        error = -EINVAL;
         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
             goto free_vma;
         if (vm_flags & VM_DENYWRITE) {
@@ -1293,6 +1294,8 @@ munmap_back:
         pgoff = vma->vm_pgoff;
         vm_flags = vma->vm_flags;
     } else if (vm_flags & VM_SHARED) {
+        if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+            goto free_vma;
         error = shmem_zero_setup(vma);
         if (error)
             goto free_vma;
@@ -1605,7 +1608,6 @@ EXPORT_SYMBOL(find_vma);
 
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
  */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1614,7 +1616,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
     struct vm_area_struct *vma;
 
     vma = find_vma(mm, addr);
-    *pprev = vma ? vma->vm_prev : NULL;
+    if (vma) {
+        *pprev = vma->vm_prev;
+    } else {
+        struct rb_node *rb_node = mm->mm_rb.rb_node;
+        *pprev = NULL;
+        while (rb_node) {
+            *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+            rb_node = rb_node->rb_right;
+        }
+    }
     return vma;
 }
 
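The new find_vma_prev() drops the old "NULL when no VMA" note: when addr lies above every mapping it now descends the rbtree to its rightmost node and reports the last VMA in *pprev. Simple lookups elsewhere (the mempolicy.c, mlock.c and mprotect.c hunks) no longer need the helper at all and read vma->vm_prev directly. A self-contained toy of the new contract, using a linked list as a stand-in for the kernel's rbtree:

    #include <stddef.h>

    /* Toy VMA with the next/prev links the kernel keeps alongside its
     * rbtree; the tail walk stands in for the rb_right descent above.
     */
    struct toy_vma {
        unsigned long vm_start, vm_end;
        struct toy_vma *vm_next, *vm_prev;
    };

    static struct toy_vma *toy_find_vma_prev(struct toy_vma *first,
                                             unsigned long addr,
                                             struct toy_vma **pprev)
    {
        /* find_vma(): first VMA whose vm_end lies above addr */
        struct toy_vma *vma = first;

        while (vma && vma->vm_end <= addr)
            vma = vma->vm_next;

        if (vma) {
            *pprev = vma->vm_prev;
        } else {
            /* addr is above every VMA: report the last one, not NULL */
            struct toy_vma *last = first;

            *pprev = NULL;
            while (last) {
                *pprev = last;
                last = last->vm_next;
            }
        }
        return vma;
    }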
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5a688a2756be..f437d054c3bf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -262,10 +262,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 
     down_write(&current->mm->mmap_sem);
 
-    vma = find_vma_prev(current->mm, start, &prev);
+    vma = find_vma(current->mm, start);
     error = -ENOMEM;
     if (!vma)
         goto out;
+    prev = vma->vm_prev;
     if (unlikely(grows & PROT_GROWSDOWN)) {
         if (vma->vm_start >= end)
             goto out;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index de1616aa9b1e..1ccbd714059c 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
     pgoff_t offset = swp_offset(ent);
     struct swap_cgroup_ctrl *ctrl;
     struct page *mappage;
+    struct swap_cgroup *sc;
 
     ctrl = &swap_cgroup_ctrl[swp_type(ent)];
     if (ctrlp)
         *ctrlp = ctrl;
 
     mappage = ctrl->map[offset / SC_PER_PAGE];
-    return page_address(mappage) + offset % SC_PER_PAGE;
+    sc = page_address(mappage);
+    return sc + offset % SC_PER_PAGE;
 }
 
 /**
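This hunk is more than cosmetic: page_address() returns void *, and GCC's void-pointer arithmetic steps one byte at a time, so the old return advanced by bytes where an index into an array of struct swap_cgroup was intended. Assigning to a typed pointer first makes the addition scale by sizeof(struct swap_cgroup). A standalone illustration (the struct layout here is made up for the demo):

    #include <stdio.h>

    struct swap_cgroup { unsigned short id; };  /* illustrative layout */

    int main(void)
    {
        struct swap_cgroup table[4];
        void *base = table;

        /* GCC extension: void * arithmetic advances one byte per step */
        void *byte_stepped = base + 2;
        /* typed arithmetic advances one whole element per step */
        struct swap_cgroup *elem_stepped = (struct swap_cgroup *)base + 2;

        printf("byte-stepped offset:    %td\n",
               (char *)byte_stepped - (char *)base);   /* 2 */
        printf("element-stepped offset: %td\n",
               (char *)elem_stepped - (char *)base);   /* 2 * sizeof(struct swap_cgroup) */
        return 0;
    }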
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 12a48a88c0d8..405d331804c3 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                  page_end - page_start);
     }
 
-    for (i = page_start; i < page_end; i++)
-        __clear_bit(i, populated);
+    bitmap_clear(populated, page_start, page_end - page_start);
 }
 
 /**
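Here an open-coded loop over __clear_bit() becomes a single bitmap_clear(map, start, nbits) call, which clears the same range but can work a word at a time. A toy re-implementation of the range-clear idea (plain C, not the kernel's optimized version):

    #include <limits.h>

    #define TOY_BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

    /* Clear 'nbits' bits starting at bit 'start' in a word-array bitmap.
     * The kernel's bitmap_clear() additionally fast-paths whole words.
     */
    static void toy_bitmap_clear(unsigned long *map, unsigned int start,
                                 unsigned int nbits)
    {
        unsigned int bit;

        for (bit = start; bit < start + nbits; bit++)
            map[bit / TOY_BITS_PER_LONG] &=
                ~(1UL << (bit % TOY_BITS_PER_LONG));
    }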
diff --git a/mm/swap.c b/mm/swap.c
index fff1ff7fb9ad..14380e9fbe33 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
                struct page *page, struct page *page_tail)
 {
-    int active;
+    int uninitialized_var(active);
     enum lru_list lru;
     const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
             active = 0;
             lru = LRU_INACTIVE_ANON;
         }
-        update_page_reclaim_stat(zone, page_tail, file, active);
     } else {
         SetPageUnevictable(page_tail);
         lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
         list_head = page_tail->lru.prev;
         list_move_tail(&page_tail->lru, list_head);
     }
+
+    if (!PageUnevictable(page))
+        update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
     SetPageLRU(page);
     if (active)
         SetPageActive(page);
-    update_page_reclaim_stat(zone, page, file, active);
     add_page_to_lru_list(zone, page, lru);
+    update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
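Both swap.c changes apply the same ordering rule: update_page_reclaim_stat() now runs only after the page has been linked onto its LRU list, and never for unevictable THP tails. Plausibly, with lrucare charging able to re-place a page on another memcg's LRU, per-memcg reclaim statistics are only meaningful once the page sits in its final place. A minimal sketch of the resulting pattern (stub declarations only, to show the ordering):

    struct zone;
    struct page;
    enum toy_lru_list { TOY_LRU_INACTIVE_ANON };

    /* Stubs standing in for the mm-internal helpers used above. */
    void add_page_to_lru_list(struct zone *zone, struct page *page,
                              enum toy_lru_list lru);
    void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                  int file, int active);

    static void lru_add_then_account(struct zone *zone, struct page *page,
                                     enum toy_lru_list lru, int file, int active)
    {
        add_page_to_lru_list(zone, page, lru);              /* place the page first */
        update_page_reclaim_stat(zone, page, file, active); /* then account it */
    }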
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a91873..ea6b32d61873 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
             new_page = alloc_page_vma(gfp_mask, vma, addr);
             if (!new_page)
                 break;      /* Out of memory */
-            /*
-             * The memcg-specific accounting when moving
-             * pages around the LRU lists relies on the
-             * page's owner (memcg) to be valid.  Usually,
-             * pages are assigned to a new owner before
-             * being put on the LRU list, but since this
-             * is not the case here, the stale owner from
-             * a previous allocation cycle must be reset.
-             */
-            mem_cgroup_reset_owner(new_page);
         }
 
         /*