Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	102
1 file changed, 50 insertions(+), 52 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 228d6461c12a..d0e57a3cda18 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU. Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM