Diffstat (limited to 'mm/memcontrol.c')
 -rw-r--r--  mm/memcontrol.c | 155
 1 file changed, 97 insertions, 58 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 228d6461c12a..58a08fc7414a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -230,10 +230,30 @@ struct mem_cgroup {
          * the counter to account for memory usage
          */
         struct res_counter res;
-        /*
-         * the counter to account for mem+swap usage.
-         */
-        struct res_counter memsw;
+
+        union {
+                /*
+                 * the counter to account for mem+swap usage.
+                 */
+                struct res_counter memsw;
+
+                /*
+                 * rcu_freeing is used only when freeing struct mem_cgroup,
+                 * so put it into a union to avoid wasting more memory.
+                 * It must be disjoint from the css field. It could be
+                 * in a union with the res field, but res plays a much
+                 * larger part in mem_cgroup life than memsw, and might
+                 * be of interest, even at time of free, when debugging.
+                 * So share rcu_head with the less interesting memsw.
+                 */
+                struct rcu_head rcu_freeing;
+                /*
+                 * But when using vfree(), that cannot be done at
+                 * interrupt time, so we must then queue the work.
+                 */
+                struct work_struct work_freeing;
+        };
+
         /*
          * Per cgroup active and inactive list, similar to the
          * per zone LRU lists.
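The union works because its members share storage and because container_of() can recover the enclosing struct mem_cgroup from a pointer to any member, so the rcu_head and the work_struct reuse space that memsw already occupies. A minimal userspace sketch of the same overlay idea, with illustrative stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head {
        void (*func)(struct fake_rcu_head *head);
};

struct fake_memcg {
        long res;                       /* meaningful for the whole lifetime */
        union {                         /* reused only once the object is dying */
                long memsw;
                struct fake_rcu_head rcu_freeing;
        };
};

static void free_cb(struct fake_rcu_head *head)
{
        struct fake_memcg *memcg = container_of(head, struct fake_memcg, rcu_freeing);

        printf("would free memcg at %p\n", (void *)memcg);
}

int main(void)
{
        struct fake_memcg m = { .res = 42, .memsw = 7 };

        /* The union adds no size here: rcu_freeing overlays memsw. */
        printf("sizeof(struct fake_memcg) = %zu\n", sizeof(struct fake_memcg));

        m.rcu_freeing.func = free_cb;   /* memsw is dead from this point on */
        m.rcu_freeing.func(&m.rcu_freeing);
        return 0;
}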
@@ -1042,6 +1062,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
         pc = lookup_page_cgroup(page);
         memcg = pc->mem_cgroup;
+
+        /*
+         * Surreptitiously switch any uncharged page to root:
+         * an uncharged page off lru does nothing to secure
+         * its former mem_cgroup from sudden removal.
+         *
+         * Our caller holds lru_lock, and PageCgroupUsed is updated
+         * under page_cgroup lock: between them, they make all uses
+         * of pc->mem_cgroup safe.
+         */
+        if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+                pc->mem_cgroup = memcg = root_mem_cgroup;
+
         mz = page_cgroup_zoneinfo(memcg, page);
         /* compound_order() is stabilized through lru_lock */
         MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2441,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                        struct page *page,
                                        unsigned int nr_pages,
                                        struct page_cgroup *pc,
-                                       enum charge_type ctype)
+                                       enum charge_type ctype,
+                                       bool lrucare)
 {
+        struct zone *uninitialized_var(zone);
+        bool was_on_lru = false;
+
         lock_page_cgroup(pc);
         if (unlikely(PageCgroupUsed(pc))) {
                 unlock_page_cgroup(pc);
@@ -2420,6 +2457,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
          * we don't need page_cgroup_lock about tail pages, becase they are not
          * accessed by any other context at this point.
          */
+
+        /*
+         * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+         * may already be on some other mem_cgroup's LRU. Take care of it.
+         */
+        if (lrucare) {
+                zone = page_zone(page);
+                spin_lock_irq(&zone->lru_lock);
+                if (PageLRU(page)) {
+                        ClearPageLRU(page);
+                        del_page_from_lru_list(zone, page, page_lru(page));
+                        was_on_lru = true;
+                }
+        }
+
         pc->mem_cgroup = memcg;
         /*
          * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2495,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                 break;
         }
 
+        if (lrucare) {
+                if (was_on_lru) {
+                        VM_BUG_ON(PageLRU(page));
+                        SetPageLRU(page);
+                        add_page_to_lru_list(zone, page, page_lru(page));
+                }
+                spin_unlock_irq(&zone->lru_lock);
+        }
+
         mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
         unlock_page_cgroup(pc);
-        WARN_ON_ONCE(PageLRU(page));
+
         /*
          * "charge_statistics" updated event counter. Then, check it.
          * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
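The lrucare bracketing above follows a simple discipline: if the page may already be on an LRU, isolate it from the list under zone->lru_lock, switch its owner while it is invisible to LRU walkers, then put it back and drop the lock. A toy userspace sketch of that discipline, with a mutex standing in for the spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: a "page" that may sit on a lock-protected LRU,
 * and an owner field that is only changed while the page is off the list. */
struct toy_page {
        bool on_lru;
        int owner;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the lrucare idea: isolate, reassign, restore. */
static void commit_owner(struct toy_page *page, int new_owner, bool lrucare)
{
        bool was_on_lru = false;

        if (lrucare) {
                pthread_mutex_lock(&lru_lock);
                if (page->on_lru) {
                        page->on_lru = false;   /* "ClearPageLRU + del_page_from_lru_list" */
                        was_on_lru = true;
                }
        }

        page->owner = new_owner;                /* nobody can find it on the LRU now */

        if (lrucare) {
                if (was_on_lru)
                        page->on_lru = true;    /* "SetPageLRU + add_page_to_lru_list" */
                pthread_mutex_unlock(&lru_lock);
        }
}

int main(void)
{
        struct toy_page p = { .on_lru = true, .owner = 1 };

        commit_owner(&p, 2, true);
        printf("owner=%d, back on lru=%d\n", p.owner, p.on_lru);
        return 0;
}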
@@ -2643,7 +2704,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
         if (ret == -ENOMEM)
                 return ret;
-        __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+        __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
         return 0;
 }
 
@@ -2663,35 +2724,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                   enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-                                        enum charge_type ctype)
-{
-        struct page_cgroup *pc = lookup_page_cgroup(page);
-        struct zone *zone = page_zone(page);
-        unsigned long flags;
-        bool removed = false;
-
-        /*
-         * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-         * is already on LRU. It means the page may on some other page_cgroup's
-         * LRU. Take care of it.
-         */
-        spin_lock_irqsave(&zone->lru_lock, flags);
-        if (PageLRU(page)) {
-                del_page_from_lru_list(zone, page, page_lru(page));
-                ClearPageLRU(page);
-                removed = true;
-        }
-        __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-        if (removed) {
-                add_page_to_lru_list(zone, page, page_lru(page));
-                SetPageLRU(page);
-        }
-        spin_unlock_irqrestore(&zone->lru_lock, flags);
-        return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                             gfp_t gfp_mask)
 {
@@ -2769,13 +2801,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                                   enum charge_type ctype)
 {
+        struct page_cgroup *pc;
+
         if (mem_cgroup_disabled())
                 return;
         if (!memcg)
                 return;
         cgroup_exclude_rmdir(&memcg->css);
 
-        __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+        pc = lookup_page_cgroup(page);
+        __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
         /*
          * Now swap is on-memory. This means this page may be
          * counted both as mem and swap....double count.
@@ -3027,23 +3062,6 @@ void mem_cgroup_uncharge_end(void)
         batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-        struct page_cgroup *pc;
-
-        if (mem_cgroup_disabled())
-                return;
-
-        pc = lookup_page_cgroup(newpage);
-        VM_BUG_ON(PageCgroupUsed(pc));
-        pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3266,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
         else
                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-        __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+        __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
         return ret;
 }
 
@@ -3332,7 +3350,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
          * the newpage may be on LRU(or pagevec for LRU) already. We lock
          * LRU while we overwrite pc->mem_cgroup.
          */
-        __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+        __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4782,6 +4800,27 @@ out_free:
 }
 
 /*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+        struct mem_cgroup *memcg;
+
+        memcg = container_of(work, struct mem_cgroup, work_freeing);
+        vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+        struct mem_cgroup *memcg;
+
+        memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+        INIT_WORK(&memcg->work_freeing, vfree_work);
+        schedule_work(&memcg->work_freeing);
+}
+
+/*
  * At destroying mem_cgroup, references from swap_cgroup can remain.
  * (scanning all at force_empty is too costly...)
  *
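vfree_rcu() and vfree_work() split the free into two stages because an RCU callback runs in softirq context, where vfree() must not be called; the callback therefore only queues a work item, and the workqueue later does the real vfree() in process context. The same hand-off can be sketched in plain userspace C, with a worker thread standing in for the workqueue; this is an analogy with made-up names, not kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the two-stage free: the restricted "callback" context may
 * not free the big buffer itself, so it only queues the object; a worker
 * running in ordinary context does the real freeing later. */
struct big_object {
        char *payload;
        struct big_object *next;        /* link used only while queued for freeing */
};

static struct big_object *free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stage 1: analogous to vfree_rcu() -- just queue the object. */
static void queue_free(struct big_object *obj)
{
        pthread_mutex_lock(&free_lock);
        obj->next = free_list;
        free_list = obj;
        pthread_mutex_unlock(&free_lock);
}

/* Stage 2: analogous to vfree_work() -- do the real freeing. */
static void *free_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&free_lock);
        while (free_list) {
                struct big_object *obj = free_list;

                free_list = obj->next;
                printf("freeing: %s\n", obj->payload);
                free(obj->payload);
                free(obj);
        }
        pthread_mutex_unlock(&free_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;
        struct big_object *obj = malloc(sizeof(*obj));

        obj->payload = strdup("pretend this is a vzalloc()ed mem_cgroup");
        queue_free(obj);                        /* restricted context queues it */
        pthread_create(&worker, NULL, free_worker, NULL);
        pthread_join(worker, NULL);             /* ordinary context frees it    */
        return 0;
}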
@@ -4804,9 +4843,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
         free_percpu(memcg->stat);
         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-                kfree(memcg);
+                kfree_rcu(memcg, rcu_freeing);
         else
-                vfree(memcg);
+                call_rcu(&memcg->rcu_freeing, vfree_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
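The small-allocation branch needs no callback at all: kfree_rcu() is a macro taking the object pointer and the name of its embedded struct rcu_head member (rcu_freeing here), and it kfree()s the object after a grace period. Only the vmalloc'd branch needs the explicit call_rcu() plus workqueue chain above. A minimal kernel-style sketch of the kfree_rcu() shape, using a hypothetical struct foo rather than anything from this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object; only the embedded rcu_head matters for kfree_rcu(). */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_release(struct foo *f)
{
        /* RCU readers may still hold a pointer to f; kfree_rcu() defers the
         * kfree() until after a grace period, with no callback to write. */
        kfree_rcu(f, rcu);
}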