 mm/memcontrol.c | 721 +++++++++++++++++++++++++++++-----------------
 1 file changed, 450 insertions(+), 271 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8e4be9cb2a6a..78eb8552818b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -27,6 +27,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -95,6 +96,15 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 	return ret;
 }
 
+static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
+{
+	s64 ret;
+
+	ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
+	ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
+	return ret;
+}
+
 /*
  * per-zone information in memory controller.
  */
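
Note on the helper just added: mem_cgroup_local_usage() sums only this group's own CACHE and RSS counters — both are page counts (the stats code later in this patch multiplies by PAGE_SIZE when reporting) — with no hierarchy walk. A later hunk uses it to skip reclaim victims that have nothing charged locally, roughly:

	/* from mem_cgroup_hierarchical_reclaim() further down in this patch */
	if (!mem_cgroup_local_usage(&victim->stat)) {
		/* this cgroup's local usage == 0, nothing to reclaim here */
		css_put(&victim->css);
		continue;
	}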
@@ -154,9 +164,9 @@ struct mem_cgroup {
 
 	/*
 	 * While reclaiming in a hierarchy, we cache the last child we
-	 * reclaimed from. Protected by hierarchy_mutex
+	 * reclaimed from.
 	 */
-	struct mem_cgroup *last_scanned_child;
+	int last_scanned_child;
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
 	 */
@@ -247,7 +257,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
 
-static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
 					enum lru_list idx)
 {
 	int nid, zid;
@@ -286,6 +296,9 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *mem = NULL;
+
+	if (!mm)
+		return NULL;
 	/*
 	 * Because we have no locks, mm->owner's may be being moved to other
 	 * cgroup. We use css_tryget() here even if this looks
@@ -301,11 +314,39 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 	return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+/*
+ * Call the callback function against all cgroups under the hierarchy tree.
+ */
+static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
+			  int (*func)(struct mem_cgroup *, void *))
 {
-	if (!mem)
-		return true;
-	return css_is_removed(&mem->css);
+	int found, ret, nextid;
+	struct cgroup_subsys_state *css;
+	struct mem_cgroup *mem;
+
+	if (!root->use_hierarchy)
+		return (*func)(root, data);
+
+	nextid = 1;
+	do {
+		ret = 0;
+		mem = NULL;
+
+		rcu_read_lock();
+		css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
+				   &found);
+		if (css && css_tryget(css))
+			mem = container_of(css, struct mem_cgroup, css);
+		rcu_read_unlock();
+
+		if (mem) {
+			ret = (*func)(mem, data);
+			css_put(&mem->css);
+		}
+		nextid = found + 1;
+	} while (!ret && css);
+
+	return ret;
 }
 
 /*
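
mem_cgroup_walk_tree() is the workhorse behind several later hunks (counting children, stamping last_oom_jiffies, summing hierarchical stats). The callback contract: it is invoked once per group in the subtree, under a css_tryget() reference, and should return 0 to keep walking or non-zero to stop early. A minimal sketch of a caller — the callback name and counter below are illustrative only, modeled on mem_cgroup_count_children_cb() later in this patch:

/* Hypothetical example, not part of this patch. */
static int count_groups_cb(struct mem_cgroup *mem, void *data)
{
	int *nr = data;		/* caller-supplied accumulator */

	(*nr)++;
	return 0;		/* non-zero would abort the walk */
}

	/* ... in some caller: */
	int nr = 0;
	mem_cgroup_walk_tree(root, &nr, count_groups_cb);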
@@ -441,31 +482,24 @@ void mem_cgroup_move_lists(struct page *page,
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 {
 	int ret;
+	struct mem_cgroup *curr = NULL;
 
 	task_lock(task);
-	ret = task->mm && mm_match_cgroup(task->mm, mem);
+	rcu_read_lock();
+	curr = try_get_mem_cgroup_from_mm(task->mm);
+	rcu_read_unlock();
 	task_unlock(task);
+	if (!curr)
+		return 0;
+	if (curr->use_hierarchy)
+		ret = css_is_ancestor(&curr->css, &mem->css);
+	else
+		ret = (curr == mem);
+	css_put(&curr->css);
 	return ret;
 }
 
 /*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
-	long total, rss;
-
-	/*
-	 * usage is recorded in bytes. But, here, we assume the number of
-	 * physical pages can be represented by "long" on any arch.
-	 */
-	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
-	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
-	return (int)((rss * 100L) / total);
-}
-
-/*
  * prev_priority control...this will be used in memory reclaim path.
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
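
The rewritten task_in_mem_cgroup() makes the OOM killer's membership test hierarchy-aware: instead of matching the task's memcg exactly, it asks whether @mem is an ancestor of the task's group. Illustrative semantics (not in the patch), assuming use_hierarchy is set on the subtree:

/*
 * With hierarchy /A -> /A/B:
 *
 *   task charged to /A/B, mem == /A   -> 1 (css_is_ancestor)
 *   task charged to /A/B, mem == /A/B -> 1
 *   task charged to /A,   mem == /A/B -> 0
 *
 * With use_hierarchy disabled only the exact match returns 1.
 */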
@@ -501,8 +535,8 @@ static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_
 	unsigned long gb;
 	unsigned long inactive_ratio;
 
-	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
-	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
 
 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
 	if (gb)
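
Only the helper's name changes in this hunk, but the surrounding arithmetic is worth unpacking: gb is the group-local anon working set in whole gibibytes. A worked example, assuming 4KiB pages (PAGE_SHIFT == 12):

/*
 * (inactive + active) >> (30 - PAGE_SHIFT) == pages / 2^18,
 * and 2^18 is the number of 4KiB pages in 1GiB.
 *
 *   inactive + active == 524288 pages (2GiB)   -> gb == 2
 *   inactive + active == 131072 pages (512MiB) -> gb == 0,
 *     so calc_inactive_ratio() falls through to its small-memory default.
 */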
@@ -629,172 +663,202 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
-/*
- * This routine finds the DFS walk successor. This routine should be
- * called with hierarchy_mutex held
- */
-static struct mem_cgroup *
-__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
 {
-	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
-
-	curr_cgroup = curr->css.cgroup;
-	root_cgroup = root_mem->css.cgroup;
+	if (do_swap_account) {
+		if (res_counter_check_under_limit(&mem->res) &&
+			res_counter_check_under_limit(&mem->memsw))
+			return true;
+	} else
+		if (res_counter_check_under_limit(&mem->res))
+			return true;
+	return false;
+}
 
-	if (!list_empty(&curr_cgroup->children)) {
-		/*
-		 * Walk down to children
-		 */
-		cgroup = list_entry(curr_cgroup->children.next,
-				struct cgroup, sibling);
-		curr = mem_cgroup_from_cont(cgroup);
-		goto done;
-	}
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+	struct cgroup *cgrp = memcg->css.cgroup;
+	unsigned int swappiness;
 
-visit_parent:
-	if (curr_cgroup == root_cgroup) {
-		/* caller handles NULL case */
-		curr = NULL;
-		goto done;
-	}
+	/* root ? */
+	if (cgrp->parent == NULL)
+		return vm_swappiness;
 
-	/*
-	 * Goto next sibling
-	 */
-	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
-		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
-				sibling);
-		curr = mem_cgroup_from_cont(cgroup);
-		goto done;
-	}
+	spin_lock(&memcg->reclaim_param_lock);
+	swappiness = memcg->swappiness;
+	spin_unlock(&memcg->reclaim_param_lock);
 
-	/*
-	 * Go up to next parent and next parent's sibling if need be
-	 */
-	curr_cgroup = curr_cgroup->parent;
-	goto visit_parent;
+	return swappiness;
+}
 
-done:
-	return curr;
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+	int *val = data;
+	(*val)++;
+	return 0;
 }
 
-/*
- * Visit the first child (need not be the first child as per the ordering
- * of the cgroup list, since we track last_scanned_child) of @mem and use
- * that to reclaim free pages from.
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
  */
-static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
-	struct cgroup *cgroup;
-	struct mem_cgroup *orig, *next;
-	bool obsolete;
-
+	struct cgroup *task_cgrp;
+	struct cgroup *mem_cgrp;
 	/*
-	 * Scan all children under the mem_cgroup mem
+	 * Need a buffer in BSS, can't rely on allocations. The code relies
+	 * on the assumption that OOM is serialized for memory controller.
+	 * If this assumption is broken, revisit this code.
 	 */
-	mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+	static char memcg_name[PATH_MAX];
+	int ret;
 
-	orig = root_mem->last_scanned_child;
-	obsolete = mem_cgroup_is_obsolete(orig);
+	if (!memcg)
+		return;
 
-	if (list_empty(&root_mem->css.cgroup->children)) {
+
+	rcu_read_lock();
+
+	mem_cgrp = memcg->css.cgroup;
+	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+	if (ret < 0) {
 		/*
-		 * root_mem might have children before and last_scanned_child
-		 * may point to one of them. We put it later.
+		 * Unfortunately, we are unable to convert to a useful name
+		 * But we'll still print out the usage information
 		 */
-		if (orig)
-			VM_BUG_ON(!obsolete);
-		next = NULL;
+		rcu_read_unlock();
 		goto done;
 	}
+	rcu_read_unlock();
 
-	if (!orig || obsolete) {
-		cgroup = list_first_entry(&root_mem->css.cgroup->children,
-				struct cgroup, sibling);
-		next = mem_cgroup_from_cont(cgroup);
-	} else
-		next = __mem_cgroup_get_next_node(orig, root_mem);
+	printk(KERN_INFO "Task in %s killed", memcg_name);
 
+	rcu_read_lock();
+	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+	if (ret < 0) {
+		rcu_read_unlock();
+		goto done;
+	}
+	rcu_read_unlock();
+
+	/*
+	 * Continues from above, so we don't need a KERN_ level
+	 */
+	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
 done:
-	if (next)
-		mem_cgroup_get(next);
-	root_mem->last_scanned_child = next;
-	if (orig)
-		mem_cgroup_put(orig);
-	mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
-	return (next) ? next : root_mem;
+
+	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+		res_counter_read_u64(&memcg->res, RES_FAILCNT));
+	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+		"failcnt %llu\n",
+		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
 }
 
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * Returns 1 (self count) if no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
 {
-	if (do_swap_account) {
-		if (res_counter_check_under_limit(&mem->res) &&
-			res_counter_check_under_limit(&mem->memsw))
-			return true;
-	} else
-		if (res_counter_check_under_limit(&mem->res))
-			return true;
-	return false;
+	int num = 0;
+	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+	return num;
 }
 
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 {
-	struct cgroup *cgrp = memcg->css.cgroup;
-	unsigned int swappiness;
+	struct mem_cgroup *ret = NULL;
+	struct cgroup_subsys_state *css;
+	int nextid, found;
 
-	/* root ? */
-	if (cgrp->parent == NULL)
-		return vm_swappiness;
+	if (!root_mem->use_hierarchy) {
+		css_get(&root_mem->css);
+		ret = root_mem;
+	}
 
-	spin_lock(&memcg->reclaim_param_lock);
-	swappiness = memcg->swappiness;
-	spin_unlock(&memcg->reclaim_param_lock);
+	while (!ret) {
+		rcu_read_lock();
+		nextid = root_mem->last_scanned_child + 1;
+		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+				   &found);
+		if (css && css_tryget(css))
+			ret = container_of(css, struct mem_cgroup, css);
+
+		rcu_read_unlock();
+		/* Updates scanning parameter */
+		spin_lock(&root_mem->reclaim_param_lock);
+		if (!css) {
+			/* this means start scan from ID:1 */
+			root_mem->last_scanned_child = 0;
+		} else
+			root_mem->last_scanned_child = found;
+		spin_unlock(&root_mem->reclaim_param_lock);
+	}
 
-	return swappiness;
+	return ret;
 }
 
 /*
- * Dance down the hierarchy if needed to reclaim memory. We remember the
- * last child we reclaimed from, so that we don't end up penalizing
- * one child extensively based on its position in the children list.
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
  *
  * root_mem is the original ancestor that we've been reclaiming from.
+ *
+ * We give up and return to the caller when we visit root_mem twice.
+ * (other groups can be removed while we're walking....)
+ *
+ * If shrink==true, to avoid freeing too much, this returns immediately.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-						gfp_t gfp_mask, bool noswap)
+				   gfp_t gfp_mask, bool noswap, bool shrink)
 {
-	struct mem_cgroup *next_mem;
-	int ret = 0;
-
-	/*
-	 * Reclaim unconditionally and don't check for return value.
-	 * We need to reclaim in the current group and down the tree.
-	 * One might think about checking for children before reclaiming,
-	 * but there might be left over accounting, even after children
-	 * have left.
-	 */
-	ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
-					   get_swappiness(root_mem));
-	if (mem_cgroup_check_under_limit(root_mem))
-		return 1;	/* indicate reclaim has succeeded */
-	if (!root_mem->use_hierarchy)
-		return ret;
-
-	next_mem = mem_cgroup_get_next_node(root_mem);
-
-	while (next_mem != root_mem) {
-		if (mem_cgroup_is_obsolete(next_mem)) {
-			next_mem = mem_cgroup_get_next_node(root_mem);
+	struct mem_cgroup *victim;
+	int ret, total = 0;
+	int loop = 0;
+
+	while (loop < 2) {
+		victim = mem_cgroup_select_victim(root_mem);
+		if (victim == root_mem)
+			loop++;
+		if (!mem_cgroup_local_usage(&victim->stat)) {
+			/* this cgroup's local usage == 0 */
+			css_put(&victim->css);
 			continue;
 		}
-		ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
-						   get_swappiness(next_mem));
+		/* we use swappiness of local cgroup */
+		ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+						   get_swappiness(victim));
+		css_put(&victim->css);
+		/*
+		 * At shrinking usage, we can't check whether we should stop
+		 * here or reclaim more. It depends on callers.
+		 * last_scanned_child will work enough for keeping fairness
+		 * under the tree.
+		 */
+		if (shrink)
+			return ret;
+		total += ret;
 		if (mem_cgroup_check_under_limit(root_mem))
-			return 1;	/* indicate reclaim has succeeded */
-		next_mem = mem_cgroup_get_next_node(root_mem);
+			return 1 + total;
 	}
-	return ret;
+	return total;
 }
 
 bool mem_cgroup_oom_called(struct task_struct *task)
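
The ID-based victim scan replaces the pointer-chasing DFS deleted above. Because css IDs are small dense integers and css_get_next() returns the next live ID at or after nextid, the walk degrades gracefully when groups are removed mid-scan. An illustrative trace of the round-robin order (IDs invented for the example):

/*
 * Suppose the subtree holds css IDs {3, 7, 9} with root_mem at ID 3,
 * and last_scanned_child == 7.  Successive select_victim() calls:
 *
 *   nextid 8  -> returns 9, last_scanned_child = 9
 *   nextid 10 -> css_get_next() == NULL, reset last_scanned_child = 0,
 *                retry with nextid 1 -> returns 3 (the root, loop++)
 *   nextid 4  -> returns 7, and so on.
 *
 * hierarchical_reclaim() stops after seeing the root twice (loop < 2),
 * so each group is tried at most twice per charge attempt.
 */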
@@ -813,6 +877,19 @@ bool mem_cgroup_oom_called(struct task_struct *task)
 	rcu_read_unlock();
 	return ret;
 }
+
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+	mem->last_oom_jiffies = jiffies;
+	return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+	mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
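
record_last_oom() stamps the whole subtree rather than only mem_over_limit, so that pagefault_out_of_memory() can recognize a recent memcg OOM anywhere under the hierarchy and avoid piling a global OOM kill on top of it. The consumer is the pre-existing mem_cgroup_oom_called() above, whose freshness test is approximately (a sketch, not changed by this patch):

	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
		ret = true;	/* a memcg OOM fired within the last ~100ms */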
@@ -847,7 +924,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	if (unlikely(!mem))
 		return 0;
 
-	VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+	VM_BUG_ON(css_is_removed(&mem->css));
 
 	while (1) {
 		int ret;
@@ -875,7 +952,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			goto nomem;
 
 		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-							noswap);
+							noswap, false);
 		if (ret)
 			continue;
 
@@ -895,7 +972,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			mutex_lock(&memcg_tasklist);
 			mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
 			mutex_unlock(&memcg_tasklist);
-			mem_over_limit->last_oom_jiffies = jiffies;
+			record_last_oom(mem_over_limit);
 		}
 		goto nomem;
 	}
@@ -906,20 +983,54 @@ nomem:
 	return -ENOMEM;
 }
 
+
+/*
+ * A helper function to get mem_cgroup from ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or such if
+ * that is a concern. (Dropping a refcnt from swap can be called against
+ * a removed memcg.)
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+	struct cgroup_subsys_state *css;
+
+	/* ID 0 is unused ID */
+	if (!id)
+		return NULL;
+	css = css_lookup(&mem_cgroup_subsys, id);
+	if (!css)
+		return NULL;
+	return container_of(css, struct mem_cgroup, css);
+}
+
 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
 {
 	struct mem_cgroup *mem;
+	struct page_cgroup *pc;
+	unsigned short id;
 	swp_entry_t ent;
 
+	VM_BUG_ON(!PageLocked(page));
+
 	if (!PageSwapCache(page))
 		return NULL;
 
-	ent.val = page_private(page);
-	mem = lookup_swap_cgroup(ent);
-	if (!mem)
-		return NULL;
-	if (!css_tryget(&mem->css))
-		return NULL;
+	pc = lookup_page_cgroup(page);
+	lock_page_cgroup(pc);
+	if (PageCgroupUsed(pc)) {
+		mem = pc->mem_cgroup;
+		if (mem && !css_tryget(&mem->css))
+			mem = NULL;
+	} else {
+		ent.val = page_private(page);
+		id = lookup_swap_cgroup(ent);
+		rcu_read_lock();
+		mem = mem_cgroup_lookup(id);
+		if (mem && !css_tryget(&mem->css))
+			mem = NULL;
+		rcu_read_unlock();
+	}
+	unlock_page_cgroup(pc);
 	return mem;
 }
 
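
mem_cgroup_lookup() is the read side of the new css_id scheme: swap records store a 2-byte ID, and the ID is mapped back to a memcg under RCU. A sketch of the full round trip, assembled from calls that appear elsewhere in this patch:

	/* store: done at swap-out (see mem_cgroup_uncharge_swapcache) */
	swap_cgroup_record(ent, css_id(&memcg->css));

	/* load: done at swap-in / swap-free */
	rcu_read_lock();
	mem = mem_cgroup_lookup(lookup_swap_cgroup(ent));
	if (mem && !css_tryget(&mem->css))	/* group may be dying */
		mem = NULL;
	rcu_read_unlock();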
@@ -1118,6 +1229,10 @@ int mem_cgroup_newpage_charge(struct page *page,
 				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -1154,16 +1269,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		unlock_page_cgroup(pc);
 	}
 
-	if (do_swap_account && PageSwapCache(page)) {
-		mem = try_get_mem_cgroup_from_swapcache(page);
-		if (mem)
-			mm = NULL;
-		else
-			mem = NULL;
-		/* SwapCache may be still linked to LRU now. */
-		mem_cgroup_lru_del_before_commit_swapcache(page);
-	}
-
 	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
@@ -1171,22 +1276,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-	if (mem)
-		css_put(&mem->css);
-	if (PageSwapCache(page))
-		mem_cgroup_lru_add_after_commit_swapcache(page);
+	/* shmem */
+	if (PageSwapCache(page)) {
+		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+		if (!ret)
+			__mem_cgroup_commit_charge_swapin(page, mem,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM);
+	} else
+		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
 
-	if (do_swap_account && !ret && PageSwapCache(page)) {
-		swp_entry_t ent = {.val = page_private(page)};
-		/* avoid double counting */
-		mem = swap_cgroup_record(ent, NULL);
-		if (mem) {
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-			mem_cgroup_put(mem);
-		}
-	}
 	return ret;
 }
 
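
The shmem path above now funnels swapcache-resident pages through the common swap-in charge/commit pair instead of charging first and patching up the double count by hand (the deleted swap_cgroup_record(ent, NULL) dance). The flow for a shmem page found in swap cache becomes, in outline:

	/*
	 * 1. mem_cgroup_try_charge_swapin()  - picks the memcg and charges
	 *    res (and memsw), reclaiming/OOMing against mem_over_limit
	 *    if needed;
	 * 2. __mem_cgroup_commit_charge_swapin(..., TYPE_SHMEM) - commits
	 *    the page_cgroup and drops the stale swap_cgroup record once,
	 *    so memsw is never charged twice for the same page.
	 */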
@@ -1229,7 +1328,9 @@ charge_cur_mm:
 	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype)
 {
 	struct page_cgroup *pc;
 
@@ -1239,7 +1340,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 		return;
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	__mem_cgroup_commit_charge(ptr, pc, ctype);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -1250,18 +1351,32 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 	 */
 	if (do_swap_account && PageSwapCache(page)) {
 		swp_entry_t ent = {.val = page_private(page)};
+		unsigned short id;
 		struct mem_cgroup *memcg;
-		memcg = swap_cgroup_record(ent, NULL);
+
+		id = swap_cgroup_record(ent, 0);
+		rcu_read_lock();
+		memcg = mem_cgroup_lookup(id);
 		if (memcg) {
+			/*
+			 * This recorded memcg can be an obsolete one. So,
+			 * avoid calling css_tryget
+			 */
 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 			mem_cgroup_put(memcg);
 		}
-
+		rcu_read_unlock();
 	}
 	/* add this page(page_cgroup) to the LRU we want. */
 
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	__mem_cgroup_commit_charge_swapin(page, ptr,
+					MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
 	if (mem_cgroup_disabled())
@@ -1324,8 +1439,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
 		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-
 	mem_cgroup_charge_statistics(mem, pc, false);
+
 	ClearPageCgroupUsed(pc);
 	/*
 	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
@@ -1365,8 +1480,9 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
@@ -1377,12 +1493,13 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
 	/* record memcg information */
 	if (do_swap_account && memcg) {
-		swap_cgroup_record(ent, memcg);
+		swap_cgroup_record(ent, css_id(&memcg->css));
 		mem_cgroup_get(memcg);
 	}
 	if (memcg)
 		css_put(&memcg->css);
 }
+#endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
@@ -1392,15 +1509,23 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 void mem_cgroup_uncharge_swap(swp_entry_t ent)
 {
 	struct mem_cgroup *memcg;
+	unsigned short id;
 
 	if (!do_swap_account)
 		return;
 
-	memcg = swap_cgroup_record(ent, NULL);
+	id = swap_cgroup_record(ent, 0);
+	rcu_read_lock();
+	memcg = mem_cgroup_lookup(id);
 	if (memcg) {
+		/*
+		 * We uncharge this because swap is freed.
+		 * This memcg can be an obsolete one. We avoid calling
+		 * css_tryget
+		 */
 		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_put(memcg);
 	}
+	rcu_read_unlock();
 }
 #endif
 
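
Storing css_id() in swap_cgroup (here and in mem_cgroup_uncharge_swapcache) shrinks each per-swap-entry record from a pointer to an unsigned short and removes the pointer-lifetime problem for removed groups. Back-of-envelope figures, illustrative only:

/*
 * 4GiB of swap at 4KiB per entry -> ~1,048,576 swap_cgroup records.
 *   8-byte mem_cgroup pointers:  ~8MiB of records
 *   2-byte css IDs:              ~2MiB, and an ID can always be
 *   re-checked via css_lookup() even after the group is gone.
 */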
@@ -1486,36 +1611,28 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 }
 
 /*
- * A call to try to shrink memory usage under specified resource controller.
- * This is typically used for page reclaiming for shmem for reducing side
- * effect of page allocation from shmem, which is used by some mem_cgroup.
+ * A call to try to shrink memory usage on charge failure at shmem's swapin.
+ * Calling hierarchical_reclaim is not enough because we should update
+ * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
+ * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
+ * not from the memcg which this page would be charged to.
+ * try_charge_swapin does all of this work properly.
  */
-int mem_cgroup_shrink_usage(struct page *page,
+int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm,
 			gfp_t gfp_mask)
 {
 	struct mem_cgroup *mem = NULL;
-	int progress = 0;
-	int retry = MEM_CGROUP_RECLAIM_RETRIES;
+	int ret;
 
 	if (mem_cgroup_disabled())
 		return 0;
-	if (page)
-		mem = try_get_mem_cgroup_from_swapcache(page);
-	if (!mem && mm)
-		mem = try_get_mem_cgroup_from_mm(mm);
-	if (unlikely(!mem))
-		return 0;
 
-	do {
-		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
-		progress += mem_cgroup_check_under_limit(mem);
-	} while (!progress && --retry);
+	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+	if (!ret)
+		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
 
-	css_put(&mem->css);
-	if (!retry)
-		return -ENOMEM;
-	return 0;
+	return ret;
 }
 
 static DEFINE_MUTEX(set_limit_mutex);
@@ -1523,11 +1640,21 @@ static DEFINE_MUTEX(set_limit_mutex);
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 				unsigned long long val)
 {
-
-	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int retry_count;
 	int progress;
 	u64 memswlimit;
 	int ret = 0;
+	int children = mem_cgroup_count_children(memcg);
+	u64 curusage, oldusage;
+
+	/*
+	 * For keeping hierarchical_reclaim simple, how long we should retry
+	 * depends on callers. We set our retry-count to be a function
+	 * of the # of children which we should visit in this loop.
+	 */
+	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
 	while (retry_count) {
 		if (signal_pending(current)) {
@@ -1553,8 +1680,13 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 
 		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-						false);
-		if (!progress) retry_count--;
+						false, true);
+		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		/* Usage is reduced ? */
+		if (curusage >= oldusage)
+			retry_count--;
+		else
+			oldusage = curusage;
 	}
 
 	return ret;
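
The retry budget now scales with the size of the subtree, since each hierarchical_reclaim(..., shrink=true) pass visits only one victim and returns immediately. A worked example, assuming this tree's MEM_CGROUP_RECLAIM_RETRIES of 5:

/*
 * memcg with itself + 2 children: mem_cgroup_count_children() == 3,
 * so retry_count = 5 * 3 = 15 single-victim shrink passes before the
 * resize gives up -- roughly the old 5-try budget preserved per group.
 */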
@@ -1563,13 +1695,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 				unsigned long long val)
 {
-	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int retry_count;
 	u64 memlimit, oldusage, curusage;
-	int ret;
+	int children = mem_cgroup_count_children(memcg);
+	int ret = -EBUSY;
 
 	if (!do_swap_account)
 		return -EINVAL;
-
+	/* see mem_cgroup_resize_res_limit */
+	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 	while (retry_count) {
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -1593,11 +1728,13 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 		if (!ret)
 			break;
 
-		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		/* Usage is reduced ? */
 		if (curusage >= oldusage)
 			retry_count--;
+		else
+			oldusage = curusage;
 	}
 	return ret;
 }
@@ -1893,54 +2030,90 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 	return 0;
 }
 
-static const struct mem_cgroup_stat_desc {
-	const char *msg;
-	u64 unit;
-} mem_cgroup_stat_desc[] = {
-	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
-	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
-	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
-	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
+
+/* For read statistics */
+enum {
+	MCS_CACHE,
+	MCS_RSS,
+	MCS_PGPGIN,
+	MCS_PGPGOUT,
+	MCS_INACTIVE_ANON,
+	MCS_ACTIVE_ANON,
+	MCS_INACTIVE_FILE,
+	MCS_ACTIVE_FILE,
+	MCS_UNEVICTABLE,
+	NR_MCS_STAT,
+};
+
+struct mcs_total_stat {
+	s64 stat[NR_MCS_STAT];
+};
+
+struct {
+	char *local_name;
+	char *total_name;
+} memcg_stat_strings[NR_MCS_STAT] = {
+	{"cache", "total_cache"},
+	{"rss", "total_rss"},
+	{"pgpgin", "total_pgpgin"},
+	{"pgpgout", "total_pgpgout"},
+	{"inactive_anon", "total_inactive_anon"},
+	{"active_anon", "total_active_anon"},
+	{"inactive_file", "total_inactive_file"},
+	{"active_file", "total_active_file"},
+	{"unevictable", "total_unevictable"}
 };
 
+
+static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+{
+	struct mcs_total_stat *s = data;
+	s64 val;
+
+	/* per cpu stat */
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+	s->stat[MCS_CACHE] += val * PAGE_SIZE;
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	s->stat[MCS_RSS] += val * PAGE_SIZE;
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+	s->stat[MCS_PGPGIN] += val;
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+	s->stat[MCS_PGPGOUT] += val;
+
+	/* per zone stat */
+	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
+	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
+	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
+	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
+	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
+	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
+	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
+	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
+	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
+	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
+	return 0;
+}
+
+static void
+mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
+{
+	mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+}
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 				 struct cgroup_map_cb *cb)
 {
 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
-	struct mem_cgroup_stat *stat = &mem_cont->stat;
+	struct mcs_total_stat mystat;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
-		s64 val;
-
-		val = mem_cgroup_read_stat(stat, i);
-		val *= mem_cgroup_stat_desc[i].unit;
-		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
-	}
-	/* showing # of active pages */
-	{
-		unsigned long active_anon, inactive_anon;
-		unsigned long active_file, inactive_file;
-		unsigned long unevictable;
-
-		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
-						LRU_INACTIVE_ANON);
-		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
-						LRU_ACTIVE_ANON);
-		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
-						LRU_INACTIVE_FILE);
-		active_file = mem_cgroup_get_all_zonestat(mem_cont,
-						LRU_ACTIVE_FILE);
-		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
-						LRU_UNEVICTABLE);
-
-		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
-		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
-		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
-		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
-		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+	memset(&mystat, 0, sizeof(mystat));
+	mem_cgroup_get_local_stat(mem_cont, &mystat);
 
-	}
+	for (i = 0; i < NR_MCS_STAT; i++)
+		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+
+	/* Hierarchical information */
 	{
 		unsigned long long limit, memsw_limit;
 		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
@@ -1949,6 +2122,12 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 		cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
 	}
 
+	memset(&mystat, 0, sizeof(mystat));
+	mem_cgroup_get_total_stat(mem_cont, &mystat);
+	for (i = 0; i < NR_MCS_STAT; i++)
+		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+
+
 #ifdef CONFIG_DEBUG_VM
 	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
 
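
After these two hunks, memory.stat reports the group-local counters first, then the hierarchical limits, then the subtree-wide total_* counters accumulated via mem_cgroup_walk_tree(). An illustrative excerpt with invented values:

	cache 1048576
	rss 2097152
	pgpgin 1024
	pgpgout 512
	...
	hierarchical_memory_limit 536870912
	total_cache 5242880
	total_rss 8388608
	...
	total_unevictable 0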
@@ -2178,6 +2357,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
 	int node;
 
+	free_css_id(&mem_cgroup_subsys, &mem->css);
+
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
@@ -2228,11 +2409,12 @@ static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct mem_cgroup *mem, *parent;
+	long error = -ENOMEM;
 	int node;
 
 	mem = mem_cgroup_alloc();
 	if (!mem)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(error);
 
 	for_each_node_state(node, N_POSSIBLE)
 		if (alloc_mem_cgroup_per_zone_info(mem, node))
@@ -2260,7 +2442,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		res_counter_init(&mem->res, NULL);
 		res_counter_init(&mem->memsw, NULL);
 	}
-	mem->last_scanned_child = NULL;
+	mem->last_scanned_child = 0;
 	spin_lock_init(&mem->reclaim_param_lock);
 
 	if (parent)
@@ -2269,26 +2451,22 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	return &mem->css;
 free_out:
 	__mem_cgroup_free(mem);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(error);
 }
 
-static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
 					struct cgroup *cont)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	mem_cgroup_force_empty(mem, false);
+
+	return mem_cgroup_force_empty(mem, false);
 }
 
 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
 
-	if (last_scanned_child) {
-		VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
-		mem_cgroup_put(last_scanned_child);
-	}
 	mem_cgroup_put(mem);
 }
 
@@ -2327,6 +2505,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.populate = mem_cgroup_populate,
 	.attach = mem_cgroup_move_task,
 	.early_init = 0,
+	.use_id = 1,
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP