Diffstat (limited to 'mm/memcontrol.c'):
-rw-r--r--  mm/memcontrol.c | 190
1 file changed, 96 insertions(+), 94 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2e0bfc93484b..e46451e1d9b7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -26,15 +26,18 @@
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys;
 static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
+static struct kmem_cache *page_cgroup_cache;
 
 /*
  * Statistics for memory cgroup.
@@ -45,6 +48,8 @@ enum mem_cgroup_stat_index {
 	 */
 	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
 	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
+	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
+	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
 
 	MEM_CGROUP_STAT_NSTATS,
 };
@@ -196,6 +201,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
 		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
 	else
 		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+
+	if (charge)
+		__mem_cgroup_stat_add_safe(stat,
+				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+	else
+		__mem_cgroup_stat_add_safe(stat,
+				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
 }
 
 static struct mem_cgroup_per_zone *
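
Note on the counters added above: unlike the cache and rss statistics, pgpgin/pgpgout count events rather than bytes, so their unit in mem_cgroup_stat_desc (further down in this diff) is 1 instead of PAGE_SIZE. A minimal userspace sketch for reading them through the cgroup's memory.stat file follows; the /cgroup/memory mount point is an assumption and depends on where the memory controller is mounted on your system.

	/* Read the per-cgroup paging event counters exported via memory.stat. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Assumed mount point; adjust for your configuration. */
		FILE *f = fopen("/cgroup/memory/memory.stat", "r");
		char key[64];
		unsigned long long val;

		if (!f) {
			perror("memory.stat");
			return 1;
		}
		while (fscanf(f, "%63s %llu", key, &val) == 2)
			if (!strcmp(key, "pgpgin") || !strcmp(key, "pgpgout"))
				printf("%s: %llu events\n", key, val);
		fclose(f);
		return 0;
	}
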
@@ -236,26 +248,12 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 				css);
 }
 
-static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
 
-void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
-{
-	struct mem_cgroup *mem;
-
-	mem = mem_cgroup_from_task(p);
-	css_get(&mem->css);
-	mm->mem_cgroup = mem;
-}
-
-void mm_free_cgroup(struct mm_struct *mm)
-{
-	css_put(&mm->mem_cgroup->css);
-}
-
 static inline int page_cgroup_locked(struct page *page)
 {
 	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
@@ -287,10 +285,10 @@ static void unlock_page_cgroup(struct page *page)
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+			struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (from)
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -301,10 +299,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
 	list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+				struct page_cgroup *pc)
 {
 	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
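
Both list helpers now take the mem_cgroup_per_zone pointer from the caller instead of deriving it with page_cgroup_zoneinfo() internally. Every caller already performs that lookup in order to take mz->lru_lock, so the call sites (updated later in this diff) do the lookup exactly once:

	/* Caller pattern after this change (fragment from the call sites
	 * below, not standalone code): the zone info is computed once,
	 * used for locking, and handed down to the list helper. */
	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
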
@@ -476,6 +474,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
 
+	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
 	if (active)
 		src = &mz->active_list;
@@ -560,7 +559,7 @@ retry:
 	}
 	unlock_page_cgroup(page);
 
-	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
+	pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
 	if (pc == NULL)
 		goto err;
 
@@ -574,7 +573,7 @@ retry:
 		mm = &init_mm;
 
 	rcu_read_lock();
-	mem = rcu_dereference(mm->mem_cgroup);
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 	/*
 	 * For every charge from the cgroup, increment reference count
 	 */
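
The charge path no longer reads a mem_cgroup pointer cached in mm_struct; it resolves the mm's owner task under RCU and maps that task to its cgroup via the now-public mem_cgroup_from_task(). This is why mm_init_cgroup()/mm_free_cgroup() disappear earlier in this diff, and why mem_cgroup_move_task() at the end no longer swaps an mm->mem_cgroup pointer: with mm->owner as the single source of truth, there is no per-mm cgroup reference left to maintain.
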
@@ -602,7 +601,6 @@ retry:
 			mem_cgroup_out_of_memory(mem, gfp_mask);
 			goto out;
 		}
-		congestion_wait(WRITE, HZ/10);
 	}
 
 	pc->ref_cnt = 1;
@@ -610,7 +608,7 @@ retry:
 	pc->page = page;
 	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+		pc->flags = PAGE_CGROUP_FLAG_CACHE;
 
 	lock_page_cgroup(page);
 	if (page_get_page_cgroup(page)) {
@@ -622,14 +620,14 @@ retry:
 		 */
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
-		kfree(pc);
+		kmem_cache_free(page_cgroup_cache, pc);
 		goto retry;
 	}
 	page_assign_page_cgroup(page, pc);
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(page);
@@ -637,7 +635,7 @@ done:
 	return 0;
 out:
 	css_put(&mem->css);
-	kfree(pc);
+	kmem_cache_free(page_cgroup_cache, pc);
 err:
 	return -ENOMEM;
 }
@@ -685,7 +683,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (--(pc->ref_cnt) == 0) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_remove_list(pc);
+		__mem_cgroup_remove_list(mz, pc);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 		page_assign_page_cgroup(page, NULL);
@@ -695,7 +693,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
 
-		kfree(pc);
+		kmem_cache_free(page_cgroup_cache, pc);
 		return;
 	}
 
@@ -747,7 +745,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(pc);
+	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	page_assign_page_cgroup(page, NULL);
@@ -759,7 +757,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(newpage);
@@ -853,13 +851,10 @@ static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 	return 0;
 }
 
-static ssize_t mem_cgroup_read(struct cgroup *cont,
-			struct cftype *cft, struct file *file,
-			char __user *userbuf, size_t nbytes, loff_t *ppos)
+static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
-	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
-				cft->private, userbuf, nbytes, ppos,
-				NULL);
+	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
+				    cft->private);
 }
 
 static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
@@ -871,27 +866,25 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 				mem_cgroup_write_strategy);
 }
 
-static ssize_t mem_force_empty_write(struct cgroup *cont,
-				struct cftype *cft, struct file *file,
-				const char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
-	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	int ret = mem_cgroup_force_empty(mem);
-	if (!ret)
-		ret = nbytes;
-	return ret;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	switch (event) {
+	case RES_MAX_USAGE:
+		res_counter_reset_max(&mem->res);
+		break;
+	case RES_FAILCNT:
+		res_counter_reset_failcnt(&mem->res);
+		break;
+	}
+	return 0;
 }
 
-/*
- * Note: This should be removed if cgroup supports write-only file.
- */
-static ssize_t mem_force_empty_read(struct cgroup *cont,
-				struct cftype *cft,
-				struct file *file, char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
 {
-	return -EINVAL;
+	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
 }
 
 static const struct mem_cgroup_stat_desc {
@@ -900,11 +893,13 @@ static const struct mem_cgroup_stat_desc {
 } mem_cgroup_stat_desc[] = {
 	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
 	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
+	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
+	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
 };
 
-static int mem_control_stat_show(struct seq_file *m, void *arg)
+static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
+				 struct cgroup_map_cb *cb)
 {
-	struct cgroup *cont = m->private;
 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
 	struct mem_cgroup_stat *stat = &mem_cont->stat;
 	int i;
@@ -914,8 +909,7 @@ static int mem_control_stat_show(struct seq_file *m, void *arg)
 
 		val = mem_cgroup_read_stat(stat, i);
 		val *= mem_cgroup_stat_desc[i].unit;
-		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
-				(long long)val);
+		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
 	}
 	/* showing # of active pages */
 	{
@@ -925,52 +919,43 @@ static int mem_control_stat_show(struct seq_file *m, void *arg)
 						MEM_CGROUP_ZSTAT_INACTIVE);
 		active = mem_cgroup_get_all_zonestat(mem_cont,
 						MEM_CGROUP_ZSTAT_ACTIVE);
-		seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
-		seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
+		cb->fill(cb, "active", (active) * PAGE_SIZE);
+		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
 	}
 	return 0;
 }
 
-static const struct file_operations mem_control_stat_file_operations = {
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int mem_control_stat_open(struct inode *unused, struct file *file)
-{
-	/* XXX __d_cont */
-	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-	file->f_op = &mem_control_stat_file_operations;
-	return single_open(file, mem_control_stat_show, cont);
-}
-
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
 		.private = RES_USAGE,
-		.read = mem_cgroup_read,
+		.read_u64 = mem_cgroup_read,
+	},
+	{
+		.name = "max_usage_in_bytes",
+		.private = RES_MAX_USAGE,
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "limit_in_bytes",
 		.private = RES_LIMIT,
 		.write = mem_cgroup_write,
-		.read = mem_cgroup_read,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "failcnt",
 		.private = RES_FAILCNT,
-		.read = mem_cgroup_read,
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "force_empty",
-		.write = mem_force_empty_write,
-		.read = mem_force_empty_read,
+		.trigger = mem_force_empty_write,
 	},
 	{
 		.name = "stat",
-		.open = mem_control_stat_open,
+		.read_map = mem_control_stat_show,
 	},
 };
 
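
The control-file table above moves from raw file operations to the cgroup core's typed callbacks: .read_u64 for scalar counters, .trigger for write-only operations (resetting max_usage/failcnt and forcing a cgroup empty), and .read_map for the key/value stat file, so the seq_file plumbing lives in the cgroup core instead of here. The force_empty read handler that existed only to return -EINVAL is no longer needed. A userspace sketch of driving the new max_usage_in_bytes file; the /cgroup/memory mount point is again an assumption:

	/* Read the usage high-water mark, then write to the same file to
	 * fire the RES_MAX_USAGE trigger and reset it. */
	#include <stdio.h>

	#define MAX_USAGE "/cgroup/memory/memory.max_usage_in_bytes"

	int main(void)
	{
		unsigned long long max;
		FILE *f = fopen(MAX_USAGE, "r");

		if (!f) {
			perror(MAX_USAGE);
			return 1;
		}
		if (fscanf(f, "%llu", &max) == 1)
			printf("high-water mark: %llu bytes\n", max);
		fclose(f);

		f = fopen(MAX_USAGE, "w");
		if (f) {
			fprintf(f, "0\n");	/* conventionally 0; the write is the event */
			fclose(f);
		}
		return 0;
	}
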
@@ -1010,6 +995,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+	struct mem_cgroup *mem;
+
+	if (sizeof(*mem) < PAGE_SIZE)
+		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	else
+		mem = vmalloc(sizeof(*mem));
+
+	if (mem)
+		memset(mem, 0, sizeof(*mem));
+	return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+	if (sizeof(*mem) < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
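
mem_cgroup_alloc()/mem_cgroup_free() above choose the allocator by size: once struct mem_cgroup grows past a page (its per-node pointers scale with the possible-node count), kmalloc() would require physically contiguous higher-order pages, so the fallback is vmalloc(), which needs only virtual contiguity. A userspace analog of the same size-gated pattern, as an illustrative sketch only (big_alloc/big_free are hypothetical names):

	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Small objects come from the cheap general-purpose allocator;
	 * page-sized and larger ones from a mapping-based allocator. */
	static void *big_alloc(size_t size)
	{
		void *p;

		if (size < (size_t)sysconf(_SC_PAGESIZE))
			return calloc(1, size);	/* zeroed, like the memset above */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return p == MAP_FAILED ? NULL : p;	/* mmap memory is zero-filled */
	}

	static void big_free(void *p, size_t size)
	{
		if (size < (size_t)sysconf(_SC_PAGESIZE))
			free(p);
		else
			munmap(p, size);
	}
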
@@ -1018,17 +1026,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 
 	if (unlikely((cont->parent) == NULL)) {
 		mem = &init_mem_cgroup;
-		init_mm.mem_cgroup = mem;
-	} else
-		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-
-	if (mem == NULL)
-		return ERR_PTR(-ENOMEM);
+		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
+	} else {
+		mem = mem_cgroup_alloc();
+		if (!mem)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	res_counter_init(&mem->res);
 
-	memset(&mem->info, 0, sizeof(mem->info));
-
 	for_each_node_state(node, N_POSSIBLE)
 		if (alloc_mem_cgroup_per_zone_info(mem, node))
 			goto free_out;
@@ -1038,7 +1044,7 @@ free_out:
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
-		kfree(mem);
+		mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -1058,7 +1064,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
-	kfree(mem_cgroup_from_cont(cont));
+	mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
@@ -1098,10 +1104,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	if (!thread_group_leader(p))
 		goto out;
 
-	css_get(&mem->css);
-	rcu_assign_pointer(mm->mem_cgroup, mem);
-	css_put(&old_mem->css);
-
 out:
 	mmput(mm);
 }