author		Balbir Singh <balbir@linux.vnet.ibm.com>	2008-02-07 03:14:41 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:22 -0500
commit		3c541e14bfa553133c3473a6ed3e4c0583ea2285
tree		4e8ca5d7272803d3fe1bb06963b132248d0b4240 /mm/memcontrol.c
parent		072c56c13e1302fcdc39961dc64e76485731ad67
Memory controller remove control_type feature
Based on the discussion at http://lkml.org/lkml/2007/12/20/383, it was felt
that control_type might not be a good thing to implement right away.  We can
add this flexibility at a later point when required.

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
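The practical effect: the per-cgroup control_type file disappears and page-cache pages are always charged alongside RSS. Condensed from the first two hunks below (not an independent implementation), the cache-charge path after the patch no longer looks up mm->mem_cgroup under RCU or tests control_type:

        /* mm/memcontrol.c after this patch: page cache is charged unconditionally */
        int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask)
        {
                int ret = 0;
                if (!mm)
                        mm = &init_mm;

                ret = mem_cgroup_charge_common(page, mm, gfp_mask,
                                        MEM_CGROUP_CHARGE_TYPE_CACHE);
                return ret;
        }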
Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 91
 1 file changed, 17 insertions(+), 74 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 315dee180129..5c2c702af617 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -131,7 +131,6 @@ struct mem_cgroup {
         */
        struct mem_cgroup_lru_info info;

-       unsigned long control_type;     /* control RSS or RSS+Pagecache */
        int     prev_priority;  /* for recording reclaim priority */
        /*
         * statistics.
@@ -709,24 +708,17 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
        int ret = 0;
-       struct mem_cgroup *mem;
        if (!mm)
                mm = &init_mm;

-       rcu_read_lock();
-       mem = rcu_dereference(mm->mem_cgroup);
-       css_get(&mem->css);
-       rcu_read_unlock();
-       if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-               ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE);
-       css_put(&mem->css);
        return ret;
 }

 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge.
+ * uncharge. This routine should be called with lock_page_cgroup held
  */
 void mem_cgroup_uncharge(struct page_cgroup *pc)
 {
@@ -736,8 +728,7 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
        unsigned long flags;

        /*
-        * This can handle cases when a page is not charged at all and we
-        * are switching between handling the control_type.
+        * Check if our page_cgroup is valid
         */
        if (!pc)
                return;
@@ -749,6 +740,7 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
         * get page->cgroup and clear it under lock.
         * force_empty can drop page->cgroup without checking refcnt.
         */
+       unlock_page_cgroup(page);
        if (clear_page_cgroup(page, pc) == pc) {
                mem = pc->mem_cgroup;
                css_put(&mem->css);
@@ -758,9 +750,17 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
                spin_unlock_irqrestore(&mz->lru_lock, flags);
                kfree(pc);
        }
+       lock_page_cgroup(page);
        }
 }

+void mem_cgroup_uncharge_page(struct page *page)
+{
+       lock_page_cgroup(page);
+       mem_cgroup_uncharge(page_get_page_cgroup(page));
+       unlock_page_cgroup(page);
+}
+
 /*
  * Returns non-zero if a page (under migration) has valid page_cgroup member.
  * Refcnt of page_cgroup is incremented.
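This hunk settles the uncharge locking contract: mem_cgroup_uncharge() now expects the caller to hold lock_page_cgroup() (per the comment updated above), and the new mem_cgroup_uncharge_page() wraps that locking for callers that only have the struct page. A minimal sketch of the two calling conventions; the example_* functions are hypothetical and not part of this patch:

        /* Caller does not hold the page_cgroup lock: use the wrapper. */
        static void example_drop_charge_unlocked(struct page *page)
        {
                mem_cgroup_uncharge_page(page);         /* locks/unlocks internally */
        }

        /* Caller takes the lock itself, as mem_cgroup_end_migration() does below. */
        static void example_drop_charge_locked(struct page *page)
        {
                struct page_cgroup *pc;

                lock_page_cgroup(page);
                pc = page_get_page_cgroup(page);
                mem_cgroup_uncharge(pc);                /* expects lock_page_cgroup held */
                unlock_page_cgroup(page);
        }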
@@ -780,8 +780,12 @@ int mem_cgroup_prepare_migration(struct page *page)

 void mem_cgroup_end_migration(struct page *page)
 {
-       struct page_cgroup *pc = page_get_page_cgroup(page);
+       struct page_cgroup *pc;
+
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
        mem_cgroup_uncharge(pc);
+       unlock_page_cgroup(page);
 }
 /*
  * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
@@ -936,61 +940,6 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                mem_cgroup_write_strategy);
 }

-static ssize_t mem_control_type_write(struct cgroup *cont,
-                       struct cftype *cft, struct file *file,
-                       const char __user *userbuf,
-                       size_t nbytes, loff_t *pos)
-{
-       int ret;
-       char *buf, *end;
-       unsigned long tmp;
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_cont(cont);
-       buf = kmalloc(nbytes + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (buf == NULL)
-               goto out;
-
-       buf[nbytes] = 0;
-       ret = -EFAULT;
-       if (copy_from_user(buf, userbuf, nbytes))
-               goto out_free;
-
-       ret = -EINVAL;
-       tmp = simple_strtoul(buf, &end, 10);
-       if (*end != '\0')
-               goto out_free;
-
-       if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
-               goto out_free;
-
-       mem->control_type = tmp;
-       ret = nbytes;
-out_free:
-       kfree(buf);
-out:
-       return ret;
-}
-
-static ssize_t mem_control_type_read(struct cgroup *cont,
-                               struct cftype *cft,
-                               struct file *file, char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
-{
-       unsigned long val;
-       char buf[64], *s;
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_cont(cont);
-       s = buf;
-       val = mem->control_type;
-       s += sprintf(s, "%lu\n", val);
-       return simple_read_from_buffer((void __user *)userbuf, nbytes,
-                       ppos, buf, s - buf);
-}
-
-
 static ssize_t mem_force_empty_write(struct cgroup *cont,
                                struct cftype *cft, struct file *file,
                                const char __user *userbuf,
@@ -1089,11 +1038,6 @@ static struct cftype mem_cgroup_files[] = {
                .read = mem_cgroup_read,
        },
        {
-               .name = "control_type",
-               .write = mem_control_type_write,
-               .read = mem_control_type_read,
-       },
-       {
                .name = "force_empty",
                .write = mem_force_empty_write,
                .read = mem_force_empty_read,
@@ -1161,7 +1105,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)

        res_counter_init(&mem->res);

-       mem->control_type = MEM_CGROUP_TYPE_ALL;
        memset(&mem->info, 0, sizeof(mem->info));

        for_each_node_state(node, N_POSSIBLE)