author    Hugh Dickins <hughd@google.com>  2012-03-21 19:34:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 20:55:00 -0400
commit    d79154bb5223edad407db61f59b9b15b0080ed80 (patch)
tree      28c7c706c5a78763fb75f9ff6e6d2b3a4d7fa82d /mm
parent    31a79235fc75b506e282e43723107a40f3bc5c07 (diff)
memcg: replace mem and mem_cont stragglers
Replace mem and mem_cont stragglers in memcontrol.c by memcg.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 84 +++++++++++++++++++++---------------------
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bb04067269bc..e5370db7ad72 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -144,7 +144,7 @@ struct mem_cgroup_per_zone {
 	unsigned long long usage_in_excess;	/* Set to the value by which */
 						/* the soft limit is exceeded*/
 	bool on_tree;
-	struct mem_cgroup *mem;			/* Back pointer, we cannot */
+	struct mem_cgroup *memcg;		/* Back pointer, we cannot */
 						/* use container_of */
 };
 /* Macro for accessing counter */
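
The comment on the renamed field explains why it exists at all: container_of() can only recover an enclosing structure when the member is physically embedded in it, and mem_cgroup_per_zone is reached through a separately allocated per-node structure rather than being embedded in struct mem_cgroup, so an explicit back pointer is required. A minimal userspace sketch of the distinction (all names below are hypothetical, not kernel API):

/*
 * Hypothetical userspace sketch, not kernel code.  container_of()
 * works only for embedded members; a separately allocated inner
 * object needs an explicit back pointer, which is what mz->memcg is.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct embedded_owner {
	int id;
	struct { int dummy; } member;	/* embedded: container_of works */
};

struct remote_owner;			/* forward declaration */

struct remote_member {
	int dummy;
	struct remote_owner *owner;	/* back pointer, like mz->memcg */
};

struct remote_owner {
	int id;
	struct remote_member *member;	/* separate allocation */
};

int main(void)
{
	struct embedded_owner e = { .id = 1 };
	printf("embedded id=%d\n",
	       container_of(&e.member, struct embedded_owner, member)->id);

	struct remote_owner r = { .id = 2 };
	struct remote_member *m = malloc(sizeof(*m));
	if (!m)
		return 1;
	r.member = m;
	m->owner = &r;			/* container_of cannot work here */
	printf("remote id=%d\n", m->owner->id);
	free(m);
	return 0;
}
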
@@ -612,9 +612,9 @@ retry:
 	 * we will to add it back at the end of reclaim to its correct
 	 * position in the tree.
 	 */
-	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
-	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
-	    !css_tryget(&mz->mem->css))
+	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+	    !css_tryget(&mz->memcg->css))
 		goto retry;
 done:
 	return mz;
@@ -1772,22 +1772,22 @@ static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *memcg;
 	wait_queue_t	wait;
 };
 
 static int memcg_oom_wake_function(wait_queue_t *wait,
 	unsigned mode, int sync, void *arg)
 {
-	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
-			  *oom_wait_memcg;
+	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
+	struct mem_cgroup *oom_wait_memcg;
 	struct oom_wait_info *oom_wait_info;
 
 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
-	oom_wait_memcg = oom_wait_info->mem;
+	oom_wait_memcg = oom_wait_info->memcg;
 
 	/*
-	 * Both of oom_wait_info->mem and wake_mem are stable under us.
+	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
 	 * Then we can use css_is_ancestor without taking care of RCU.
 	 */
 	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
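
This hunk is a textbook instance of the wait-queue callback pattern: a wait_queue_t is embedded in a larger context struct (oom_wait_info), and the wake function uses container_of() to climb back out to that context. A self-contained sketch of the same shape, with hypothetical userspace types standing in for the kernel ones:

/*
 * Userspace sketch of the wake-function pattern above; wait_entry and
 * oom_wait_ctx are hypothetical stand-ins for wait_queue_t and
 * oom_wait_info.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry {
	int (*func)(struct wait_entry *wait, void *arg);
};

struct oom_wait_ctx {
	int memcg_id;			/* stands in for the memcg pointer */
	struct wait_entry wait;		/* embedded, like owait.wait */
};

/* Recovers its context the same way memcg_oom_wake_function does:
 * container_of() from the embedded wait entry. */
static int wake_function(struct wait_entry *wait, void *arg)
{
	struct oom_wait_ctx *ctx = container_of(wait, struct oom_wait_ctx, wait);
	int *woken_id = arg;

	return ctx->memcg_id == *woken_id;	/* crude stand-in for the
						   ancestor check above */
}

int main(void)
{
	struct oom_wait_ctx owait = {
		.memcg_id = 7,
		.wait = { .func = wake_function },
	};
	int target = 7;

	printf("woken=%d\n", owait.wait.func(&owait.wait, &target));
	return 0;
}
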
@@ -1816,7 +1816,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 	struct oom_wait_info owait;
 	bool locked, need_to_kill;
 
-	owait.mem = memcg;
+	owait.memcg = memcg;
 	owait.wait.flags = 0;
 	owait.wait.func = memcg_oom_wake_function;
 	owait.wait.private = current;
@@ -3549,7 +3549,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 			break;
 
 		nr_scanned = 0;
-		reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone,
+		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
 						gfp_mask, &nr_scanned);
 		nr_reclaimed += reclaimed;
 		*total_scanned += nr_scanned;
@@ -3576,13 +3576,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 				next_mz =
 				__mem_cgroup_largest_soft_limit_node(mctz);
 				if (next_mz == mz)
-					css_put(&next_mz->mem->css);
+					css_put(&next_mz->memcg->css);
 				else /* next_mz == NULL or other memcg */
 					break;
 			} while (1);
 		}
-		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
-		excess = res_counter_soft_limit_excess(&mz->mem->res);
+		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+		excess = res_counter_soft_limit_excess(&mz->memcg->res);
 		/*
 		 * One school of thought says that we should not add
 		 * back the node to the tree if reclaim returns 0.
@@ -3592,9 +3592,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 		 * term TODO.
 		 */
 		/* If excess == 0, no tree ops */
-		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
+		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
 		spin_unlock(&mctz->lock);
-		css_put(&mz->mem->css);
+		css_put(&mz->memcg->css);
 		loop++;
 		/*
 		 * Could not reclaim anything and there are no more
@@ -3607,7 +3607,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 			break;
 	} while (!nr_reclaimed);
 	if (next_mz)
-		css_put(&next_mz->mem->css);
+		css_put(&next_mz->memcg->css);
 	return nr_reclaimed;
 }
 
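
The reclaim loop above leans on the css_tryget()/css_put() pair: a memcg found in the soft-limit tree is only pinned if its reference count is still live, and every successful tryget is balanced by exactly one put. A rough userspace sketch of that tryget discipline, assuming a plain C11 atomic counter in place of the css machinery:

/*
 * Hypothetical userspace sketch of the tryget/put refcount pattern;
 * a plain atomic counter stands in for the cgroup css refcount.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
	atomic_int count;	/* 0 means the object is going away */
};

static bool ref_tryget(struct ref *r)
{
	int old = atomic_load(&r->count);

	/* Take a reference only while at least one is still held,
	 * like css_tryget() refusing a dying cgroup. */
	while (old > 0)
		if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
			return true;
	return false;
}

static void ref_put(struct ref *r)
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		printf("last reference dropped, free object\n");
}

int main(void)
{
	struct ref r = { .count = 1 };

	if (ref_tryget(&r)) {	/* like css_tryget(&mz->memcg->css) */
		/* ... reclaim work against the pinned object ... */
		ref_put(&r);	/* like css_put(&mz->memcg->css) */
	}
	ref_put(&r);		/* drop the initial reference */
	return 0;
}
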
@@ -4098,38 +4098,38 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
 	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
 	unsigned long node_nr;
 	struct cgroup *cont = m->private;
-	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
-	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
+	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
 	seq_printf(m, "total=%lu", total_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
+	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
 	seq_printf(m, "file=%lu", file_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 						LRU_ALL_FILE);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
+	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
 	seq_printf(m, "anon=%lu", anon_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 						LRU_ALL_ANON);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
+	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
 	seq_printf(m, "unevictable=%lu", unevictable_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 						BIT(LRU_UNEVICTABLE));
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
@@ -4141,12 +4141,12 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 				 struct cgroup_map_cb *cb)
 {
-	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 	struct mcs_total_stat mystat;
 	int i;
 
 	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_local_stat(mem_cont, &mystat);
+	mem_cgroup_get_local_stat(memcg, &mystat);
 
 
 	for (i = 0; i < NR_MCS_STAT; i++) {
@@ -4158,14 +4158,14 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 	/* Hierarchical information */
 	{
 		unsigned long long limit, memsw_limit;
-		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
+		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
 		cb->fill(cb, "hierarchical_memory_limit", limit);
 		if (do_swap_account)
 			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
 	}
 
 	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_total_stat(mem_cont, &mystat);
+	mem_cgroup_get_total_stat(memcg, &mystat);
 	for (i = 0; i < NR_MCS_STAT; i++) {
 		if (i == MCS_SWAP && !do_swap_account)
 			continue;
@@ -4181,7 +4181,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 
 	for_each_online_node(nid)
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-			mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+			mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
 			recent_rotated[0] +=
 				mz->reclaim_stat.recent_rotated[0];
@@ -4758,7 +4758,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
-		mz->mem = memcg;
+		mz->memcg = memcg;
 	}
 	memcg->info.nodeinfo[node] = pn;
 	return 0;
@@ -4771,29 +4771,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *memcg;
 	int size = sizeof(struct mem_cgroup);
 
 	/* Can be very big if MAX_NUMNODES is very big */
 	if (size < PAGE_SIZE)
-		mem = kzalloc(size, GFP_KERNEL);
+		memcg = kzalloc(size, GFP_KERNEL);
 	else
-		mem = vzalloc(size);
+		memcg = vzalloc(size);
 
-	if (!mem)
+	if (!memcg)
 		return NULL;
 
-	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-	if (!mem->stat)
+	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+	if (!memcg->stat)
 		goto out_free;
-	spin_lock_init(&mem->pcp_counter_lock);
-	return mem;
+	spin_lock_init(&memcg->pcp_counter_lock);
+	return memcg;
 
 out_free:
 	if (size < PAGE_SIZE)
-		kfree(mem);
+		kfree(memcg);
 	else
-		vfree(mem);
+		vfree(memcg);
 	return NULL;
 }
 
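mem_cgroup_alloc() also shows the small-versus-large allocation split: kzalloc() for objects under a page, vzalloc() for anything bigger, with the error path obliged to repeat the size check so it frees with the matching allocator. Later kernels fold this pattern into kvzalloc()/kvfree(). A userspace approximation, with calloc and mmap as hypothetical stand-ins for the two kernel allocators:

/*
 * Userspace approximation (hypothetical helpers): calloc stands in for
 * kzalloc, mmap for vzalloc.  As in mem_cgroup_alloc(), the free path
 * must know which allocator was used.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define PAGE_SIZE 4096UL

static void *xzalloc(size_t size)
{
	if (size < PAGE_SIZE)
		return calloc(1, size);		/* small: heap, zeroed */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;	/* large: pages, zeroed */
}

static void xfree(void *p, size_t size)
{
	if (!p)
		return;
	if (size < PAGE_SIZE)			/* mirror the alloc choice,
						   like the out_free path */
		free(p);
	else
		munmap(p, size);
}

int main(void)
{
	void *small = xzalloc(128);
	void *big = xzalloc(16 * PAGE_SIZE);

	printf("small=%p big=%p\n", small, big);
	xfree(small, 128);
	xfree(big, 16 * PAGE_SIZE);
	return 0;
}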