Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	81
1 file changed, 54 insertions(+), 27 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a5d3ad..cf7d027a8844 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -359,7 +359,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +735,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 				struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *mem = NULL;
 
@@ -1663,15 +1663,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
 	/* If memsw_is_minimum==1, swap-out is of-no-use. */
-	if (root_mem->memsw_is_minimum)
+	if (!check_soft && root_mem->memsw_is_minimum)
 		noswap = true;
 
 	while (1) {
 		victim = mem_cgroup_select_victim(root_mem);
 		if (victim == root_mem) {
 			loop++;
-			if (loop >= 1)
-				drain_all_stock_async();
+			/*
+			 * We are not draining per cpu cached charges during
+			 * soft limit reclaim because global reclaim doesn't
+			 * care about charges. It tries to free some memory and
+			 * charges will not give any.
+			 */
+			if (!check_soft && loop >= 1)
+				drain_all_stock_async(root_mem);
 			if (loop >= 2) {
 				/*
 				 * If we have not been able to reclaim
@@ -1934,9 +1940,11 @@ struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 	struct work_struct work;
+	unsigned long flags;
+#define FLUSHING_CACHED_CHARGE	(0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1992,7 @@ static void drain_local_stock(struct work_struct *dummy)
 {
 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
 	drain_stock(stock);
+	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
 /*
@@ -2008,26 +2017,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  * expects some charges will be back to res_counter later but cannot wait for
  * it.
  */
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-	int cpu;
-	/* This function is for scheduling "drain" in asynchronous way.
-	 * The result of "drain" is not directly handled by callers. Then,
-	 * if someone is calling drain, we don't have to call drain more.
-	 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
-	 * there is a race. We just do loose check here.
+	int cpu, curcpu;
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
 	 */
-	if (atomic_read(&memcg_drain_count))
+	if (!mutex_trylock(&percpu_charge_mutex))
 		return;
 	/* Notify other cpus that system-wide "drain" is running */
-	atomic_inc(&memcg_drain_count);
 	get_online_cpus();
+	/*
+	 * Get a hint for avoiding draining charges on the current cpu,
+	 * which must be exhausted by our charging. It is not required that
+	 * this be a precise check, so we use raw_smp_processor_id() instead of
+	 * getcpu()/putcpu().
+	 */
+	curcpu = raw_smp_processor_id();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		schedule_work_on(cpu, &stock->work);
+		struct mem_cgroup *mem;
+
+		if (cpu == curcpu)
+			continue;
+
+		mem = stock->cached;
+		if (!mem)
+			continue;
+		if (mem != root_mem) {
+			if (!root_mem->use_hierarchy)
+				continue;
+			/* check whether "mem" is under tree of "root_mem" */
+			if (!css_is_ancestor(&mem->css, &root_mem->css))
+				continue;
+		}
+		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+			schedule_work_on(cpu, &stock->work);
 	}
 	put_online_cpus();
-	atomic_dec(&memcg_drain_count);
+	mutex_unlock(&percpu_charge_mutex);
 	/* We don't wait for flush_work */
 }
 
@@ -2035,9 +2063,9 @@ static void drain_all_stock_async(void)
 static void drain_all_stock_sync(void)
 {
 	/* called when force_empty is called */
-	atomic_inc(&memcg_drain_count);
+	mutex_lock(&percpu_charge_mutex);
 	schedule_on_each_cpu(drain_local_stock);
-	atomic_dec(&memcg_drain_count);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
@@ -4640,6 +4668,7 @@ static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "numa_stat",
 		.open = mem_control_numa_stat_open,
+		.mode = S_IRUGO,
 	},
 #endif
 };
@@ -5414,18 +5443,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct cgroup *old_cont,
 				struct task_struct *p)
 {
-	struct mm_struct *mm;
+	struct mm_struct *mm = get_task_mm(p);
 
-	if (!mc.to)
-		/* no need to move charge */
-		return;
-
-	mm = get_task_mm(p);
 	if (mm) {
-		mem_cgroup_move_charge(mm);
+		if (mc.to)
+			mem_cgroup_move_charge(mm);
+		put_swap_token(mm);
 		mmput(mm);
 	}
-	mem_cgroup_clear_mc();
+	if (mc.to)
+		mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
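
Editor's note: the sketch below is a minimal, self-contained userspace model (plain C11 with a pthread mutex, not kernel code and not part of this patch) of the coordination pattern the reworked drain_all_stock_async() relies on: a trylock so only one drainer queues work at a time, plus a per-cpu "flushing" flag that is test-and-set before queueing and cleared by the drain worker, so no stock is queued twice. All names in the sketch (stock_model, drain_one, drain_all_async, NCPU) are invented for illustration and do not exist in the kernel.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4	/* stand-in for for_each_online_cpu() */

struct stock_model {
	atomic_bool flushing;	/* models FLUSHING_CACHED_CHARGE in stock->flags */
	int cached;		/* models the per-cpu cached charge (nr_pages) */
};

static struct stock_model stocks[NCPU];
/* models percpu_charge_mutex */
static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/* models drain_local_stock(): give the cache back, then clear the flag */
static void drain_one(struct stock_model *s)
{
	s->cached = 0;
	atomic_store(&s->flushing, false);
}

/* models drain_all_stock_async(): single drainer, skip the calling cpu */
static void drain_all_async(int curcpu)
{
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;	/* someone is already draining; don't pile up work */
	for (int cpu = 0; cpu < NCPU; cpu++) {
		if (cpu == curcpu || stocks[cpu].cached == 0)
			continue;
		/* queue the drain only if it is not already pending */
		if (!atomic_exchange(&stocks[cpu].flushing, true))
			drain_one(&stocks[cpu]);	/* the kernel would schedule_work_on() instead */
	}
	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	for (int i = 0; i < NCPU; i++)
		stocks[i].cached = 32;
	drain_all_async(0);	/* pretend cpu 0 is the cpu doing the charging */
	for (int i = 0; i < NCPU; i++)
		printf("cpu%d cached=%d\n", i, stocks[i].cached);
	return 0;
}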