Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  54
1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0b1a32cbd74d..c39a177bb641 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -359,7 +359,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -1671,7 +1671,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                 if (victim == root_mem) {
                         loop++;
                         if (loop >= 1)
-                                drain_all_stock_async();
+                                drain_all_stock_async(root_mem);
                         if (loop >= 2) {
                                 /*
                                  * If we have not been able to reclaim
@@ -1934,9 +1934,11 @@ struct memcg_stock_pcp {
         struct mem_cgroup *cached; /* this never be root cgroup */
         unsigned int nr_pages;
         struct work_struct work;
+        unsigned long flags;
+#define FLUSHING_CACHED_CHARGE (0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
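The new percpu_charge_mutex makes "is a drain already running?" and "claim the right to run one" a single atomic step, where the old memcg_drain_count scheme used a separate atomic_read() followed by atomic_inc(). A minimal userspace sketch of that trylock pattern, using pthreads rather than the kernel mutex API (try_schedule_drain() is an illustrative name, not a memcontrol.c symbol):

#include <pthread.h>
#include <stdio.h>

/*
 * Model of percpu_charge_mutex: check-and-acquire is one atomic operation,
 * so at most one caller ever starts scheduling drain work; everyone else
 * returns immediately instead of piling up more kworker runs.
 */
static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

static void try_schedule_drain(void)
{
        if (pthread_mutex_trylock(&charge_mutex) != 0)
                return;                 /* a drain is already in flight */
        printf("scheduling per-cpu drain work\n");
        pthread_mutex_unlock(&charge_mutex);
}

pthread_mutex_trylock(), like mutex_trylock(), never sleeps on contention, which is what lets the reclaim path call this opportunistically. The FLUSHING_CACHED_CHARGE bit added to each stock plays a complementary per-cpu role, sketched after the next hunk.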
@@ -1984,6 +1986,7 @@ static void drain_local_stock(struct work_struct *dummy)
 {
         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
         drain_stock(stock);
+        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
 /*
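The clear_bit() above is the second half of a set/clear handshake: drain_all_stock_async() below sets FLUSHING_CACHED_CHARGE with test_and_set_bit() before queueing a work item for a cpu, and the worker clears it once the stock has been drained, so each cpu carries at most one pending drain. A small C11-atomics sketch of that lifecycle (struct stock_model, queue_drain() and drain_worker() are illustrative names, not memcontrol.c symbols):

#include <stdatomic.h>
#include <stdbool.h>

/* Model of the per-cpu FLUSHING_CACHED_CHARGE bit. */
struct stock_model {
        atomic_flag flushing;           /* stands in for bit 0 of stock->flags */
        unsigned int nr_pages;          /* stands in for the cached charge */
};

/* Scheduler side: returns true if the caller should queue the work item. */
static bool queue_drain(struct stock_model *s)
{
        return !atomic_flag_test_and_set(&s->flushing);
}

/* Worker side: drop the cached charge, then allow the next drain request. */
static void drain_worker(struct stock_model *s)
{
        s->nr_pages = 0;
        atomic_flag_clear(&s->flushing);
}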
@@ -2008,26 +2011,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  * expects some charges will be back to res_counter later but cannot wait for
  * it.
  */
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-        int cpu;
-        /* This function is for scheduling "drain" in asynchronous way.
-         * The result of "drain" is not directly handled by callers. Then,
-         * if someone is calling drain, we don't have to call drain more.
-         * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
-         * there is a race. We just do loose check here.
+        int cpu, curcpu;
+        /*
+         * If someone calls draining, avoid adding more kworker runs.
          */
-        if (atomic_read(&memcg_drain_count))
+        if (!mutex_trylock(&percpu_charge_mutex))
                 return;
         /* Notify other cpus that system-wide "drain" is running */
-        atomic_inc(&memcg_drain_count);
         get_online_cpus();
+        /*
+         * Get a hint for avoiding draining charges on the current cpu,
+         * which must be exhausted by our charging. It is not required that
+         * this be a precise check, so we use raw_smp_processor_id() instead of
+         * getcpu()/putcpu().
+         */
+        curcpu = raw_smp_processor_id();
         for_each_online_cpu(cpu) {
                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-                schedule_work_on(cpu, &stock->work);
+                struct mem_cgroup *mem;
+
+                if (cpu == curcpu)
+                        continue;
+
+                mem = stock->cached;
+                if (!mem)
+                        continue;
+                if (mem != root_mem) {
+                        if (!root_mem->use_hierarchy)
+                                continue;
+                        /* check whether "mem" is under tree of "root_mem" */
+                        if (!css_is_ancestor(&mem->css, &root_mem->css))
+                                continue;
+                }
+                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+                        schedule_work_on(cpu, &stock->work);
         }
         put_online_cpus();
-        atomic_dec(&memcg_drain_count);
+        mutex_unlock(&percpu_charge_mutex);
         /* We don't wait for flush_work */
 }
 
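The per-cpu loop above only drains stocks that matter to this reclaim: the current cpu is skipped because its stock was just exhausted by the charging attempt, an empty stock is skipped, and a stock cached for an unrelated memcg is skipped unless root_mem uses hierarchy and the cached group sits somewhere below it (the css_is_ancestor() test). A toy model of that filter over plain parent pointers, not the real cgroup API (struct memcg_model, under_tree() and should_drain() are illustrative names):

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for the ancestry walk done by css_is_ancestor(). */
struct memcg_model {
        struct memcg_model *parent;     /* NULL at the top of the hierarchy */
        bool use_hierarchy;
};

static bool under_tree(const struct memcg_model *mem,
                       const struct memcg_model *root)
{
        for (; mem; mem = mem->parent)
                if (mem == root)
                        return true;
        return false;
}

/* Mirror of the filter in the loop above: drain only related stocks. */
static bool should_drain(const struct memcg_model *cached,
                         const struct memcg_model *root)
{
        if (!cached)
                return false;
        if (cached == root)
                return true;
        return root->use_hierarchy && under_tree(cached, root);
}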
@@ -2035,9 +2057,9 @@ static void drain_all_stock_async(void)
 static void drain_all_stock_sync(void)
 {
         /* called when force_empty is called */
-        atomic_inc(&memcg_drain_count);
+        mutex_lock(&percpu_charge_mutex);
         schedule_on_each_cpu(drain_local_stock);
-        atomic_dec(&memcg_drain_count);
+        mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
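drain_all_stock_sync() differs from the async path in two ways: it takes percpu_charge_mutex with a blocking mutex_lock(), and schedule_on_each_cpu() returns only after drain_local_stock() has finished on every cpu, whereas the async path queues work and returns without waiting ("We don't wait for flush_work"). A small pthread sketch of that run-everywhere-and-wait behaviour (NR_CPUS_MODEL, drain_one() and drain_everywhere_and_wait() are illustrative names, not memcontrol.c symbols):

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS_MODEL 4                 /* illustrative cpu count */

/* Stand-in for drain_local_stock() running in one cpu's worker. */
static void *drain_one(void *arg)
{
        printf("draining stock of model cpu %ld\n", (long)arg);
        return NULL;
}

/* Sync flavour: like schedule_on_each_cpu(), return only when all are done. */
static void drain_everywhere_and_wait(void)
{
        pthread_t worker[NR_CPUS_MODEL];
        long cpu;

        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                pthread_create(&worker[cpu], NULL, drain_one, (void *)cpu);
        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                pthread_join(worker[cpu], NULL);  /* wait, unlike the async path */
}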