Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 48
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2f5534e1968c..af920d0f9025 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2154,19 +2154,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 }
 
 /*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back to res_counter later but cannot wait for
- * it.
+ * Drains all per-CPU charge caches for given root_mem resp. subtree
+ * of the hierarchy under it. sync flag says whether we should block
+ * until the work is done.
  */
-static void drain_all_stock_async(struct mem_cgroup *root_mem)
+static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 {
 	int cpu, curcpu;
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
+
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	/*
@@ -2197,17 +2192,42 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
 			schedule_work_on(cpu, &stock->work);
 		}
 	}
+
+	if (!sync)
+		goto out;
+
+	for_each_online_cpu(cpu) {
+		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+			flush_work(&stock->work);
+	}
+out:
 	put_online_cpus();
+}
+
+/*
+ * Tries to drain stocked charges in other cpus. This function is asynchronous
+ * and just put a work per cpu for draining localy on each cpu. Caller can
+ * expects some charges will be back to res_counter later but cannot wait for
+ * it.
+ */
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
+{
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
+	drain_all_stock(root_mem, false);
 	mutex_unlock(&percpu_charge_mutex);
-	/* We don't wait for flush_work */
 }
 
 /* This is a synchronous drain interface. */
-static void drain_all_stock_sync(void)
+static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
 	mutex_lock(&percpu_charge_mutex);
-	schedule_on_each_cpu(drain_local_stock);
+	drain_all_stock(root_mem, true);
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -3856,7 +3876,7 @@ move_account:
 	goto out;
 	/* This is for making all *used* pages to be on LRU. */
 	lru_add_drain_all();
-	drain_all_stock_sync();
+	drain_all_stock_sync(mem);
 	ret = 0;
 	mem_cgroup_start_move(mem);
 	for_each_node_state(node, N_HIGH_MEMORY) {
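
The shape of the change is a common pattern: both drain paths now funnel into one helper, drain_all_stock(root_mem, sync), where the async wrapper only trylocks percpu_charge_mutex (giving up if a drain is already in flight) and the sync wrapper blocks on the mutex and additionally waits for the scheduled per-cpu work to finish. Below is a minimal userspace sketch of that pattern, assuming pthreads and illustrative names (drain_all, drain_all_async, drain_all_sync); it is not the kernel code, only a simplified model of the locking and wait behaviour.

/* Sketch of the unified sync/async drain pattern (illustrative names). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Shared helper: schedules the drain; waits for completion only if sync. */
static void drain_all(bool sync)
{
	printf("scheduling per-cpu drain work\n");
	if (!sync)
		return;		/* async callers do not wait */
	printf("waiting for all drain work to finish\n");
}

/* Async path: opportunistic, bails out if a drain is already running. */
static void drain_all_async(void)
{
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;
	drain_all(false);
	pthread_mutex_unlock(&charge_mutex);
}

/* Sync path: always takes the mutex and waits for the drain to complete. */
static void drain_all_sync(void)
{
	pthread_mutex_lock(&charge_mutex);
	drain_all(true);
	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	drain_all_async();	/* best effort, e.g. from reclaim */
	drain_all_sync();	/* must complete, e.g. from force_empty */
	return 0;
}

The design keeps the expensive, subtree-aware draining logic in one place while letting callers choose only the waiting policy, which is why drain_all_stock_sync() can now take the target mem_cgroup instead of draining every CPU unconditionally.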