Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 40 ++++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46c30fa26acd..41985aa4672d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,6 +92,10 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
+/* work_structs for global per-cpu drains */
+DEFINE_MUTEX(pcpu_drain_mutex);
+DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 volatile unsigned long latent_entropy __latent_entropy;
 EXPORT_SYMBOL(latent_entropy);
@@ -2360,7 +2364,6 @@ static void drain_local_pages_wq(struct work_struct *work)
  */
 void drain_all_pages(struct zone *zone)
 {
-	struct work_struct __percpu *works;
 	int cpu;
 
 	/*
@@ -2373,7 +2376,16 @@ void drain_all_pages(struct zone *zone)
 	if (current->flags & PF_WQ_WORKER)
 		return;
 
-	works = alloc_percpu_gfp(struct work_struct, GFP_ATOMIC);
+	/*
+	 * Do not drain if one is already in progress unless it's specific to
+	 * a zone. Such callers are primarily CMA and memory hotplug and need
+	 * the drain to be complete when the call returns.
+	 */
+	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
+		if (!zone)
+			return;
+		mutex_lock(&pcpu_drain_mutex);
+	}
 
 	/*
 	 * We don't care about racing with CPU hotplug event
@@ -2406,23 +2418,15 @@ void drain_all_pages(struct zone *zone)
 		cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
 
-	if (works) {
-		for_each_cpu(cpu, &cpus_with_pcps) {
-			struct work_struct *work = per_cpu_ptr(works, cpu);
-			INIT_WORK(work, drain_local_pages_wq);
-			schedule_work_on(cpu, work);
-		}
-		for_each_cpu(cpu, &cpus_with_pcps)
-			flush_work(per_cpu_ptr(works, cpu));
-	} else {
-		for_each_cpu(cpu, &cpus_with_pcps) {
-			struct work_struct work;
-
-			INIT_WORK(&work, drain_local_pages_wq);
-			schedule_work_on(cpu, &work);
-			flush_work(&work);
-		}
-	}
+	for_each_cpu(cpu, &cpus_with_pcps) {
+		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+		INIT_WORK(work, drain_local_pages_wq);
+		schedule_work_on(cpu, work);
+	}
+	for_each_cpu(cpu, &cpus_with_pcps)
+		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+
+	mutex_unlock(&pcpu_drain_mutex);
 }
 
 #ifdef CONFIG_HIBERNATION
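
The pattern this patch introduces — a trylock that lets opportunistic callers bail out while zone-specific callers (CMA, memory hotplug) fall back to a blocking lock, followed by a queue-everything-then-wait pass over static per-CPU work items — can be sketched in userspace. The following is a minimal pthreads analogue, not kernel code: drain_all, must_complete and NR_CPUS are illustrative names, and plain threads stand in for the per-CPU workqueue items.

/* build: cc -pthread drain_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Static per-"CPU" work slots, analogous to
 * DEFINE_PER_CPU(struct work_struct, pcpu_drain). */
static pthread_t pcpu_drain[NR_CPUS];
static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *drain_local(void *arg)
{
	printf("draining cpu %ld\n", (long)arg);
	return NULL;
}

static bool drain_all(bool must_complete)
{
	long cpu;

	/*
	 * The patch's locking rule: opportunistic callers skip the
	 * drain if one is already running; callers that need the drain
	 * complete before returning block until they can run it.
	 */
	if (pthread_mutex_trylock(&drain_mutex) != 0) {
		if (!must_complete)
			return false;
		pthread_mutex_lock(&drain_mutex);
	}

	/* Queue all the work first (schedule_work_on)... */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&pcpu_drain[cpu], NULL, drain_local,
			       (void *)cpu);

	/* ...then wait for completion in a second pass (flush_work). */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(pcpu_drain[cpu], NULL);

	pthread_mutex_unlock(&drain_mutex);
	return true;
}

int main(void)
{
	drain_all(true);
	return 0;
}

Queueing everything before waiting lets the drains on different CPUs run concurrently, which is why the patch keeps two separate for_each_cpu passes; the old GFP_ATOMIC-failure fallback it removes had to schedule and flush one work item at a time, serializing the drains.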