author     Linus Torvalds <torvalds@linux-foundation.org>  2017-04-08 04:35:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-04-08 04:35:32 -0400
commit     56c29979653f6313b6c47b22f54ed54150fd92ed (patch)
tree       e41a49f32e8f22a9c096bb7dfa04559db61858be
parent     cefdc26e86728812aea54248a534fd4a5da2a43d (diff)
parent     ce612879ddc78ea7e4de4be80cba4ebf9caa07ee (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: move pcp and lru-pcp draining into single wq
  mailmap: update Yakir Yang email address
  mm, swap_cgroup: reschedule when needed in swap_cgroup_swapoff()
  dax: fix radix tree insertion race
  mm, thp: fix setting of defer+madvise thp defrag mode
  ptrace: fix PTRACE_LISTEN race corrupting task->state
  vmlinux.lds: add missing VMLINUX_SYMBOL macros
  mm/page_alloc.c: fix print order in show_free_areas()
  userfaultfd: report actual registered features in fdinfo
  mm: fix page_vma_mapped_walk() for ksm pages
-rw-r--r--  .mailmap                           |  1
-rw-r--r--  fs/dax.c                           | 35
-rw-r--r--  fs/userfaultfd.c                   |  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  4
-rw-r--r--  kernel/ptrace.c                    | 14
-rw-r--r--  mm/huge_memory.c                   | 12
-rw-r--r--  mm/internal.h                      |  7
-rw-r--r--  mm/page_alloc.c                    | 11
-rw-r--r--  mm/page_vma_mapped.c               | 15
-rw-r--r--  mm/swap.c                          | 27
-rw-r--r--  mm/swap_cgroup.c                   |  2
-rw-r--r--  mm/vmstat.c                        | 15
12 files changed, 85 insertions, 60 deletions
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..e229922dc7f0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -171,6 +171,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
 	}
 	spin_lock_irq(&mapping->tree_lock);
 
+	if (!entry) {
+		/*
+		 * We needed to drop the page_tree lock while calling
+		 * radix_tree_preload() and we didn't have an entry to
+		 * lock.  See if another thread inserted an entry at
+		 * our index during this time.
+		 */
+		entry = __radix_tree_lookup(&mapping->page_tree, index,
+				NULL, &slot);
+		if (entry) {
+			radix_tree_preload_end();
+			spin_unlock_irq(&mapping->tree_lock);
+			goto restart;
+		}
+	}
+
 	if (pmd_downgrade) {
 		radix_tree_delete(&mapping->page_tree, index);
 		mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
 	if (err) {
 		spin_unlock_irq(&mapping->tree_lock);
 		/*
-		 * Someone already created the entry?  This is a
-		 * normal failure when inserting PMDs in a range
-		 * that already contains PTEs.  In that case we want
-		 * to return -EEXIST immediately.
-		 */
-		if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
-			goto restart;
-		/*
-		 * Our insertion of a DAX PMD entry failed, most
-		 * likely because it collided with a PTE sized entry
-		 * at a different index in the PMD range.  We haven't
-		 * inserted anything into the radix tree and have no
-		 * waiters to wake.
+		 * Our insertion of a DAX entry failed, most likely
+		 * because we were inserting a PMD entry and it
+		 * collided with a PTE sized entry at a different
+		 * index in the PMD range.  We haven't inserted
+		 * anything into the radix tree and have no waiters to
+		 * wake.
 		 */
 		return ERR_PTR(err);
 	}
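
Note: the dax hunk above closes a classic drop-and-recheck race. The tree lock must be released around radix_tree_preload(), which may sleep, so another thread can insert at the same index in that window; on reacquiring the lock the slot is looked up again and the operation restarts if it is now occupied. A minimal userspace sketch of the same lookup/drop/recheck pattern, with hypothetical names (not the kernel code itself):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slots[256];        /* toy stand-in for the radix tree */

    /* Insert a fresh entry at index unless another thread won the race
     * while the lock was dropped for the (possibly sleeping) allocation. */
    static void *get_or_insert(unsigned int index)
    {
            void *entry;
    restart:
            pthread_mutex_lock(&tree_lock);
            entry = slots[index];
            if (!entry) {
                    pthread_mutex_unlock(&tree_lock);
                    entry = malloc(64);     /* analogue of radix_tree_preload() */
                    pthread_mutex_lock(&tree_lock);
                    if (slots[index]) {     /* the fix: re-lookup after relocking */
                            pthread_mutex_unlock(&tree_lock);
                            free(entry);
                            goto restart;
                    }
                    slots[index] = entry;
            }
            pthread_mutex_unlock(&tree_lock);
            return entry;
    }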
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 	 *	protocols: aa:... bb:...
 	 */
 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
-		   pending, total, UFFD_API, UFFD_API_FEATURES,
+		   pending, total, UFFD_API, ctx->features,
 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
 }
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7cdfe167074f..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -261,9 +261,9 @@
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA					\
-	__start_ro_after_init = .;				\
+	VMLINUX_SYMBOL(__start_ro_after_init) = .;		\
 	*(.data..ro_after_init)					\
-	__end_ro_after_init = .;
+	VMLINUX_SYMBOL(__end_ro_after_init) = .;
 #endif
 
 /*
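
For context: VMLINUX_SYMBOL() decorates a C identifier with the symbol prefix some toolchains require, so that names referenced from the linker script resolve against the compiler-emitted symbols. A simplified sketch of the idea — not the kernel's exact definition, and the config symbol here is illustrative:

    /* On targets whose toolchain prefixes C symbols with an underscore,
     * names referenced from the linker script must carry the same prefix. */
    #ifdef HAVE_UNDERSCORE_SYMBOL_PREFIX    /* illustrative config symbol */
    #define VMLINUX_SYMBOL(sym)     _##sym
    #else
    #define VMLINUX_SYMBOL(sym)     sym
    #endif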
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
 
 	WARN_ON(!task->ptrace || task->parent != current);
 
+	/*
+	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+	 * Recheck state under the lock to close this race.
+	 */
 	spin_lock_irq(&task->sighand->siglock);
-	if (__fatal_signal_pending(task))
-		wake_up_state(task, __TASK_TRACED);
-	else
-		task->state = TASK_TRACED;
+	if (task->state == __TASK_TRACED) {
+		if (__fatal_signal_pending(task))
+			wake_up_state(task, __TASK_TRACED);
+		else
+			task->state = TASK_TRACED;
+	}
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
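
The shape of this fix is check-then-act revalidation: after PTRACE_LISTEN the tracee may no longer be frozen by the time the tracer gets here, so the state is re-tested under the siglock before being rewritten. A userspace sketch of the pattern, with hypothetical names:

    #include <pthread.h>

    enum state { FROZEN, TRACED, RUNNING };

    static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
    static enum state task_state = FROZEN;

    /* Only downgrade FROZEN -> TRACED if the task is in fact still
     * frozen; another path may already have moved it to RUNNING, and
     * blindly overwriting the state would corrupt it. */
    static void unfreeze(void)
    {
            pthread_mutex_lock(&siglock);
            if (task_state == FROZEN)       /* revalidate under the lock */
                    task_state = TRACED;
            pthread_mutex_unlock(&siglock);
    }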
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..fef4cf210cc7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("defer", buf,
-		    min(sizeof("defer")-1, count))) {
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
-		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("defer+madvise", buf,
 		    min(sizeof("defer+madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("defer", buf,
+		    min(sizeof("defer")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("madvise", buf,
 		    min(sizeof("madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
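
The reordering above is the whole fix: the sysfs write is matched by prefix, and "defer" is a prefix of "defer+madvise", so with the old order a write of "defer+madvise" took the "defer" branch. A standalone illustration of the pitfall:

    #include <stdio.h>
    #include <string.h>

    /* memcmp() with sizeof("defer")-1 compares only five bytes, so the
     * "defer" test also matches "defer+madvise"; the longer literal has
     * to be checked first, which is what the hunk above does. */
    int main(void)
    {
            const char *buf = "defer+madvise";

            if (!memcmp("defer", buf, sizeof("defer") - 1))
                    printf("\"defer\" matches \"%s\" - wrong branch if tested first\n", buf);
            if (!memcmp("defer+madvise", buf, sizeof("defer+madvise") - 1))
                    printf("\"defer+madvise\" matches - must be tested before \"defer\"\n");
            return 0;
    }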
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 enum ttu_flags;
 struct tlbflush_unmap_batch;
 
+
+/*
+ * only for MM internal work items which do not depend on
+ * any allocations or locks which might depend on allocations
+ */
+extern struct workqueue_struct *mm_percpu_wq;
+
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..f3d603cef2c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
 	 */
 	static cpumask_t cpus_with_pcps;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON_ONCE(!mm_percpu_wq))
+		return;
+
 	/* Workqueues cannot recurse */
 	if (current->flags & PF_WQ_WORKER)
 		return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
 		INIT_WORK(work, drain_local_pages_wq);
-		schedule_work_on(cpu, work);
+		queue_work_on(cpu, mm_percpu_wq, work);
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
 			K(node_page_state(pgdat, NR_WRITEBACK)),
+			K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
 					* HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
 #endif
-			K(node_page_state(pgdat, NR_SHMEM)),
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 			node_page_state(pgdat, NR_PAGES_SCANNED),
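
The show_free_areas() change is purely positional: the format string prints shmem before the THP counters, so the matching K(...NR_SHMEM...) argument must sit before them too, and it had drifted behind the #ifdef block. The same pitfall in miniature:

    #include <stdio.h>

    /* Variadic formatting is positional: each value lands under whichever
     * label sits at the same position in the format string. */
    int main(void)
    {
            unsigned long writeback = 10, shmem = 20, shmem_thp = 30;

            /* buggy: shmem passed last, so it prints under shmem_thp's label */
            printf("writeback:%lukB shmem:%lukB shmem_thp:%lukB (wrong)\n",
                   writeback, shmem_thp, shmem);
            /* fixed: argument order matches the labels */
            printf("writeback:%lukB shmem:%lukB shmem_thp:%lukB (right)\n",
                   writeback, shmem, shmem_thp);
            return 0;
    }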
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	/* Only for THP, seek to next pte entry makes sense */
-	if (pvmw->pte) {
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-			return not_found(pvmw);
+	if (pvmw->pte)
 		goto next_pte;
-	}
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
 	while (1) {
 		if (check_pte(pvmw))
 			return true;
-next_pte:	do {
+next_pte:
+		/* Seek to next pte only makes sense for THP */
+		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+			return not_found(pvmw);
+		do {
 			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >=
+			if (pvmw->address >= pvmw->vma->vm_end ||
+			    pvmw->address >=
 				__vma_address(pvmw->page, pvmw->vma) +
 				hpage_nr_pages(pvmw->page) * PAGE_SIZE)
 				return not_found(pvmw);
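
Two things change here: the THP-only check moves under the next_pte label, which every caller now reaches, and the loop gains a pvmw->vma->vm_end bound. The latter is what matters for KSM, where the page's index-derived extent can lie outside the VMA, so stepping by that bound alone could walk past the mapping. A simplified sketch of the fixed two-bound step, with hypothetical names:

    #define PAGE_SIZE 4096UL

    /* Advance page by page, stopping at whichever limit comes first:
     * the end of the VMA or the end of the page's own extent.  With KSM
     * the extent bound alone can lie beyond the VMA, so checking vm_end
     * as well keeps the walk inside the mapping. */
    static unsigned long next_address(unsigned long addr, unsigned long vm_end,
                                      unsigned long page_extent_end)
    {
            addr += PAGE_SIZE;
            if (addr >= vm_end || addr >= page_extent_end)
                    return 0;       /* caller treats 0 as not_found() */
            return addr;
    }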
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
-	lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
-	if (WARN(!lru_add_drain_wq,
-		 "Failed to create workqueue lru_add_drain_wq"))
-		return -ENOMEM;
-
-	return 0;
-}
-early_initcall(lru_init);
-
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
 	int cpu;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON(!mm_percpu_wq))
+		return;
+
 	mutex_lock(&lock);
 	get_online_cpus();
 	cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, lru_add_drain_wq, work);
+			queue_work_on(cpu, mm_percpu_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
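
Both this hunk and the drain_all_pages() one add the same early-boot guard: the shared workqueue is now created in init_mm_internals(), so any path reachable earlier must fail loudly rather than queue onto a NULL workqueue. The pattern in a userspace sketch, with hypothetical names:

    #include <stdio.h>

    struct pool;                        /* opaque worker-pool handle */
    static struct pool *shared_pool;    /* created once during init */

    /* Analogue of the WARN_ON(!mm_percpu_wq) guard: refuse to run, and
     * say so, if someone reaches this path before initialization. */
    static void drain_all(void)
    {
            if (!shared_pool) {
                    fprintf(stderr, "drain_all() called before init\n");
                    return;
            }
            /* ... queue per-CPU drain work on shared_pool ... */
    }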
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
 			struct page *page = map[i];
 			if (page)
 				__free_page(page);
+			if (!(i % SWAP_CLUSTER_MAX))
+				cond_resched();
 		}
 		vfree(map);
 	}
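
The two added lines are the entire fix: freeing a huge map page by page can otherwise monopolize the CPU long enough to stall other tasks, so the loop yields every SWAP_CLUSTER_MAX pages. A userspace rendering of the same periodic-yield idiom, with an illustrative constant:

    #include <sched.h>
    #include <stdlib.h>

    #define YIELD_PERIOD 32     /* stand-in for SWAP_CLUSTER_MAX */

    /* Free a large array of buffers without hogging the CPU: offer the
     * scheduler a chance to run every YIELD_PERIOD iterations instead of
     * paying the yield cost on every element. */
    static void drain_map(void **map, size_t len)
    {
            for (size_t i = 0; i < len; i++) {
                    free(map[i]);
                    if (!(i % YIELD_PERIOD))
                            sched_yield();  /* analogue of cond_resched() */
            }
    }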
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89f95396ec46..809025ed97ea 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
 	}
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
 		if (!delayed_work_pending(dw) && need_update(cpu))
-			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 	}
 	put_online_cpus();
 
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
 			vmstat_update);
 
-	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 	schedule_delayed_work(&shepherd,
 		round_jiffies_relative(sysctl_stat_interval));
 }
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
+struct workqueue_struct *mm_percpu_wq;
+
 void __init init_mm_internals(void)
 {
-#ifdef CONFIG_SMP
-	int ret;
+	int ret __maybe_unused;
 
+	mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+				       WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+
+#ifdef CONFIG_SMP
 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
 					NULL, vmstat_cpu_dead);
 	if (ret < 0)