diff options
author | Tejun Heo <tj@kernel.org> | 2016-09-16 15:49:34 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2016-09-17 13:18:21 -0400 |
commit | eac0337af12b6a55f08c69429400d6530d602dff (patch) | |
tree | a154b887be6b2a848d2ca5a83c1efda5a8918c8b | |
parent | a81f80f3eb759de72d74d18e478873fc0575abc5 (diff) |
slab, workqueue: remove keventd_up() usage
Now that workqueue can handle work item queueing from very early
during boot, there is no need to gate schedule_delayed_work_on() while
!keventd_up(). Remove it.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
-rw-r--r-- | mm/slab.c | 7 |
1 file changed, 1 insertion(+), 6 deletions(-)
@@ -550,12 +550,7 @@ static void start_cpu_timer(int cpu) | |||
550 | { | 550 | { |
551 | struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); | 551 | struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); |
552 | 552 | ||
553 | /* | 553 | if (reap_work->work.func == NULL) { |
554 | * When this gets called from do_initcalls via cpucache_init(), | ||
555 | * init_workqueues() has already run, so keventd will be setup | ||
556 | * at that time. | ||
557 | */ | ||
558 | if (keventd_up() && reap_work->work.func == NULL) { | ||
559 | init_reap_node(cpu); | 554 | init_reap_node(cpu); |
560 | INIT_DEFERRABLE_WORK(reap_work, cache_reap); | 555 | INIT_DEFERRABLE_WORK(reap_work, cache_reap); |
561 | schedule_delayed_work_on(cpu, reap_work, | 556 | schedule_delayed_work_on(cpu, reap_work, |