author	Petr Mladek <pmladek@suse.com>	2016-10-11 16:55:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-11 18:06:33 -0400
commit	3989144f863ac576e6efba298d24b0b02a10d4bb (patch)
tree	d9892a66501c0ab30d583ac5e1f609d66d842156
parent	e700591ae03896c16974d4e1ab58eb296aaa5f59 (diff)
kthread: kthread worker API cleanup
A good practice is to prefix the names of functions by the name of the
subsystem.

The kthread worker API is a mix of classic kthreads and workqueues. Each
worker has a dedicated kthread. It runs a generic function that processes
queued works. It is implemented as part of the kthread subsystem.

This patch renames the existing kthread worker API to use the corresponding
name from the workqueues API prefixed by kthread_:

  __init_kthread_worker()	-> __kthread_init_worker()
  init_kthread_worker()		-> kthread_init_worker()
  init_kthread_work()		-> kthread_init_work()
  insert_kthread_work()		-> kthread_insert_work()
  queue_kthread_work()		-> kthread_queue_work()
  flush_kthread_work()		-> kthread_flush_work()
  flush_kthread_worker()	-> kthread_flush_worker()

Note that the names of DEFINE_KTHREAD_WORK*() macros stay as they are. It is
common that the "DEFINE_" prefix has precedence over the subsystem names.

Note that INIT() macros and init() functions use a different naming scheme.
There is no good solution. There are several reasons for this solution:

  + "init" in the function names stands for the verb "initialize" aka
    "initialize worker", while "INIT" in the macro names stands for the
    noun "INITIALIZER" aka "worker initializer".

  + INIT() macros are used only in DEFINE() macros.

  + init() functions are used close to the other kthread() functions. It
    looks much better if all the functions use the same scheme.

  + There will also be kthread_destroy_worker() that will be used close to
    kthread_cancel_work(). It is related to the init() function. Again, it
    looks better if all functions use the same naming scheme.

  + There are several precedents for such init() function names, e.g.
    amd_iommu_init_device(), free_area_init_node(), jump_label_init_type(),
    regmap_init_mmio_clk().

  + It is not an argument, but it was inconsistent even before.

[arnd@arndb.de: fix linux-next merge conflict]
Link: http://lkml.kernel.org/r/20160908135724.1311726-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1470754545-17632-3-git-send-email-pmladek@suse.com
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
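As a quick orientation, a minimal usage sketch of the renamed API follows,
mirroring the driver pattern seen throughout this patch (loop.c,
sc16is7xx.c, ...). The my_dev structure and my_* functions are hypothetical,
for illustration only; the kthread_* calls are the post-rename names:

	#include <linux/kthread.h>
	#include <linux/err.h>

	struct my_dev {				/* hypothetical device */
		struct kthread_worker worker;
		struct task_struct *worker_task;
		struct kthread_work work;
	};

	static void my_work_fn(struct kthread_work *work)
	{
		/* process one queued item */
	}

	static int my_dev_start(struct my_dev *dev)
	{
		kthread_init_worker(&dev->worker);	/* was init_kthread_worker() */
		dev->worker_task = kthread_run(kthread_worker_fn,
					       &dev->worker, "my_dev");
		if (IS_ERR(dev->worker_task))
			return PTR_ERR(dev->worker_task);

		kthread_init_work(&dev->work, my_work_fn);	/* was init_kthread_work() */
		kthread_queue_work(&dev->worker, &dev->work);	/* was queue_kthread_work() */
		return 0;
	}

	static void my_dev_stop(struct my_dev *dev)
	{
		kthread_flush_worker(&dev->worker);	/* was flush_kthread_worker() */
		kthread_stop(dev->worker_task);
	}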
-rw-r--r--	Documentation/RCU/lockdep-splat.txt	2
-rw-r--r--	arch/x86/kvm/i8254.c	14
-rw-r--r--	crypto/crypto_engine.c	20
-rw-r--r--	drivers/block/loop.c	8
-rw-r--r--	drivers/infiniband/sw/rdmavt/cq.c	10
-rw-r--r--	drivers/md/dm-rq.c	6
-rw-r--r--	drivers/md/dm.c	4
-rw-r--r--	drivers/media/pci/ivtv/ivtv-driver.c	6
-rw-r--r--	drivers/media/pci/ivtv/ivtv-irq.c	2
-rw-r--r--	drivers/net/ethernet/microchip/encx24j600.c	10
-rw-r--r--	drivers/spi/spi.c	18
-rw-r--r--	drivers/tty/serial/sc16is7xx.c	22
-rw-r--r--	include/linux/kthread.h	18
-rw-r--r--	kernel/kthread.c	33
-rw-r--r--	sound/soc/intel/baytrail/sst-baytrail-ipc.c	2
-rw-r--r--	sound/soc/intel/common/sst-ipc.c	6
-rw-r--r--	sound/soc/intel/haswell/sst-haswell-ipc.c	2
-rw-r--r--	sound/soc/intel/skylake/skl-sst-ipc.c	2
18 files changed, 93 insertions, 92 deletions
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index bf9061142827..238e9f61352f 100644
--- a/Documentation/RCU/lockdep-splat.txt
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -57,7 +57,7 @@ Call Trace:
 [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
 [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
 [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
-[<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+[<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
 [<ffffffff817db150>] ? gs_change+0xb/0xb
 
 Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5fb6c620180e..16a7134eedac 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 	 */
 	smp_mb();
 	if (atomic_dec_if_positive(&ps->pending) > 0)
-		queue_kthread_work(&pit->worker, &pit->expired);
+		kthread_queue_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
 	hrtimer_cancel(&pit->pit_state.timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 }
 
 static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
 
-	queue_kthread_work(&pt->worker, &pt->expired);
+	kthread_queue_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&ps->timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 	ps->period = interval;
 	ps->is_periodic = is_period;
 
@@ -667,13 +667,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	init_kthread_worker(&pit->worker);
+	kthread_init_worker(&pit->worker);
 	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
 				       "kvm-pit/%d", pid_nr);
 	if (IS_ERR(pit->worker_task))
 		goto fail_kthread;
 
-	init_kthread_work(&pit->expired, pit_do_work);
+	kthread_init_work(&pit->expired, pit_do_work);
 
 	pit->kvm = kvm;
 
@@ -730,7 +730,7 @@ void kvm_free_pit(struct kvm *kvm)
 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
 	kvm_pit_set_reinject(pit, false);
 	hrtimer_cancel(&pit->pit_state.timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 	kthread_stop(pit->worker_task);
 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
 	kfree(pit);
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index bfb92ace2c91..6989ba0046df 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* If another context is idling then defer */
 	if (engine->idling) {
-		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
 		goto out;
 	}
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* Only do teardown in the thread */
 	if (!in_kthread) {
-		queue_kthread_work(&engine->kworker,
+		kthread_queue_work(&engine->kworker,
 				   &engine->pump_requests);
 		goto out;
 	}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	ret = ablkcipher_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
 	ret = ahash_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
 	engine->running = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
 	return 0;
 }
@@ -422,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
 
-	init_kthread_worker(&engine->kworker);
+	kthread_init_worker(&engine->kworker);
 	engine->kworker_task = kthread_run(kthread_worker_fn,
 					   &engine->kworker, "%s",
 					   engine->name);
@@ -430,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
 	}
-	init_kthread_work(&engine->pump_requests, crypto_pump_work);
+	kthread_init_work(&engine->pump_requests, crypto_pump_work);
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
@@ -455,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
 	if (ret)
 		return ret;
 
-	flush_kthread_worker(&engine->kworker);
+	kthread_flush_worker(&engine->kworker);
 	kthread_stop(engine->kworker_task);
 
 	return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cbdb3b162718..fa1b7a90ba11 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo)
 
 static void loop_unprepare_queue(struct loop_device *lo)
 {
-	flush_kthread_worker(&lo->worker);
+	kthread_flush_worker(&lo->worker);
 	kthread_stop(lo->worker_task);
 }
 
 static int loop_prepare_queue(struct loop_device *lo)
 {
-	init_kthread_worker(&lo->worker);
+	kthread_init_worker(&lo->worker);
 	lo->worker_task = kthread_run(kthread_worker_fn,
 			&lo->worker, "loop%d", lo->lo_number);
 	if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		break;
 	}
 
-	queue_kthread_work(&lo->worker, &cmd->work);
+	kthread_queue_work(&lo->worker, &cmd->work);
 
 	return BLK_MQ_RQ_QUEUE_OK;
 }
@@ -1696,7 +1696,7 @@ static int loop_init_request(void *data, struct request *rq,
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->rq = rq;
-	init_kthread_work(&cmd->work, loop_queue_work);
+	kthread_init_work(&cmd->work, loop_queue_work);
 
 	return 0;
 }
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index f2f229efbe64..6d9904a4a0ab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 		if (likely(worker)) {
 			cq->notify = RVT_CQ_NONE;
 			cq->triggered++;
-			queue_kthread_work(worker, &cq->comptask);
+			kthread_queue_work(worker, &cq->comptask);
 		}
 	}
 
@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 	cq->ibcq.cqe = entries;
 	cq->notify = RVT_CQ_NONE;
 	spin_lock_init(&cq->lock);
-	init_kthread_work(&cq->comptask, send_complete);
+	kthread_init_work(&cq->comptask, send_complete);
 	cq->queue = wc;
 
 	ret = &cq->ibcq;
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;
 
-	flush_kthread_work(&cq->comptask);
+	kthread_flush_work(&cq->comptask);
 	spin_lock(&rdi->n_cqs_lock);
 	rdi->n_cqs_allocated--;
 	spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
 	rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
 	if (!rdi->worker)
 		return -ENOMEM;
-	init_kthread_worker(rdi->worker);
+	kthread_init_worker(rdi->worker);
 	task = kthread_create_on_node(
 		kthread_worker_fn,
 		rdi->worker,
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
 	/* blocks future queuing from send_complete() */
 	rdi->worker = NULL;
 	smp_wmb(); /* See rdi_cq_enter */
-	flush_kthread_worker(worker);
+	kthread_flush_worker(worker);
 	kthread_stop(worker->task);
 	kfree(worker);
 }
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 5eacce1ef88b..dc75bea0d541 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -581,7 +581,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	if (!md->init_tio_pdu)
 		memset(&tio->info, 0, sizeof(tio->info));
 	if (md->kworker_task)
-		init_kthread_work(&tio->work, map_tio_request);
+		kthread_init_work(&tio->work, map_tio_request);
 }
 
 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -831,7 +831,7 @@ static void dm_old_request_fn(struct request_queue *q)
 		tio = tio_from_request(rq);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
-		queue_kthread_work(&md->kworker, &tio->work);
+		kthread_queue_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
 	}
 }
@@ -853,7 +853,7 @@ int dm_old_init_request_queue(struct mapped_device *md)
 	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
-	init_kthread_worker(&md->kworker);
+	kthread_init_worker(&md->kworker);
 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
 				       "kdmwork-%s", dm_device_name(md));
 	if (IS_ERR(md->kworker_task))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be35258324c1..147af9536d0c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1891,7 +1891,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	spin_unlock_irq(q->queue_lock);
 
 	if (dm_request_based(md) && md->kworker_task)
-		flush_kthread_worker(&md->kworker);
+		kthread_flush_worker(&md->kworker);
 
 	/*
 	 * Take suspend_lock so that presuspend and postsuspend methods
@@ -2147,7 +2147,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	if (dm_request_based(md)) {
 		dm_stop_queue(md->queue);
 		if (md->kworker_task)
-			flush_kthread_worker(&md->kworker);
+			kthread_flush_worker(&md->kworker);
 	}
 
 	flush_workqueue(md->wq);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 374033a5bdaf..ee48c3e09de4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -750,7 +750,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
 	spin_lock_init(&itv->lock);
 	spin_lock_init(&itv->dma_reg_lock);
 
-	init_kthread_worker(&itv->irq_worker);
+	kthread_init_worker(&itv->irq_worker);
 	itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
 					   "%s", itv->v4l2_dev.name);
 	if (IS_ERR(itv->irq_worker_task)) {
@@ -760,7 +760,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
 	/* must use the FIFO scheduler as it is realtime sensitive */
 	sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
 
-	init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+	kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
 
 	/* Initial settings */
 	itv->cxhdl.port = CX2341X_PORT_MEMORY;
@@ -1441,7 +1441,7 @@ static void ivtv_remove(struct pci_dev *pdev)
 	del_timer_sync(&itv->dma_timer);
 
 	/* Kill irq worker */
-	flush_kthread_worker(&itv->irq_worker);
+	kthread_flush_worker(&itv->irq_worker);
 	kthread_stop(itv->irq_worker_task);
 
 	ivtv_streams_cleanup(itv);
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 36ca2d67c812..6efe1f71262c 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1062,7 +1062,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
 	}
 
 	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
-		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+		kthread_queue_work(&itv->irq_worker, &itv->irq_work);
 	}
 
 	spin_unlock(&itv->dma_reg_lock);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 42e34076d2de..b14f0305aa31 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -821,7 +821,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
 	}
 
 	if (oldfilter != priv->rxfilter)
-		queue_kthread_work(&priv->kworker, &priv->setrx_work);
+		kthread_queue_work(&priv->kworker, &priv->setrx_work);
 }
 
 static void encx24j600_hw_tx(struct encx24j600_priv *priv)
@@ -879,7 +879,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
 	/* Remember the skb for deferred processing */
 	priv->tx_skb = skb;
 
-	queue_kthread_work(&priv->kworker, &priv->tx_work);
+	kthread_queue_work(&priv->kworker, &priv->tx_work);
 
 	return NETDEV_TX_OK;
 }
@@ -1037,9 +1037,9 @@ static int encx24j600_spi_probe(struct spi_device *spi)
 		goto out_free;
 	}
 
-	init_kthread_worker(&priv->kworker);
-	init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
-	init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+	kthread_init_worker(&priv->kworker);
+	kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+	kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
 
 	priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
 					 "encx24j600");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8146ccd35a1a..5787b723b593 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1112,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
 	/* If another context is idling the device then defer */
 	if (master->idling) {
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
@@ -1126,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
 	/* Only do teardown in the thread */
 	if (!in_kthread) {
-		queue_kthread_work(&master->kworker,
+		kthread_queue_work(&master->kworker,
 				   &master->pump_messages);
 		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
@@ -1250,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master)
 	master->running = false;
 	master->busy = false;
 
-	init_kthread_worker(&master->kworker);
+	kthread_init_worker(&master->kworker);
 	master->kworker_task = kthread_run(kthread_worker_fn,
 					   &master->kworker, "%s",
 					   dev_name(&master->dev));
@@ -1258,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master)
 		dev_err(&master->dev, "failed to create message pump task\n");
 		return PTR_ERR(master->kworker_task);
 	}
-	init_kthread_work(&master->pump_messages, spi_pump_messages);
+	kthread_init_work(&master->pump_messages, spi_pump_messages);
 
 	/*
 	 * Master config will indicate if this controller should run the
@@ -1331,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master)
 	spin_lock_irqsave(&master->queue_lock, flags);
 	master->cur_msg = NULL;
 	master->cur_msg_prepared = false;
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1357,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master)
 	master->cur_msg = NULL;
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	return 0;
 }
@@ -1404,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master)
 	ret = spi_stop_queue(master);
 
 	/*
-	 * flush_kthread_worker will block until all work is done.
+	 * kthread_flush_worker will block until all work is done.
 	 * If the reason that stop_queue timed out is that the work will never
 	 * finish, then it does no good to call flush/stop thread, so
 	 * return anyway.
@@ -1414,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master)
 		return ret;
 	}
 
-	flush_kthread_worker(&master->kworker);
+	kthread_flush_worker(&master->kworker);
 	kthread_stop(master->kworker_task);
 
 	return 0;
@@ -1438,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 
 	list_add_tail(&msg->queue, &master->queue);
 	if (!master->busy && need_pump)
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a9d94f7cf683..2675792a8f59 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -708,7 +708,7 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 {
 	struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
 
-	queue_kthread_work(&s->kworker, &s->irq_work);
+	kthread_queue_work(&s->kworker, &s->irq_work);
 
 	return IRQ_HANDLED;
 }
@@ -784,7 +784,7 @@ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
 
 	one->config.flags |= SC16IS7XX_RECONF_IER;
 	one->config.ier_clear |= bit;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_stop_tx(struct uart_port *port)
@@ -802,7 +802,7 @@ static void sc16is7xx_start_tx(struct uart_port *port)
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-	queue_kthread_work(&s->kworker, &one->tx_work);
+	kthread_queue_work(&s->kworker, &one->tx_work);
 }
 
 static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -828,7 +828,7 @@ static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
 	one->config.flags |= SC16IS7XX_RECONF_MD;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@@ -957,7 +957,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
 
 	port->rs485 = *rs485;
 	one->config.flags |= SC16IS7XX_RECONF_RS485;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 
 	return 0;
 }
@@ -1030,7 +1030,7 @@ static void sc16is7xx_shutdown(struct uart_port *port)
 
 	sc16is7xx_power(port, 0);
 
-	flush_kthread_worker(&s->kworker);
+	kthread_flush_worker(&s->kworker);
 }
 
 static const char *sc16is7xx_type(struct uart_port *port)
@@ -1176,8 +1176,8 @@ static int sc16is7xx_probe(struct device *dev,
 	s->devtype = devtype;
 	dev_set_drvdata(dev, s);
 
-	init_kthread_worker(&s->kworker);
-	init_kthread_work(&s->irq_work, sc16is7xx_ist);
+	kthread_init_worker(&s->kworker);
+	kthread_init_work(&s->irq_work, sc16is7xx_ist);
 	s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
 				      "sc16is7xx");
 	if (IS_ERR(s->kworker_task)) {
@@ -1234,8 +1234,8 @@ static int sc16is7xx_probe(struct device *dev,
 				     SC16IS7XX_EFCR_RXDISABLE_BIT |
 				     SC16IS7XX_EFCR_TXDISABLE_BIT);
 		/* Initialize kthread work structs */
-		init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
-		init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
 		/* Register port */
 		uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
 
@@ -1301,7 +1301,7 @@ static int sc16is7xx_remove(struct device *dev)
 		sc16is7xx_power(&s->p[i].port, 0);
 	}
 
-	flush_kthread_worker(&s->kworker);
+	kthread_flush_worker(&s->kworker);
 	kthread_stop(s->kworker_task);
 
 	if (!IS_ERR(s->clk))
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c792ee1628d0..e2b095b8ca47 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -57,7 +57,7 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
  * Simple work processor based on kthread.
  *
  * This provides easier way to make use of kthreads. A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using queue/kthread_flush_work()
  * respectively. Queued kthread_works are processed by a kthread
  * running kthread_worker_fn().
  */
@@ -99,23 +99,23 @@ struct kthread_work {
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
-	({ init_kthread_worker(&worker); worker; })
+	({ kthread_init_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
 	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
 #endif
 
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
 			const char *name, struct lock_class_key *key);
 
-#define init_kthread_worker(worker)					\
+#define kthread_init_worker(worker)					\
 	do {								\
 		static struct lock_class_key __key;			\
-		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
+		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
 	} while (0)
 
-#define init_kthread_work(work, fn)					\
+#define kthread_init_work(work, fn)					\
 	do {								\
 		memset((work), 0, sizeof(struct kthread_work));		\
 		INIT_LIST_HEAD(&(work)->node);				\
@@ -124,9 +124,9 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 
 int kthread_worker_fn(void *worker_ptr);
 
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
 
 #endif /* _LINUX_KTHREAD_H */
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 7e77b728f96b..c52a05a8ec52 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -540,7 +540,7 @@ int kthreadd(void *unused)
 	return 0;
 }
 
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
 				const char *name,
 				struct lock_class_key *key)
 {
@@ -549,7 +549,7 @@ void __init_kthread_worker(struct kthread_worker *worker,
 	INIT_LIST_HEAD(&worker->work_list);
 	worker->task = NULL;
 }
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
 
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
@@ -606,7 +606,7 @@ repeat:
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
 /* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
+static void kthread_insert_work(struct kthread_worker *worker,
 				struct kthread_work *work,
 				struct list_head *pos)
 {
@@ -619,7 +619,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
 }
 
 /**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
  * @worker: target kthread_worker
  * @work: kthread_work to queue
  *
@@ -627,7 +627,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
 * must have been created with kthread_worker_create(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work)
 {
 	bool ret = false;
@@ -635,13 +635,13 @@ bool queue_kthread_work(struct kthread_worker *worker,
 
 	spin_lock_irqsave(&worker->lock, flags);
 	if (list_empty(&work->node)) {
-		insert_kthread_work(worker, work, &worker->work_list);
+		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
 	spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_work);
 
 struct kthread_flush_work {
 	struct kthread_work work;
@@ -656,12 +656,12 @@ static void kthread_flush_work_fn(struct kthread_work *work)
 }
 
 /**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
  * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
 {
 	struct kthread_flush_work fwork = {
 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
@@ -682,9 +682,10 @@ retry:
 	}
 
 	if (!list_empty(&work->node))
-		insert_kthread_work(worker, &fwork.work, work->node.next);
+		kthread_insert_work(worker, &fwork.work, work->node.next);
 	else if (worker->current_work == work)
-		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+		kthread_insert_work(worker, &fwork.work,
+				    worker->work_list.next);
 	else
 		noop = true;
 
@@ -693,23 +694,23 @@ retry:
 	if (!noop)
 		wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
 
 /**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
 {
 	struct kthread_flush_work fwork = {
 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 	};
 
-	queue_kthread_work(worker, &fwork.work);
+	kthread_queue_work(worker, &fwork.work);
 	wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index c8455b47388b..7ab14ce65a73 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -338,7 +338,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index a12c7bb08d3b..6c672ac79cce 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -111,7 +111,7 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
 	list_add_tail(&msg->list, &ipc->tx_list);
 	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	if (wait)
 		return tx_wait_done(ipc, msg, rx_data);
@@ -281,7 +281,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
 		return -ENOMEM;
 
 	/* start the IPC message thread */
-	init_kthread_worker(&ipc->kworker);
+	kthread_init_worker(&ipc->kworker);
 	ipc->tx_thread = kthread_run(kthread_worker_fn,
 				     &ipc->kworker, "%s",
 				     dev_name(ipc->dev));
@@ -292,7 +292,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
 		return ret;
 	}
 
-	init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+	kthread_init_work(&ipc->kwork, ipc_tx_msgs);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_init);
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index 91565229d074..e432a31fd9f2 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -818,7 +818,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 0bd01e62622c..797cf4053235 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -464,7 +464,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
 	skl_ipc_int_enable(dsp);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }