author		Tejun Heo <tj@kernel.org>	2012-07-19 16:52:53 -0400
committer	Tejun Heo <tj@kernel.org>	2012-07-22 13:15:28 -0400
commit		46f3d976213452350f9d10b0c2780c2681f7075b (patch)
tree		1a8e0b20c306f84b07eedc97c07308be65a060ce
parent		9a2e03d8ed518a61154f18d83d6466628e519f94 (diff)
kthread_worker: reimplement flush_kthread_work() to allow freeing the work item being executed
kthread_worker provides a minimalistic workqueue-like interface for users that need a dedicated worker thread (e.g. for realtime priority). It has basic queue, flush_work and flush_worker operations which mostly match the workqueue counterparts; however, due to the way flush_work() is implemented, it has one noticeable difference: a work item may not be freed while it is being executed.

While the current users of kthread_worker are okay with this behavior, the restriction does impede some valid use cases. Also, removing this difference isn't difficult and actually makes the code easier to understand.

This patch reimplements flush_kthread_work() such that it uses a flush_work item instead of queue/done sequence numbers.

Signed-off-by: Tejun Heo <tj@kernel.org>
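For orientation, a minimal kthread_worker user written against the 3.x-era API this patch touches might look like the sketch below; my_worker, my_work, my_work_fn and my_setup are invented names, and error handling is trimmed:

#include <linux/err.h>
#include <linux/kthread.h>

static DEFINE_KTHREAD_WORKER(my_worker);

static void my_work_fn(struct kthread_work *work)
{
	/* runs in the dedicated worker thread */
}

static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

static int my_setup(void)
{
	struct task_struct *task;

	/* the dedicated thread just runs kthread_worker_fn() */
	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	queue_kthread_work(&my_worker, &my_work);
	flush_kthread_work(&my_work);	/* wait until my_work has finished */
	return 0;
}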
-rw-r--r--	include/linux/kthread.h	 8
-rw-r--r--	kernel/kthread.c	48
2 files changed, 29 insertions(+), 27 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 0714b24c0e45..22ccf9dee177 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -49,8 +49,6 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
  * can be queued and flushed using queue/flush_kthread_work()
  * respectively.  Queued kthread_works are processed by a kthread
  * running kthread_worker_fn().
- *
- * A kthread_work can't be freed while it is executing.
  */
 struct kthread_work;
 typedef void (*kthread_work_func_t)(struct kthread_work *work);
@@ -59,15 +57,14 @@ struct kthread_worker {
 	spinlock_t		lock;
 	struct list_head	work_list;
 	struct task_struct	*task;
+	struct kthread_work	*current_work;
 };
 
 struct kthread_work {
 	struct list_head	node;
 	kthread_work_func_t	func;
 	wait_queue_head_t	done;
-	atomic_t		flushing;
-	int			queue_seq;
-	int			done_seq;
+	struct kthread_worker	*worker;
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
@@ -79,7 +76,6 @@ struct kthread_work {
 	.node = LIST_HEAD_INIT((work).node),				\
 	.func = (fn),							\
 	.done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),		\
-	.flushing = ATOMIC_INIT(0),					\
 	}
 
 #define DEFINE_KTHREAD_WORKER(worker)					\
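Reassembling the hunks above, the two structures after this patch read:

struct kthread_worker {
	spinlock_t		lock;
	struct list_head	work_list;
	struct task_struct	*task;
	struct kthread_work	*current_work;
};

struct kthread_work {
	struct list_head	node;
	kthread_work_func_t	func;
	wait_queue_head_t	done;
	struct kthread_worker	*worker;
};

current_work lets a flusher see which item the worker is executing right now, and work->worker records which worker an item was last queued on; together they replace the flushing/queue_seq/done_seq bookkeeping.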
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4bfbff36d447..b579af57ea10 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -360,16 +360,12 @@ repeat:
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
+	worker->current_work = work;
 	spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
 		work->func(work);
-		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
-		work->done_seq = work->queue_seq;
-		smp_mb();	/* mb worker-b1 paired with flush-b0 */
-		if (atomic_read(&work->flushing))
-			wake_up_all(&work->done);
 	} else if (!freezing(current))
 		schedule();
 
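Note that after this hunk the worker no longer touches *work once work->func(work) returns, which is exactly what permits freeing the item from its own callback. A hypothetical sketch (my_async_req and my_req_fn are invented; kfree() needs <linux/slab.h>):

struct my_async_req {
	struct kthread_work	work;
	/* ... request payload ... */
};

static void my_req_fn(struct kthread_work *work)
{
	struct my_async_req *req =
		container_of(work, struct my_async_req, work);

	/* handle the request, then release it; safe now that the
	 * worker makes no further accesses to *work after func() */
	kfree(req);
}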
@@ -386,7 +382,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
 	lockdep_assert_held(&worker->lock);
 
 	list_add_tail(&work->node, pos);
-	work->queue_seq++;
+	work->worker = worker;
 	if (likely(worker->task))
 		wake_up_process(worker->task);
 }
@@ -436,25 +432,35 @@ static void kthread_flush_work_fn(struct kthread_work *work)
  */
 void flush_kthread_work(struct kthread_work *work)
 {
-	int seq = work->queue_seq;
+	struct kthread_flush_work fwork = {
+		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
+		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+	};
+	struct kthread_worker *worker;
+	bool noop = false;
+
+retry:
+	worker = work->worker;
+	if (!worker)
+		return;
 
-	atomic_inc(&work->flushing);
+	spin_lock_irq(&worker->lock);
+	if (work->worker != worker) {
+		spin_unlock_irq(&worker->lock);
+		goto retry;
+	}
 
-	/*
-	 * mb flush-b0 paired with worker-b1, to make sure either
-	 * worker sees the above increment or we see done_seq update.
-	 */
-	smp_mb__after_atomic_inc();
+	if (!list_empty(&work->node))
+		insert_kthread_work(worker, &fwork.work, work->node.next);
+	else if (worker->current_work == work)
+		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+	else
+		noop = true;
 
-	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
-	wait_event(work->done, seq - work->done_seq <= 0);
-	atomic_dec(&work->flushing);
+	spin_unlock_irq(&worker->lock);
 
-	/*
-	 * rmb flush-b1 paired with worker-b0, to make sure our caller
-	 * sees every change made by work->func().
-	 */
-	smp_mb__after_atomic_dec();
+	if (!noop)
+		wait_for_completion(&fwork.done);
 }
 EXPORT_SYMBOL_GPL(flush_kthread_work);
 
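The fwork trick piggybacks on the flush helper that flush_kthread_worker() already uses; per the hunk header it is defined just above in kernel/kthread.c and, as of the parent commit, looks roughly like:

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

Queuing fwork.work immediately behind the target item (or at the head of work_list when the target is the worker's current_work) guarantees that fwork runs only after the target has finished, so wait_for_completion(&fwork.done) is an exact flush; if the item is neither queued nor executing, the flush is a no-op and no waiting happens.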