Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--	kernel/kthread.c	88
1 file changed, 52 insertions(+), 36 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3d3de633702e..b579af57ea10 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -360,16 +360,12 @@ repeat:
 					 struct kthread_work, node);
 		list_del_init(&work->node);
 	}
+	worker->current_work = work;
 	spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
 		work->func(work);
-		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
-		work->done_seq = work->queue_seq;
-		smp_mb();	/* mb worker-b1 paired with flush-b0 */
-		if (atomic_read(&work->flushing))
-			wake_up_all(&work->done);
 	} else if (!freezing(current))
 		schedule();
 
@@ -378,6 +374,19 @@ repeat:
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
+/* insert @work before @pos in @worker */
+static void insert_kthread_work(struct kthread_worker *worker,
+				struct kthread_work *work,
+				struct list_head *pos)
+{
+	lockdep_assert_held(&worker->lock);
+
+	list_add_tail(&work->node, pos);
+	work->worker = worker;
+	if (likely(worker->task))
+		wake_up_process(worker->task);
+}
+
 /**
  * queue_kthread_work - queue a kthread_work
  * @worker: target kthread_worker
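For context, a minimal usage sketch of the kthread_worker API that insert_kthread_work() now backs. The driver-side names (my_work_fn, my_drv_start, "my_drv") are illustrative only and not part of this patch:

	/*
	 * Sketch only: create a worker thread and queue a work item to it.
	 * my_work_fn and my_drv_start are hypothetical driver code.
	 */
	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct kthread_worker my_worker;
	static struct kthread_work my_work;

	static void my_work_fn(struct kthread_work *work)
	{
		/* runs in the worker thread's context */
	}

	static int my_drv_start(void)
	{
		struct task_struct *task;

		init_kthread_worker(&my_worker);
		init_kthread_work(&my_work, my_work_fn);

		/* the worker thread is just kthread_worker_fn() run on &my_worker */
		task = kthread_run(kthread_worker_fn, &my_worker, "my_drv");
		if (IS_ERR(task))
			return PTR_ERR(task);

		/* ends up in insert_kthread_work() above */
		queue_kthread_work(&my_worker, &my_work);
		return 0;
	}

kthread_worker_fn() records the current task in worker->task, which is why insert_kthread_work() can wake the worker thread directly when worker->task is set.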
@@ -395,10 +404,7 @@ bool queue_kthread_work(struct kthread_worker *worker,
 
 	spin_lock_irqsave(&worker->lock, flags);
 	if (list_empty(&work->node)) {
-		list_add_tail(&work->node, &worker->work_list);
-		work->queue_seq++;
-		if (likely(worker->task))
-			wake_up_process(worker->task);
+		insert_kthread_work(worker, work, &worker->work_list);
 		ret = true;
 	}
 	spin_unlock_irqrestore(&worker->lock, flags);
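queue_kthread_work() still takes the worker lock with spin_lock_irqsave(), so switching to the helper does not change its calling context; it remains usable from atomic context. A sketch (my_dev and my_irq_handler are hypothetical, not part of the patch):

	/* Sketch only: deferring work from a hard-IRQ handler. */
	#include <linux/interrupt.h>
	#include <linux/kthread.h>

	struct my_dev {
		struct kthread_worker	*worker;
		struct kthread_work	irq_work;
	};

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *dev = data;

		/* hand the real processing to the worker thread */
		queue_kthread_work(dev->worker, &dev->irq_work);
		return IRQ_HANDLED;
	}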
@@ -406,6 +412,18 @@ bool queue_kthread_work(struct kthread_worker *worker,
 }
 EXPORT_SYMBOL_GPL(queue_kthread_work);
 
+struct kthread_flush_work {
+	struct kthread_work	work;
+	struct completion	done;
+};
+
+static void kthread_flush_work_fn(struct kthread_work *work)
+{
+	struct kthread_flush_work *fwork =
+		container_of(work, struct kthread_flush_work, work);
+	complete(&fwork->done);
+}
+
 /**
  * flush_kthread_work - flush a kthread_work
  * @work: work to flush
@@ -414,39 +432,37 @@ EXPORT_SYMBOL_GPL(queue_kthread_work);
  */
 void flush_kthread_work(struct kthread_work *work)
 {
-	int seq = work->queue_seq;
-
-	atomic_inc(&work->flushing);
-
-	/*
-	 * mb flush-b0 paired with worker-b1, to make sure either
-	 * worker sees the above increment or we see done_seq update.
-	 */
-	smp_mb__after_atomic_inc();
-
-	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
-	wait_event(work->done, seq - work->done_seq <= 0);
-	atomic_dec(&work->flushing);
-
-	/*
-	 * rmb flush-b1 paired with worker-b0, to make sure our caller
-	 * sees every change made by work->func().
-	 */
-	smp_mb__after_atomic_dec();
-}
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+	struct kthread_flush_work fwork = {
+		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
+		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+	};
+	struct kthread_worker *worker;
+	bool noop = false;
 
-struct kthread_flush_work {
-	struct kthread_work	work;
-	struct completion	done;
-};
+retry:
+	worker = work->worker;
+	if (!worker)
+		return;
 
-static void kthread_flush_work_fn(struct kthread_work *work)
-{
-	struct kthread_flush_work *fwork =
-		container_of(work, struct kthread_flush_work, work);
-	complete(&fwork->done);
+	spin_lock_irq(&worker->lock);
+	if (work->worker != worker) {
+		spin_unlock_irq(&worker->lock);
+		goto retry;
+	}
+
+	if (!list_empty(&work->node))
+		insert_kthread_work(worker, &fwork.work, work->node.next);
+	else if (worker->current_work == work)
+		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+	else
+		noop = true;
+
+	spin_unlock_irq(&worker->lock);
+
+	if (!noop)
+		wait_for_completion(&fwork.done);
 }
+EXPORT_SYMBOL_GPL(flush_kthread_work);
 
 /**
  * flush_kthread_worker - flush all current works on a kthread_worker
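The point of the rework: the flush now waits on an on-stack kthread_flush_work queued right behind the target work, so the flushed item itself carries no flush state and can be freed once flush_kthread_work() returns. A hedged sketch of that teardown pattern (my_request and my_request_cancel are hypothetical caller code, not part of the patch):

	/*
	 * Sketch only: tear down a dynamically allocated work item.  Assumes
	 * nothing requeues req->work concurrently with the cancel.
	 */
	#include <linux/kthread.h>
	#include <linux/slab.h>

	struct my_request {
		struct kthread_work	work;
		/* ... request payload ... */
	};

	static void my_request_cancel(struct my_request *req)
	{
		/* waits via an on-stack kthread_flush_work queued after req->work */
		flush_kthread_work(&req->work);
		kfree(req);	/* safe: work is neither queued nor executing */
	}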
