Diffstat (limited to 'drivers/md/dm-kcopyd.c')
-rw-r--r--  drivers/md/dm-kcopyd.c | 23 +++++++++++++++--------
1 files changed, 15 insertions, 8 deletions
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 0a225da21272..3e3fc06cb861 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job)
 	dm_kcopyd_notify_fn fn = job->fn;
 	struct dm_kcopyd_client *kc = job->kc;
 
-	kcopyd_put_pages(kc, job->pages);
+	if (job->pages)
+		kcopyd_put_pages(kc, job->pages);
 	mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
@@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	sector_t progress = 0;
 	sector_t count = 0;
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	mutex_lock(&job->lock);
 
@@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
 					     GFP_NOIO);
 
 		*sub_job = *job;
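The local kc added in the previous hunk (and used in place of job->kc here) looks cosmetic, but it appears to be preparation for the hunk that follows: once the job has been pushed to the kcopyd thread it may be freed at any moment, so anything needed after the handoff, such as the client pointer for wake(), has to be read out of the job beforehand. A small sketch of the idiom, with hypothetical names (finish, queue_to_worker, wake_worker) rather than the kernel's:

struct client;

struct job {
	struct client *kc;
	/* ... */
};

/* Assumed helpers for this sketch: queue_to_worker() transfers ownership
 * of the job to another thread; wake_worker() kicks that thread. */
void queue_to_worker(struct job *job);
void wake_worker(struct client *kc);

void finish(struct job *job)
{
	struct client *kc = job->kc;	/* cache before the handoff */

	queue_to_worker(job);		/* job may be freed from here on */
	wake_worker(kc);		/* safe: uses the cached pointer */
}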
@@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err,
 	} else if (atomic_dec_and_test(&job->sub_jobs)) {
 
 		/*
-		 * To avoid a race we must keep the job around
-		 * until after the notify function has completed.
-		 * Otherwise the client may try and stop the job
-		 * after we've completed.
+		 * Queue the completion callback to the kcopyd thread.
+		 *
+		 * Some callers assume that all the completions are called
+		 * from a single thread and don't race with each other.
+		 *
+		 * We must not call the callback directly here because this
+		 * code may not be executing in the thread.
 		 */
-		job->fn(read_err, write_err, job->context);
-		mempool_free(job, job->kc->job_pool);
+		push(&kc->complete_jobs, job);
+		wake(kc);
 	}
 }
 
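The replaced comment above is the heart of the change: instead of invoking the client's notify function inline from whichever context happens to complete the last sub-job, the master job is pushed onto kc->complete_jobs and the kcopyd workqueue is woken, so every completion callback runs on the single kcopyd thread. (The master job of a split never went through the page-allocating I/O path itself, which would also explain the new job->pages check in the first hunk.) Below is a minimal userspace sketch of the same dispatch pattern; the names (jobq, complete_async, worker) and the pthreads plumbing are illustrative stand-ins for the kernel's push()/wake(), not the actual dm-kcopyd API:

#include <pthread.h>
#include <stdlib.h>

struct job {
	struct job *next;
	void (*fn)(int read_err, unsigned long write_err, void *context);
	void *context;
};

static struct job *jobq;		/* LIFO; ordering is not the point */
static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t job_cond = PTHREAD_COND_INITIALIZER;

/* Analogue of push() + wake(): queue the finished job, poke the worker. */
static void complete_async(struct job *job)
{
	pthread_mutex_lock(&job_lock);
	job->next = jobq;
	jobq = job;
	pthread_cond_signal(&job_cond);
	pthread_mutex_unlock(&job_lock);
}

/* The single completion thread: callbacks are serialized here and can
 * never race one another, matching the assumption the comment describes. */
static void *worker(void *unused)
{
	(void)unused;
	for (;;) {
		struct job *job;

		pthread_mutex_lock(&job_lock);
		while (!jobq)
			pthread_cond_wait(&job_cond, &job_lock);
		job = jobq;
		jobq = job->next;
		pthread_mutex_unlock(&job_lock);

		job->fn(0, 0, job->context);	/* the serialized callback */
		free(job);
	}
	return NULL;
}

A real caller would pthread_create() the worker once at setup and may then call complete_async() from any number of threads.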
@@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job)
 {
 	int i;
 
+	atomic_inc(&job->kc->nr_jobs);
+
 	atomic_set(&job->sub_jobs, SPLIT_COUNT);
 	for (i = 0; i < SPLIT_COUNT; i++)
 		segment_complete(0, 0u, job);
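This last hunk pairs with bookkeeping just below the context shown in the first hunk: run_complete_job() ends by decrementing kc->nr_jobs and waking the client-destroy path when the count reaches zero. Since the master job of a split now reaches run_complete_job() via the complete_jobs queue, split_job() has to count it up front, or the counter would see one more decrement than increment. A compact sketch of that invariant, with C11 atomics standing in for the kernel's atomic_t and all names hypothetical:

#include <stdatomic.h>

struct client {
	atomic_int nr_jobs;	/* one count per job that will complete */
};

/* Count a job before it can possibly reach the completion path. */
static void dispatch(struct client *kc)
{
	atomic_fetch_add(&kc->nr_jobs, 1);
	/* ... queue the job for I/O, or push it straight to completion ... */
}

/* Completion path: the single matching decrement for each job. */
static void complete(struct client *kc)
{
	if (atomic_fetch_sub(&kc->nr_jobs, 1) == 1) {
		/* last outstanding job: a waiting destroy may proceed */
	}
}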