Diffstat (limited to 'drivers/md/dm-kcopyd.c')
 drivers/md/dm-kcopyd.c | 52 ++---
 1 file changed, 5 insertions(+), 47 deletions(-)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..400cf35094a4 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
-	/*
-	 * Block devices to unplug.
-	 * Non-NULL pointer means that a block device has some pending requests
-	 * and needs to be unplugged.
-	 */
-	struct block_device *unplug[2];
-
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-	if (kc->unplug[rw] != NULL) {
-		blk_unplug(bdev_get_queue(kc->unplug[rw]));
-		kc->unplug[rw] = NULL;
-	}
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-			   struct block_device *bdev)
-{
-	if (likely(kc->unplug[rw] == bdev))
-		return;
-	unplug(kc, rw);
-	kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ) {
+	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-		prepare_unplug(job->kc, READ, job->source.bdev);
-	} else {
+	else {
 		if (job->num_dests > 1)
 			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-		if (!(io_req.bi_rw & REQ_UNPLUG))
-			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
 	}
 
 	return r;
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work)
 {
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
+	struct blk_plug plug;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list. io jobs call wake when they complete and it all
 	 * starts again.
-	 *
-	 * Note that io_jobs add block devices to the unplug array,
-	 * this array is cleared with "unplug" calls. It is thus
-	 * forbidden to run complete_jobs after io_jobs and before
-	 * unplug because the block device could be destroyed in
-	 * job completion callback.
 	 */
+	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
-	unplug(kc, READ);
-	unplug(kc, WRITE);
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	memset(kc->unplug, 0, sizeof(kc->unplug));
-
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
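
For context: the patch drops kcopyd's hand-rolled unplug[] bookkeeping in favour of the block layer's on-stack plugging API, blk_start_plug()/blk_finish_plug(), which batch bios submitted by the current task and then flush them to the underlying queues. A minimal sketch of that pattern is below; submit_pending_io() is a hypothetical stand-in for the process_jobs()/dm_io() calls in do_work(), not a function in this file.

#include <linux/blkdev.h>

/* Illustrative only -- not part of the patch. */
static void issue_batch(void)
{
	struct blk_plug plug;	/* on-stack, owned by the submitting task */

	blk_start_plug(&plug);	/* start batching bios issued by this task */
	submit_pending_io();	/* hypothetical helper that queues the bios */
	blk_finish_plug(&plug);	/* flush the batched requests to the queues */
}

Because the plug is per-task and is flushed automatically if the task sleeps, no shared per-client state or ordering rules between complete_jobs and io_jobs are required, which is why the unplug[2] array, unplug(), prepare_unplug() and the comment about job ordering can all be removed.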