about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-kcopyd.c | 54
1 files changed, 51 insertions, 3 deletions
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 5ad9231c8700..dad32f8bce7d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@ struct dm_kcopyd_client {
37 unsigned int nr_pages; 37 unsigned int nr_pages;
38 unsigned int nr_free_pages; 38 unsigned int nr_free_pages;
39 39
40 /*
41 * Block devices to unplug.
42 * Non-NULL pointer means that a block device has some pending requests
43 * and needs to be unplugged.
44 */
45 struct block_device *unplug[2];
46
40 struct dm_io_client *io_client; 47 struct dm_io_client *io_client;
41 48
42 wait_queue_head_t destroyq; 49 wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@ static int run_complete_job(struct kcopyd_job *job)
308 return 0; 315 return 0;
309} 316}
310 317
318/*
319 * Unplug the block device at the specified index.
320 */
321static void unplug(struct dm_kcopyd_client *kc, int rw)
322{
323 if (kc->unplug[rw] != NULL) {
324 blk_unplug(bdev_get_queue(kc->unplug[rw]));
325 kc->unplug[rw] = NULL;
326 }
327}
328
329/*
330 * Prepare block device unplug. If there's another device
331 * to be unplugged at the same array index, we unplug that
332 * device first.
333 */
334static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
335 struct block_device *bdev)
336{
337 if (likely(kc->unplug[rw] == bdev))
338 return;
339 unplug(kc, rw);
340 kc->unplug[rw] = bdev;
341}
342
311static void complete_io(unsigned long error, void *context) 343static void complete_io(unsigned long error, void *context)
312{ 344{
313 struct kcopyd_job *job = (struct kcopyd_job *) context; 345 struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@ static int run_io_job(struct kcopyd_job *job)
345{ 377{
346 int r; 378 int r;
347 struct dm_io_request io_req = { 379 struct dm_io_request io_req = {
348 .bi_rw = job->rw | REQ_UNPLUG, 380 .bi_rw = job->rw,
349 .mem.type = DM_IO_PAGE_LIST, 381 .mem.type = DM_IO_PAGE_LIST,
350 .mem.ptr.pl = job->pages, 382 .mem.ptr.pl = job->pages,
351 .mem.offset = job->offset, 383 .mem.offset = job->offset,
@@ -354,10 +386,16 @@ static int run_io_job(struct kcopyd_job *job)
354 .client = job->kc->io_client, 386 .client = job->kc->io_client,
355 }; 387 };
356 388
357 if (job->rw == READ) 389 if (job->rw == READ) {
358 r = dm_io(&io_req, 1, &job->source, NULL); 390 r = dm_io(&io_req, 1, &job->source, NULL);
359 else 391 prepare_unplug(job->kc, READ, job->source.bdev);
392 } else {
393 if (job->num_dests > 1)
394 io_req.bi_rw |= REQ_UNPLUG;
360 r = dm_io(&io_req, job->num_dests, job->dests, NULL); 395 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
396 if (!(io_req.bi_rw & REQ_UNPLUG))
397 prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
398 }
361 399
362 return r; 400 return r;
363} 401}
@@ -435,10 +473,18 @@ static void do_work(struct work_struct *work)
435 * Pages jobs when successful will jump onto the io jobs 473 * Pages jobs when successful will jump onto the io jobs
436 * list. io jobs call wake when they complete and it all 474 * list. io jobs call wake when they complete and it all
437 * starts again. 475 * starts again.
476 *
477 * Note that io_jobs add block devices to the unplug array,
478 * this array is cleared with "unplug" calls. It is thus
479 * forbidden to run complete_jobs after io_jobs and before
480 * unplug because the block device could be destroyed in
481 * job completion callback.
438 */ 482 */
439 process_jobs(&kc->complete_jobs, kc, run_complete_job); 483 process_jobs(&kc->complete_jobs, kc, run_complete_job);
440 process_jobs(&kc->pages_jobs, kc, run_pages_job); 484 process_jobs(&kc->pages_jobs, kc, run_pages_job);
441 process_jobs(&kc->io_jobs, kc, run_io_job); 485 process_jobs(&kc->io_jobs, kc, run_io_job);
486 unplug(kc, READ);
487 unplug(kc, WRITE);
442} 488}
443 489
444/* 490/*
@@ -619,6 +665,8 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
619 INIT_LIST_HEAD(&kc->io_jobs); 665 INIT_LIST_HEAD(&kc->io_jobs);
620 INIT_LIST_HEAD(&kc->pages_jobs); 666 INIT_LIST_HEAD(&kc->pages_jobs);
621 667
668 memset(kc->unplug, 0, sizeof(kc->unplug));
669
622 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); 670 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
623 if (!kc->job_pool) 671 if (!kc->job_pool)
624 goto bad_slab; 672 goto bad_slab;