author	Mikulas Patocka <mpatocka@redhat.com>	2011-01-13 14:59:50 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2011-01-13 14:59:50 -0500
commit	8d35d3e37eed884ba15229a146df846f399909b4 (patch)
tree	795c77c941229f413750e99242f1b2199bebe7a1 /drivers
parent	4a038677df4da84e42fd68b5ab2dfa4d82baa444 (diff)
dm kcopyd: delay unplugging
Make kcopyd merge more I/O requests by using device unplugging.

Without this patch, each I/O request is dispatched to the device
separately. If the device supports tagged queuing, many small requests
end up being sent to it. To improve performance, this patch batches as
many requests as possible, allowing the queue to merge consecutive
requests, and sends them to the device at once.

In my tests (15k SCSI disk), this patch improves sequential write
throughput:

Sequential write throughput (chunksize of 4k, 32k, 512k)
  unpatched: 15.2, 18.5, 17.5 MB/s
  patched:   14.4, 22.6, 23.0 MB/s

In the most common uses (snapshot or two-way mirror), kcopyd is only
used with two devices, one for reading and the other for writing, so
this optimization is implemented only for two devices. The optimization
may be extended to n-way mirrors at the cost of some added code
complexity.

We keep track of two block devices to unplug (one for read and the
other for write) and unplug them when exiting the "do_work" thread. If
more devices are used (in theory it could happen, in practice it is
rare), we unplug immediately.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
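The bookkeeping this describes is simple enough to model outside the
kernel. Below is a minimal user-space sketch of the idea; struct device,
the RD/WR indices, and the printf() are hypothetical stand-ins for
struct block_device, READ/WRITE, and blk_unplug(), which are not
reproduced here. One slot per direction remembers the last device that
received I/O, and flushing is deferred to the end of the batch, mirroring
unplug(), prepare_unplug(), and the two unplug() calls at the end of
do_work() in the patch below.

#include <stdio.h>

/* Stand-in for struct block_device; "pending" models queued requests. */
struct device {
	const char *name;
	int pending;
};

/* One slot per direction, like kc->unplug[2] in the patch. */
enum { RD = 0, WR = 1 };
static struct device *unplug_slot[2];

/* Flush whatever device the slot remembers (models blk_unplug()). */
static void unplug(int rw)
{
	if (unplug_slot[rw] != NULL) {
		printf("unplug %s: %d requests dispatched together\n",
		       unplug_slot[rw]->name, unplug_slot[rw]->pending);
		unplug_slot[rw]->pending = 0;
		unplug_slot[rw] = NULL;
	}
}

/*
 * Remember a device for deferred unplugging; if a different device
 * already occupies the slot, flush that one first. This mirrors
 * prepare_unplug() in the patch.
 */
static void prepare_unplug(int rw, struct device *dev)
{
	if (unplug_slot[rw] == dev)
		return;
	unplug(rw);
	unplug_slot[rw] = dev;
}

int main(void)
{
	struct device src = { "source", 0 };
	struct device dst = { "destination", 0 };
	int i;

	/* Queue a batch of copy jobs without flushing in between... */
	for (i = 0; i < 32; i++) {
		src.pending++;
		prepare_unplug(RD, &src);
		dst.pending++;
		prepare_unplug(WR, &dst);
	}

	/* ...then unplug once per direction, as do_work() does on exit. */
	unplug(RD);
	unplug(WR);
	return 0;
}

Run as-is, this reports a single "unplug" per direction for the whole
batch of 32 jobs, which is exactly the request-merging opportunity the
throughput numbers above measure.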
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/md/dm-kcopyd.c | 54
 1 file changed, 51 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 5ad9231c8700..dad32f8bce7d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
+	/*
+	 * Block devices to unplug.
+	 * Non-NULL pointer means that a block device has some pending requests
+	 * and needs to be unplugged.
+	 */
+	struct block_device *unplug[2];
+
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
+/*
+ * Unplug the block device at the specified index.
+ */
+static void unplug(struct dm_kcopyd_client *kc, int rw)
+{
+	if (kc->unplug[rw] != NULL) {
+		blk_unplug(bdev_get_queue(kc->unplug[rw]));
+		kc->unplug[rw] = NULL;
+	}
+}
+
+/*
+ * Prepare block device unplug. If there's another device
+ * to be unplugged at the same array index, we unplug that
+ * device first.
+ */
+static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
+			   struct block_device *bdev)
+{
+	if (likely(kc->unplug[rw] == bdev))
+		return;
+	unplug(kc, rw);
+	kc->unplug[rw] = bdev;
+}
+
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | REQ_UNPLUG,
+		.bi_rw = job->rw,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -354,10 +386,16 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ)
+	if (job->rw == READ) {
 		r = dm_io(&io_req, 1, &job->source, NULL);
-	else
+		prepare_unplug(job->kc, READ, job->source.bdev);
+	} else {
+		if (job->num_dests > 1)
+			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+		if (!(io_req.bi_rw & REQ_UNPLUG))
+			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
+	}
 
 	return r;
 }
@@ -435,10 +473,18 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list. io jobs call wake when they complete and it all
 	 * starts again.
+	 *
+	 * Note that io_jobs add block devices to the unplug array,
+	 * this array is cleared with "unplug" calls. It is thus
+	 * forbidden to run complete_jobs after io_jobs and before
+	 * unplug because the block device could be destroyed in
+	 * job completion callback.
 	 */
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
+	unplug(kc, READ);
+	unplug(kc, WRITE);
 }
 
 /*
@@ -619,6 +665,8 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
+	memset(kc->unplug, 0, sizeof(kc->unplug));
+
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;