Diffstat (limited to 'drivers/md/dm-kcopyd.c'):

 drivers/md/dm-kcopyd.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 505b6f5cd385..24fb42ed7b8e 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,6 +27,10 @@
 
 #include "dm.h"
 
+#define SUB_JOB_SIZE 128
+#define SPLIT_COUNT 8
+#define MIN_JOBS 8
+
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
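The constants move to the top of the file because SPLIT_COUNT is now needed when the job cache is created in the next hunk. Note that the mempool reserve, MIN_JOBS, also drops from 512 to 8: each reserved element is now a whole array of one master job plus SPLIT_COUNT sub jobs, so far fewer elements are needed. A compile-time sanity check of that arithmetic (C11, a sketch assuming only the three definitions above):

    /* 8 reserved elements x (8 + 1) jobs each = 72 preallocated jobs,
     * replacing the previous reserve of 512 individually pooled jobs. */
    _Static_assert(MIN_JOBS * (SPLIT_COUNT + 1) == 72,
                   "the pool now reserves 72 jobs");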
@@ -216,16 +220,17 @@ struct kcopyd_job {
 	struct mutex lock;
 	atomic_t sub_jobs;
 	sector_t progress;
-};
 
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+	struct kcopyd_job *master_job;
+};
 
 static struct kmem_cache *_job_cache;
 
 int __init dm_kcopyd_init(void)
 {
-	_job_cache = KMEM_CACHE(kcopyd_job, 0);
+	_job_cache = kmem_cache_create("kcopyd_job",
+				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+				__alignof__(struct kcopyd_job), 0, NULL);
 	if (!_job_cache)
 		return -ENOMEM;
 
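This is the heart of the change: a slab object is no longer a single kcopyd_job but an array of SPLIT_COUNT + 1 of them, something the KMEM_CACHE() convenience macro cannot express because it hardwires the size. For comparison, the removed line would have expanded to roughly:

    kmem_cache_create("kcopyd_job", sizeof(struct kcopyd_job),
                      __alignof__(struct kcopyd_job), 0, NULL);

so the only substantive difference is the multiplied object size. One mempool_alloc() from this cache now returns a master job with all the sub jobs it could ever need sitting right behind it, which is what lets a later hunk delete the GFP_NOIO sub-job allocation from segment_complete() — an allocation that could stall indefinitely once the pool was drained by master jobs waiting to be split.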
@@ -299,7 +304,12 @@ static int run_complete_job(struct kcopyd_job *job)
 
 	if (job->pages)
 		kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, kc->job_pool);
+	/*
+	 * If this is the master job, the sub jobs have already
+	 * completed so we can free everything.
+	 */
+	if (job->master_job == job)
+		mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
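Sub jobs now live inside the master's allocation, so freeing them individually would corrupt the pool; only the element whose master_job pointer refers to itself owns the memory, and the master completes last (the sub_jobs countdown in segment_complete() guarantees it), so the whole array is idle by the time it is freed. The ownership rule, stated as an illustrative helper that is not part of the patch:

    static bool owns_allocation(struct kcopyd_job *job)
    {
            return job->master_job == job;  /* true only for the array head */
    }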
@@ -460,14 +470,14 @@ static void dispatch_job(struct kcopyd_job *job)
 	wake(kc);
 }
 
-#define SUB_JOB_SIZE 128
 static void segment_complete(int read_err, unsigned long write_err,
 			     void *context)
 {
 	/* FIXME: tidy this function */
 	sector_t progress = 0;
 	sector_t count = 0;
-	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+	struct kcopyd_job *job = sub_job->master_job;
 	struct dm_kcopyd_client *kc = job->kc;
 
 	mutex_lock(&job->lock);
@@ -498,8 +508,6 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
-							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -511,7 +519,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		}
 
 		sub_job->fn = segment_complete;
-		sub_job->context = job;
+		sub_job->context = sub_job;
 		dispatch_job(sub_job);
 
 	} else if (atomic_dec_and_test(&job->sub_jobs)) {
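Since every sub job is preallocated, segment_complete() now receives the sub job itself as its context (hence sub_job->context = sub_job instead of the master) and reaches the shared bookkeeping through sub_job->master_job. The body elided between these hunks is untouched by the patch; paraphrased from the surrounding kcopyd code and simplified to drop error handling, each invocation claims the next chunk of the master job under its mutex:

    mutex_lock(&job->lock);
    progress = job->progress;
    count = job->source.count - progress;
    if (count) {
            if (count > SUB_JOB_SIZE)
                    count = SUB_JOB_SIZE;
            job->progress += count; /* claim this chunk for the sub job */
    }
    mutex_unlock(&job->lock);

A sub job that finds count == 0 has no work left and falls through to the sub_jobs countdown.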
@@ -531,19 +539,19 @@ static void segment_complete(int read_err, unsigned long write_err,
 }
 
 /*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
  */
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
 {
 	int i;
 
-	atomic_inc(&job->kc->nr_jobs);
+	atomic_inc(&master_job->kc->nr_jobs);
 
-	atomic_set(&job->sub_jobs, SPLIT_COUNT);
-	for (i = 0; i < SPLIT_COUNT; i++)
-		segment_complete(0, 0u, job);
+	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+	for (i = 0; i < SPLIT_COUNT; i++) {
+		master_job[i + 1].master_job = master_job;
+		segment_complete(0, 0u, &master_job[i + 1]);
+	}
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
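split_job() now hands out the trailing array elements by pointer arithmetic (master_job[i + 1] is the i-th sub job) rather than allocating each one on demand, and every sub job learns who its master is before first use. A self-contained userspace model of the lifecycle, with a hypothetical struct job standing in for kcopyd_job; it shows how the countdown lets the last finishing sub job release the single shared allocation:

    #include <stdio.h>
    #include <stdlib.h>

    #define SPLIT_COUNT 8

    struct job {
            struct job *master_job;
            int sub_jobs;           /* stands in for atomic_t sub_jobs */
    };

    /* Models the completion path: the last sub job to finish frees the
     * whole array, mirroring run_complete_job()'s master-only free. */
    static void sub_job_done(struct job *sub)
    {
            struct job *master = sub->master_job;

            if (--master->sub_jobs == 0) {
                    printf("all sub jobs done, completing master %p\n",
                           (void *)master);
                    free(master);   /* one free covers all 9 jobs */
            }
    }

    int main(void)
    {
            /* Models mempool_alloc() on the new cache: one element = 9 jobs. */
            struct job *master = calloc(SPLIT_COUNT + 1, sizeof(*master));

            if (!master)
                    return 1;
            master->master_job = master;
            master->sub_jobs = SPLIT_COUNT;
            for (int i = 0; i < SPLIT_COUNT; i++) {
                    master[i + 1].master_job = master;  /* as in split_job() */
                    sub_job_done(&master[i + 1]);       /* pretend it finished */
            }
            return 0;
    }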
@@ -553,7 +561,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	struct kcopyd_job *job;
 
 	/*
-	 * Allocate a new job.
+	 * Allocate an array of jobs consisting of one master job
+	 * followed by SPLIT_COUNT sub jobs.
 	 */
 	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
@@ -577,10 +586,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 
 	job->fn = fn;
 	job->context = context;
+	job->master_job = job;
 
 	if (job->source.count <= SUB_JOB_SIZE)
 		dispatch_job(job);
-
 	else {
 		mutex_init(&job->lock);
 		job->progress = 0;
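The tail of this hunk (mutex_init() and job->progress = 0) is the path that ends in split_job() for copies larger than SUB_JOB_SIZE. Callers of the public API are unaffected by the patch. A hedged caller sketch, with placeholder block devices and a trivial notify callback; the signatures match include/linux/dm-kcopyd.h of this era, and the helper names are hypothetical:

    static void copy_done(int read_err, unsigned long write_err, void *context)
    {
            if (read_err || write_err)
                    pr_err("kcopyd copy failed\n");
            complete(context);      /* wake a waiting struct completion */
    }

    static int copy_one_region(struct dm_kcopyd_client *kc,
                               struct block_device *src,
                               struct block_device *dst,
                               struct completion *done)
    {
            struct dm_io_region from = {
                    .bdev = src, .sector = 0, .count = 1024,  /* 512 KiB */
            };
            struct dm_io_region to = {
                    .bdev = dst, .sector = 0, .count = 1024,
            };

            /* count > SUB_JOB_SIZE (128 sectors), so this job gets split. */
            return dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, done);
    }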