author     Mikulas Patocka <mpatocka@redhat.com>    2008-04-24 16:43:46 -0400
committer  Alasdair G Kergon <agk@redhat.com>       2008-04-25 08:26:50 -0400
commit     08d8757a4d52d21d825b9170af36f2696d1da1a8 (patch)
tree       fd3e2b5620f3e8dca28464d2d0ade39c60d543c9 /drivers/md
parent     8c0cbc2f79bb222d21b466422fde71fcc9bd37e3 (diff)
dm kcopyd: private mempool
Change the global mempool in kcopyd into a per-device mempool to avoid
deadlock possibilities.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
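For readers less familiar with the mempool API, the sketch below illustrates the pattern this patch adopts: each client keeps its own mempool backed by the shared _job_cache slab, so an allocation burst on one device can no longer drain a reserve that other devices also rely on. This is only an illustrative sketch under that assumption, not code from the patched file; the example_client structure and the example_client_create()/example_client_destroy() helpers are hypothetical names.

#include <linux/mempool.h>
#include <linux/slab.h>

#define MIN_JOBS 512

/* One slab cache shared by all clients (as _job_cache is in kcopyd);
 * assumed to be created with kmem_cache_create() at module init. */
static struct kmem_cache *_job_cache;

/* Hypothetical per-device client, mirroring struct dm_kcopyd_client. */
struct example_client {
	mempool_t *job_pool;	/* private reserve of MIN_JOBS job structs */
};

static int example_client_create(struct example_client *ec)
{
	/*
	 * Each client gets its own guaranteed reserve of objects, so one
	 * device cannot exhaust the pool that another device needs in
	 * order to make forward progress.
	 */
	ec->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!ec->job_pool)
		return -ENOMEM;
	return 0;
}

static void example_client_destroy(struct example_client *ec)
{
	mempool_destroy(ec->job_pool);
	ec->job_pool = NULL;
}

The pool the patch removes was a single such reserve shared by every device; with one pool per client, freeing a completed job always replenishes the reserve of the device that owns it.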
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/kcopyd.c | 32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 3fb6c8334a82..0b2907d59a3f 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -43,6 +43,8 @@ struct dm_kcopyd_client {
 	wait_queue_head_t destroyq;
 	atomic_t nr_jobs;
 
+	mempool_t *job_pool;
+
 	struct workqueue_struct *kcopyd_wq;
 	struct work_struct kcopyd_work;
 
@@ -221,7 +223,6 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
 
 static int jobs_init(void)
 {
@@ -229,20 +230,12 @@ static int jobs_init(void)
 	if (!_job_cache)
 		return -ENOMEM;
 
-	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-	if (!_job_pool) {
-		kmem_cache_destroy(_job_cache);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static void jobs_exit(void)
 {
-	mempool_destroy(_job_pool);
 	kmem_cache_destroy(_job_cache);
-	_job_pool = NULL;
 	_job_cache = NULL;
 }
 
@@ -295,7 +288,7 @@ static int run_complete_job(struct kcopyd_job *job)
 	struct dm_kcopyd_client *kc = job->kc;
 
 	kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, _job_pool);
+	mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -487,7 +480,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -511,7 +505,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		 * after we've completed.
 		 */
 		job->fn(read_err, write_err, job->context);
-		mempool_free(job, _job_pool);
+		mempool_free(job, job->kc->job_pool);
 	}
 }
 
@@ -538,7 +532,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	/*
 	 * Allocate a new job.
 	 */
-	job = mempool_alloc(_job_pool, GFP_NOIO);
+	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
 	/*
 	 * set up for the read.
@@ -666,10 +660,19 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
+	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
+	if (!kc->job_pool) {
+		r = -ENOMEM;
+		kfree(kc);
+		kcopyd_exit();
+		return r;
+	}
+
 	INIT_WORK(&kc->kcopyd_work, do_work);
 	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
 	if (!kc->kcopyd_wq) {
 		r = -ENOMEM;
+		mempool_destroy(kc->job_pool);
 		kfree(kc);
 		kcopyd_exit();
 		return r;
@@ -680,6 +683,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	r = client_alloc_pages(kc, nr_pages);
 	if (r) {
 		destroy_workqueue(kc->kcopyd_wq);
+		mempool_destroy(kc->job_pool);
 		kfree(kc);
 		kcopyd_exit();
 		return r;
@@ -690,6 +694,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 		r = PTR_ERR(kc->io_client);
 		client_free_pages(kc);
 		destroy_workqueue(kc->kcopyd_wq);
+		mempool_destroy(kc->job_pool);
 		kfree(kc);
 		kcopyd_exit();
 		return r;
@@ -716,6 +721,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 	dm_io_client_destroy(kc->io_client);
 	client_free_pages(kc);
 	client_del(kc);
+	mempool_destroy(kc->job_pool);
 	kfree(kc);
 	kcopyd_exit();
 }