author		Mikulas Patocka <mpatocka@redhat.com>	2008-07-21 07:00:35 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2008-07-21 07:00:35 -0400
commit		92e868122edf08b9fc06b112e7e0c80ab94c1f93 (patch)
tree		9cd2e5e2f15adcaf1b8d3f8f55d5a676ad40bfd6	/drivers/md/dm-snap.c
parent		a8d41b59f3f5a7ac19452ef442a7fc1b5fa17366 (diff)
dm snapshot: use per device mempools
Change the snapshot per-module mempool to a per-device mempool. A per-module
mempool could cause a deadlock if multiple snapshot devices are stacked above
each other.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
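Note (not part of this diff): the per-device pool needs a new member in
struct dm_snapshot, which is declared in drivers/md/dm-snap.h and therefore
falls outside this per-file diffstat. A minimal sketch of that field, with
its type inferred from the mempool_create_slab_pool()/mempool_destroy()
calls in the hunks below and all other members elided:

	struct dm_snapshot {
		/* ... existing members elided ... */

		/* new: one mempool of pending exceptions per snapshot device */
		mempool_t *pending_pool;
	};

Because each stacked snapshot now draws its pending exceptions from its own
pool, an allocation on an upper device can no longer block waiting for a free
that only a lower device could perform, which is the deadlock the commit
message describes.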
Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--	drivers/md/dm-snap.c	40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f4fd0cee9c3d..6e5528aecc98 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -96,7 +96,6 @@ struct dm_snap_pending_exception {
  */
 static struct kmem_cache *exception_cache;
 static struct kmem_cache *pending_cache;
-static mempool_t *pending_pool;
 
 struct dm_snap_tracked_chunk {
 	struct hlist_node node;
@@ -364,14 +363,19 @@ static void free_exception(struct dm_snap_exception *e)
 	kmem_cache_free(exception_cache, e);
 }
 
-static struct dm_snap_pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
 {
-	return mempool_alloc(pending_pool, GFP_NOIO);
+	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+							     GFP_NOIO);
+
+	pe->snap = s;
+
+	return pe;
 }
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	mempool_free(pe, pending_pool);
+	mempool_free(pe, pe->snap->pending_pool);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
@@ -627,12 +631,18 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad5;
 	}
 
+	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+	if (!s->pending_pool) {
+		ti->error = "Could not allocate mempool for pending exceptions";
+		goto bad6;
+	}
+
 	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
 							 tracked_chunk_cache);
 	if (!s->tracked_chunk_pool) {
 		ti->error = "Could not allocate tracked_chunk mempool for "
 			    "tracking reads";
-		goto bad6;
+		goto bad_tracked_chunk_pool;
 	}
 
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
@@ -669,6 +679,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 bad_load_and_register:
 	mempool_destroy(s->tracked_chunk_pool);
 
+bad_tracked_chunk_pool:
+	mempool_destroy(s->pending_pool);
+
 bad6:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 
@@ -723,6 +736,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
 	__free_exceptions(s);
 
+	mempool_destroy(s->pending_pool);
+
 	dm_put_device(ti, s->origin);
 	dm_put_device(ti, s->cow);
 
@@ -969,7 +984,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	 * to hold the lock while we do this.
 	 */
 	up_write(&s->lock);
-	pe = alloc_pending_exception();
+	pe = alloc_pending_exception(s);
 	down_write(&s->lock);
 
 	if (!s->valid) {
@@ -989,7 +1004,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	bio_list_init(&pe->snapshot_bios);
 	pe->primary_pe = NULL;
 	atomic_set(&pe->ref_count, 0);
-	pe->snap = s;
 	pe->started = 0;
 
 	if (s->store.prepare_exception(&s->store, &pe->e)) {
@@ -1418,24 +1432,15 @@ static int __init dm_snapshot_init(void)
 		goto bad5;
 	}
 
-	pending_pool = mempool_create_slab_pool(128, pending_cache);
-	if (!pending_pool) {
-		DMERR("Couldn't create pending pool.");
-		r = -ENOMEM;
-		goto bad_pending_pool;
-	}
-
 	ksnapd = create_singlethread_workqueue("ksnapd");
 	if (!ksnapd) {
 		DMERR("Failed to create ksnapd workqueue.");
 		r = -ENOMEM;
-		goto bad6;
+		goto bad_pending_pool;
 	}
 
 	return 0;
 
-bad6:
-	mempool_destroy(pending_pool);
 bad_pending_pool:
 	kmem_cache_destroy(tracked_chunk_cache);
 bad5:
@@ -1466,7 +1471,6 @@ static void __exit dm_snapshot_exit(void)
 		DMERR("origin unregister failed %d", r);
 
 	exit_origin_hash();
-	mempool_destroy(pending_pool);
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
 	kmem_cache_destroy(tracked_chunk_cache);