-rw-r--r--	drivers/md/dm-snap.c	107
-rw-r--r--	drivers/md/dm-snap.h	9
2 files changed, 106 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1ba8a47d61b1..de302702ab3e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,6 +40,11 @@
  */
 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
+/*
+ * The size of the mempool used to track chunks in use.
+ */
+#define MIN_IOS 256
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
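MIN_IOS sizes the reserve of the mempool created in snapshot_ctr() below: mempool_create_slab_pool(MIN_IOS, cache) pre-allocates that many objects, so the later mempool_alloc(..., GFP_NOIO) in track_chunk() can always fall back to the reserve and make progress even under memory pressure. A minimal sketch of that pool's lifecycle, with hypothetical example_* names:

	/* Sketch only; example_* names are not part of this patch. */
	static struct kmem_cache *example_cache;
	static mempool_t *example_pool;

	static int example_pool_init(void)
	{
		example_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
		if (!example_cache)
			return -ENOMEM;

		/* Pre-reserves MIN_IOS objects for the I/O path. */
		example_pool = mempool_create_slab_pool(MIN_IOS, example_cache);
		if (!example_pool) {
			kmem_cache_destroy(example_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_pool_exit(void)
	{
		mempool_destroy(example_pool);
		kmem_cache_destroy(example_cache);
	}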
@@ -93,6 +98,42 @@ static struct kmem_cache *exception_cache;
 static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 
+struct dm_snap_tracked_chunk {
+	struct hlist_node node;
+	chunk_t chunk;
+};
+
+static struct kmem_cache *tracked_chunk_cache;
+
+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
+						 chunk_t chunk)
+{
+	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
+							GFP_NOIO);
+	unsigned long flags;
+
+	c->chunk = chunk;
+
+	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+	hlist_add_head(&c->node,
+		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+	return c;
+}
+
+static void stop_tracking_chunk(struct dm_snapshot *s,
+				struct dm_snap_tracked_chunk *c)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+	hlist_del(&c->node);
+	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+	mempool_free(c, s->tracked_chunk_pool);
+}
+
 /*
  * One of these per registered origin, held in the snapshot_origins hash
  */
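track_chunk() files each in-flight read in a small hash table keyed by chunk number; stop_tracking_chunk() undoes it on completion. The irqsave/irqrestore spinlock variants are used because stop_tracking_chunk() is reached from snapshot_end_io(), which can run from interrupt context. A hypothetical lookup helper, not part of this patch, shows how the table would be queried to ask whether a read of a given chunk is still in flight (using the four-argument hlist_for_each_entry of this kernel era):

	/* Hypothetical helper; same locking rules as track_chunk() above. */
	static int example_chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
	{
		struct dm_snap_tracked_chunk *c;
		struct hlist_node *hn;
		unsigned long flags;
		int found = 0;

		spin_lock_irqsave(&s->tracked_chunk_lock, flags);

		hlist_for_each_entry(c, hn,
		    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
			if (c->chunk == chunk) {
				found = 1;
				break;
			}
		}

		spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

		return found;
	}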
@@ -482,6 +523,7 @@ static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dm_snapshot *s;
+	int i;
 	int r = -EINVAL;
 	char persistent;
 	char *origin_path;
@@ -564,11 +606,24 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad5;
 	}
 
+	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
+							 tracked_chunk_cache);
+	if (!s->tracked_chunk_pool) {
+		ti->error = "Could not allocate tracked_chunk mempool for "
+			    "tracking reads";
+		goto bad6;
+	}
+
+	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+
+	spin_lock_init(&s->tracked_chunk_lock);
+
 	/* Metadata must only be loaded into one table at once */
 	r = s->store.read_metadata(&s->store);
 	if (r < 0) {
 		ti->error = "Failed to read snapshot metadata";
-		goto bad6;
+		goto bad_load_and_register;
 	} else if (r > 0) {
 		s->valid = 0;
 		DMWARN("Snapshot is marked invalid.");
@@ -582,7 +637,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	if (register_snapshot(s)) {
 		r = -EINVAL;
 		ti->error = "Cannot register snapshot origin";
-		goto bad6;
+		goto bad_load_and_register;
 	}
 
 	ti->private = s;
@@ -590,6 +645,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	return 0;
 
+ bad_load_and_register:
+	mempool_destroy(s->tracked_chunk_pool);
+
  bad6:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 
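The tracked_chunk_pool is allocated before read_metadata() and register_snapshot(), so failures from those calls must now unwind through the new bad_load_and_register label, which destroys the pool before falling into the pre-existing bad6 cleanup. This is the standard kernel goto-ladder: each label undoes one allocation, in reverse order of construction. A generic sketch with hypothetical alloc_a/alloc_b/free_a helpers:

	/* Illustrative shape of the unwind ladder; names are hypothetical. */
	static int example_ctr(void)
	{
		void *a, *b;

		a = alloc_a();
		if (!a)
			goto bad_a;

		b = alloc_b();
		if (!b)
			goto bad_b;	/* must undo a before returning */

		return 0;

	 bad_b:
		free_a(a);
	 bad_a:
		return -ENOMEM;
	}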
@@ -624,6 +682,9 @@ static void __free_exceptions(struct dm_snapshot *s)
 
 static void snapshot_dtr(struct dm_target *ti)
 {
+#ifdef CONFIG_DM_DEBUG
+	int i;
+#endif
 	struct dm_snapshot *s = ti->private;
 
 	flush_workqueue(ksnapd);
@@ -632,6 +693,13 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
 
+#ifdef CONFIG_DM_DEBUG
+	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+#endif
+
+	mempool_destroy(s->tracked_chunk_pool);
+
 	__free_exceptions(s);
 
 	dm_put_device(ti, s->origin);
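snapshot_dtr() runs only after the device has been quiesced, so every read recorded by snapshot_map() must already have been released by snapshot_end_io(); the CONFIG_DM_DEBUG block turns that invariant into an assertion before the pool is destroyed. A softer variant, hypothetical and not in the patch, would warn instead of halting the kernel:

	#ifdef CONFIG_DM_DEBUG
		/* Hypothetical alternative: report a leaked tracked chunk
		 * without the BUG_ON() crash. */
		for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
			WARN_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
	#endif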
@@ -974,14 +1042,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			start_copy(pe);
 			goto out;
 		}
-	} else
-		/*
-		 * FIXME: this read path scares me because we
-		 * always use the origin when we have a pending
-		 * exception. However I can't think of a
-		 * situation where this is wrong - ejt.
-		 */
+	} else {
 		bio->bi_bdev = s->origin->bdev;
+		map_context->ptr = track_chunk(s, chunk);
+	}
 
  out_unlock:
 	up_write(&s->lock);
@@ -989,6 +1053,18 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	return r;
 }
 
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+			   int error, union map_info *map_context)
+{
+	struct dm_snapshot *s = ti->private;
+	struct dm_snap_tracked_chunk *c = map_context->ptr;
+
+	if (c)
+		stop_tracking_chunk(s, c);
+
+	return 0;
+}
+
 static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = ti->private;
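The pointer stored in snapshot_map() comes back untouched in snapshot_end_io(): device-mapper passes the same union map_info to both hooks for a given bio, so no lookup is needed to find the tracked chunk at completion time. A stripped-down sketch of that handshake for a hypothetical pass-through target (example_* names assumed):

	/* Hypothetical target showing the map_info->ptr round trip. */
	struct example_ctx {
		struct dm_dev *dev;	/* opened in the target's ctr */
	};

	static int example_map(struct dm_target *ti, struct bio *bio,
			       union map_info *map_context)
	{
		struct example_ctx *ec = ti->private;
		unsigned long *start = kmalloc(sizeof(*start), GFP_NOIO);

		if (start)
			*start = jiffies;
		map_context->ptr = start;	/* handed back to ->end_io */

		bio->bi_bdev = ec->dev->bdev;	/* redirect, as snapshot_map does */
		return DM_MAPIO_REMAPPED;
	}

	static int example_end_io(struct dm_target *ti, struct bio *bio,
				  int error, union map_info *map_context)
	{
		kfree(map_context->ptr);	/* same pointer set in example_map */
		return 0;
	}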
@@ -1266,6 +1342,7 @@ static struct target_type snapshot_target = {
 	.ctr    = snapshot_ctr,
 	.dtr    = snapshot_dtr,
 	.map    = snapshot_map,
+	.end_io = snapshot_end_io,
 	.resume = snapshot_resume,
 	.status = snapshot_status,
 };
@@ -1306,11 +1383,18 @@ static int __init dm_snapshot_init(void)
 		goto bad4;
 	}
 
+	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
+	if (!tracked_chunk_cache) {
+		DMERR("Couldn't create cache to track chunks in use.");
+		r = -ENOMEM;
+		goto bad5;
+	}
+
 	pending_pool = mempool_create_slab_pool(128, pending_cache);
 	if (!pending_pool) {
 		DMERR("Couldn't create pending pool.");
 		r = -ENOMEM;
-		goto bad5;
+		goto bad_pending_pool;
 	}
 
 	ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1324,6 +1408,8 @@ static int __init dm_snapshot_init(void)
 
  bad6:
 	mempool_destroy(pending_pool);
+ bad_pending_pool:
+	kmem_cache_destroy(tracked_chunk_cache);
  bad5:
 	kmem_cache_destroy(pending_cache);
  bad4:
@@ -1355,6 +1441,7 @@ static void __exit dm_snapshot_exit(void)
 	mempool_destroy(pending_pool);
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
+	kmem_cache_destroy(tracked_chunk_cache);
 }
 
 /* Module hooks */
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 24f9fb73b982..70dc961f40d8 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -130,6 +130,10 @@ struct exception_store {
 	void *context;
 };
 
+#define DM_TRACKED_CHUNK_HASH_SIZE	16
+#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
+					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
 struct dm_snapshot {
 	struct rw_semaphore lock;
 	struct dm_target *ti;
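DM_TRACKED_CHUNK_HASH is a power-of-two mask rather than a real hash function: with DM_TRACKED_CHUNK_HASH_SIZE == 16, a chunk lands in the bucket given by its low four bits, so consecutive chunk numbers spread evenly across the 16 buckets. For example:

	DM_TRACKED_CHUNK_HASH(0);	/* == 0              */
	DM_TRACKED_CHUNK_HASH(17);	/* == 1  (17 & 15)   */
	DM_TRACKED_CHUNK_HASH(0x2f);	/* == 15 (0x2f & 15) */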
@@ -174,6 +178,11 @@ struct dm_snapshot {
 	/* Queue of snapshot writes for ksnapd to flush */
 	struct bio_list queued_bios;
 	struct work_struct queued_bios_work;
+
+	/* Chunks with outstanding reads */
+	mempool_t *tracked_chunk_pool;
+	spinlock_t tracked_chunk_lock;
+	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 };
 
 /*
