author    Linus Torvalds <torvalds@linux-foundation.org>    2008-07-21 13:30:10 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-21 13:30:10 -0400
commit    b7e6f62fe259187f2578d00960ef1b0e6ff6afd5 (patch)
tree      796fd3c878a2f8d8d55ea1e52ef236742a48c3dd /drivers
parent    8a392625b665c676a77c62f8608d10ff430bcb83 (diff)
parent    d41e26b901111f4e540aa2c27ec7a1681c782be9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm crypt: add merge
  dm table: remove merge_bvec sector restriction
  dm linear: add merge
  dm: introduce merge_bvec_fn
  dm snapshot: use per device mempools
  dm snapshot: fix race during exception creation
  dm snapshot: track snapshot reads
  dm mpath: fix test for reinstate_path
  dm mpath: return parameter error
  dm io: remove struct padding
  dm log: make dm_dirty_log init and exit static
  dm mpath: free path selector on invalid args
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-crypt.c   |  18
-rw-r--r--  drivers/md/dm-linear.c  |  38
-rw-r--r--  drivers/md/dm-log.c     |   4
-rw-r--r--  drivers/md/dm-mpath.c   |  10
-rw-r--r--  drivers/md/dm-snap.c    | 163
-rw-r--r--  drivers/md/dm-snap.h    |  11
-rw-r--r--  drivers/md/dm-table.c   |  13
-rw-r--r--  drivers/md/dm.c         |  46
-rw-r--r--  drivers/md/dm.h         |   6
9 files changed, 262 insertions(+), 47 deletions(-)
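
The headline change in this merge is per-target bio_vec merging: dm.c (below) registers a queue-level dm_merge_bvec() that delegates to a new optional .merge method in struct target_type, and the crypt and linear targets implement it. As a rough sketch of the delegation pattern both targets follow (my_merge and struct my_c are hypothetical stand-ins; the real instances are crypt_merge() and linear_merge() in the hunks below):

    /*
     * Sketch only: forward a merge probe to the underlying device's queue.
     * mc->dev/mc->start stand in for the target's private config fields.
     */
    static int my_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
    {
            struct my_c *mc = ti->private;
            struct request_queue *q = bdev_get_queue(mc->dev->bdev);

            if (!q->merge_bvec_fn)          /* no limit imposed below us */
                    return max_size;

            /* probe with the sector the bio would have after remapping */
            bvm->bi_bdev = mc->dev->bdev;
            bvm->bi_sector = mc->start + bvm->bi_sector - ti->begin;

            return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
    }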
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ab6a61db63ce..13956437bc81 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1216,9 +1216,24 @@ error:
         return -EINVAL;
 }
 
+static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+                       struct bio_vec *biovec, int max_size)
+{
+        struct crypt_config *cc = ti->private;
+        struct request_queue *q = bdev_get_queue(cc->dev->bdev);
+
+        if (!q->merge_bvec_fn)
+                return max_size;
+
+        bvm->bi_bdev = cc->dev->bdev;
+        bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+
+        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
 static struct target_type crypt_target = {
         .name = "crypt",
-        .version= {1, 5, 0},
+        .version= {1, 6, 0},
         .module = THIS_MODULE,
         .ctr = crypt_ctr,
         .dtr = crypt_dtr,
@@ -1228,6 +1243,7 @@ static struct target_type crypt_target = {
         .preresume = crypt_preresume,
         .resume = crypt_resume,
         .message = crypt_message,
+        .merge = crypt_merge,
 };
 
 static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 17753d80ad22..6449bcdf84ca 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -69,13 +69,25 @@ static void linear_dtr(struct dm_target *ti)
         kfree(lc);
 }
 
-static int linear_map(struct dm_target *ti, struct bio *bio,
-                      union map_info *map_context)
+static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
-        struct linear_c *lc = (struct linear_c *) ti->private;
+        struct linear_c *lc = ti->private;
+
+        return lc->start + (bi_sector - ti->begin);
+}
+
+static void linear_map_bio(struct dm_target *ti, struct bio *bio)
+{
+        struct linear_c *lc = ti->private;
 
         bio->bi_bdev = lc->dev->bdev;
-        bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);
+        bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+}
+
+static int linear_map(struct dm_target *ti, struct bio *bio,
+                      union map_info *map_context)
+{
+        linear_map_bio(ti, bio);
 
         return DM_MAPIO_REMAPPED;
 }
@@ -114,15 +126,31 @@ static int linear_ioctl(struct dm_target *ti, struct inode *inode,
         return blkdev_driver_ioctl(bdev->bd_inode, &fake_file, bdev->bd_disk, cmd, arg);
 }
 
+static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+                        struct bio_vec *biovec, int max_size)
+{
+        struct linear_c *lc = ti->private;
+        struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
+        if (!q->merge_bvec_fn)
+                return max_size;
+
+        bvm->bi_bdev = lc->dev->bdev;
+        bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
+
+        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
 static struct target_type linear_target = {
         .name = "linear",
-        .version= {1, 0, 2},
+        .version= {1, 0, 3},
         .module = THIS_MODULE,
         .ctr = linear_ctr,
         .dtr = linear_dtr,
         .map = linear_map,
         .status = linear_status,
         .ioctl = linear_ioctl,
+        .merge = linear_merge,
 };
 
 int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 67a6f31b7fc3..5b48478c79f5 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -831,7 +831,7 @@ static struct dm_dirty_log_type _disk_type = {
         .status = disk_status,
 };
 
-int __init dm_dirty_log_init(void)
+static int __init dm_dirty_log_init(void)
 {
         int r;
 
@@ -848,7 +848,7 @@ int __init dm_dirty_log_init(void)
         return r;
 }
 
-void __exit dm_dirty_log_exit(void)
+static void __exit dm_dirty_log_exit(void)
 {
         dm_dirty_log_type_unregister(&_disk_type);
         dm_dirty_log_type_unregister(&_core_type);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9f7302d4878d..fea966d66f98 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -525,8 +525,10 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
         }
 
         r = read_param(_params, shift(as), &ps_argc, &ti->error);
-        if (r)
+        if (r) {
+                dm_put_path_selector(pst);
                 return -EINVAL;
+        }
 
         r = pst->create(&pg->ps, ps_argc, as->argv);
         if (r) {
@@ -623,8 +625,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
         struct pgpath *pgpath;
         struct arg_set path_args;
 
-        if (as->argc < nr_params)
+        if (as->argc < nr_params) {
+                ti->error = "not enough path parameters";
                 goto bad;
+        }
 
         path_args.argc = nr_params;
         path_args.argv = as->argv;
@@ -867,7 +871,7 @@ static int reinstate_path(struct pgpath *pgpath)
         if (pgpath->path.is_active)
                 goto out;
 
-        if (!pgpath->pg->ps.type) {
+        if (!pgpath->pg->ps.type->reinstate_path) {
                 DMWARN("Reinstate path not supported by path selector %s",
                        pgpath->pg->ps.type->name);
                 r = -EINVAL;
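
The reinstate_path fix above replaces a test of ps.type, which is always non-NULL once a selector is attached, with a test of the optional method pointer itself. A sketch of the corrected call site (the invocation below the check is an assumption about the selector API, not part of the hunk):

    /* Only invoke the selector's optional reinstate_path hook if present. */
    if (!pgpath->pg->ps.type->reinstate_path) {
            DMWARN("Reinstate path not supported by path selector %s",
                   pgpath->pg->ps.type->name);
            return -EINVAL;
    }
    /* assumed signature: selector instance plus the path to reinstate */
    r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);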
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1ba8a47d61b1..6e5528aecc98 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,6 +40,11 @@
  */
 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
+/*
+ * The size of the mempool used to track chunks in use.
+ */
+#define MIN_IOS 256
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
@@ -91,7 +96,63 @@ struct dm_snap_pending_exception {
  */
 static struct kmem_cache *exception_cache;
 static struct kmem_cache *pending_cache;
-static mempool_t *pending_pool;
+
+struct dm_snap_tracked_chunk {
+        struct hlist_node node;
+        chunk_t chunk;
+};
+
+static struct kmem_cache *tracked_chunk_cache;
+
+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
+                                                 chunk_t chunk)
+{
+        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
+                                                        GFP_NOIO);
+        unsigned long flags;
+
+        c->chunk = chunk;
+
+        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+        hlist_add_head(&c->node,
+                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+        return c;
+}
+
+static void stop_tracking_chunk(struct dm_snapshot *s,
+                                struct dm_snap_tracked_chunk *c)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+        hlist_del(&c->node);
+        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+        mempool_free(c, s->tracked_chunk_pool);
+}
+
+static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
+{
+        struct dm_snap_tracked_chunk *c;
+        struct hlist_node *hn;
+        int found = 0;
+
+        spin_lock_irq(&s->tracked_chunk_lock);
+
+        hlist_for_each_entry(c, hn,
+            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
+                if (c->chunk == chunk) {
+                        found = 1;
+                        break;
+                }
+        }
+
+        spin_unlock_irq(&s->tracked_chunk_lock);
+
+        return found;
+}
 
 /*
  * One of these per registered origin, held in the snapshot_origins hash
@@ -302,14 +363,19 @@ static void free_exception(struct dm_snap_exception *e)
         kmem_cache_free(exception_cache, e);
 }
 
-static struct dm_snap_pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
 {
-        return mempool_alloc(pending_pool, GFP_NOIO);
+        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+                                                             GFP_NOIO);
+
+        pe->snap = s;
+
+        return pe;
 }
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-        mempool_free(pe, pending_pool);
+        mempool_free(pe, pe->snap->pending_pool);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
@@ -482,6 +548,7 @@ static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
         struct dm_snapshot *s;
+        int i;
         int r = -EINVAL;
         char persistent;
         char *origin_path;
@@ -564,11 +631,30 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 goto bad5;
         }
 
+        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+        if (!s->pending_pool) {
+                ti->error = "Could not allocate mempool for pending exceptions";
+                goto bad6;
+        }
+
+        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
+                                                         tracked_chunk_cache);
+        if (!s->tracked_chunk_pool) {
+                ti->error = "Could not allocate tracked_chunk mempool for "
+                            "tracking reads";
+                goto bad_tracked_chunk_pool;
+        }
+
+        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+
+        spin_lock_init(&s->tracked_chunk_lock);
+
         /* Metadata must only be loaded into one table at once */
         r = s->store.read_metadata(&s->store);
         if (r < 0) {
                 ti->error = "Failed to read snapshot metadata";
-                goto bad6;
+                goto bad_load_and_register;
         } else if (r > 0) {
                 s->valid = 0;
                 DMWARN("Snapshot is marked invalid.");
@@ -582,7 +668,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         if (register_snapshot(s)) {
                 r = -EINVAL;
                 ti->error = "Cannot register snapshot origin";
-                goto bad6;
+                goto bad_load_and_register;
         }
 
         ti->private = s;
@@ -590,6 +676,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
         return 0;
 
+ bad_load_and_register:
+        mempool_destroy(s->tracked_chunk_pool);
+
+ bad_tracked_chunk_pool:
+        mempool_destroy(s->pending_pool);
+
  bad6:
         dm_kcopyd_client_destroy(s->kcopyd_client);
 
@@ -624,6 +716,9 @@ static void __free_exceptions(struct dm_snapshot *s)
 
 static void snapshot_dtr(struct dm_target *ti)
 {
+#ifdef CONFIG_DM_DEBUG
+        int i;
+#endif
         struct dm_snapshot *s = ti->private;
 
         flush_workqueue(ksnapd);
@@ -632,8 +727,17 @@ static void snapshot_dtr(struct dm_target *ti)
         /* After this returns there can be no new kcopyd jobs. */
         unregister_snapshot(s);
 
+#ifdef CONFIG_DM_DEBUG
+        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+#endif
+
+        mempool_destroy(s->tracked_chunk_pool);
+
         __free_exceptions(s);
 
+        mempool_destroy(s->pending_pool);
+
         dm_put_device(ti, s->origin);
         dm_put_device(ti, s->cow);
 
@@ -772,6 +876,13 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
         }
 
         /*
+         * Check for conflicting reads. This is extremely improbable,
+         * so yield() is sufficient and there is no need for a wait queue.
+         */
+        while (__chunk_is_tracked(s, pe->e.old_chunk))
+                yield();
+
+        /*
          * Add a proper exception, and remove the
          * in-flight exception from the list.
          */
@@ -873,7 +984,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
         * to hold the lock while we do this.
         */
        up_write(&s->lock);
-       pe = alloc_pending_exception();
+       pe = alloc_pending_exception(s);
        down_write(&s->lock);
 
        if (!s->valid) {
@@ -893,7 +1004,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
        bio_list_init(&pe->snapshot_bios);
        pe->primary_pe = NULL;
        atomic_set(&pe->ref_count, 0);
-       pe->snap = s;
        pe->started = 0;
 
        if (s->store.prepare_exception(&s->store, &pe->e)) {
@@ -974,14 +1084,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                         start_copy(pe);
                         goto out;
                 }
-        } else
-                /*
-                 * FIXME: this read path scares me because we
-                 * always use the origin when we have a pending
-                 * exception. However I can't think of a
-                 * situation where this is wrong - ejt.
-                 */
+        } else {
                 bio->bi_bdev = s->origin->bdev;
+                map_context->ptr = track_chunk(s, chunk);
+        }
 
  out_unlock:
         up_write(&s->lock);
@@ -989,6 +1095,18 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
         return r;
 }
 
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+                           int error, union map_info *map_context)
+{
+        struct dm_snapshot *s = ti->private;
+        struct dm_snap_tracked_chunk *c = map_context->ptr;
+
+        if (c)
+                stop_tracking_chunk(s, c);
+
+        return 0;
+}
+
 static void snapshot_resume(struct dm_target *ti)
 {
         struct dm_snapshot *s = ti->private;
@@ -1266,6 +1384,7 @@ static struct target_type snapshot_target = {
         .ctr = snapshot_ctr,
         .dtr = snapshot_dtr,
         .map = snapshot_map,
+        .end_io = snapshot_end_io,
         .resume = snapshot_resume,
         .status = snapshot_status,
 };
@@ -1306,9 +1425,9 @@ static int __init dm_snapshot_init(void)
                 goto bad4;
         }
 
-        pending_pool = mempool_create_slab_pool(128, pending_cache);
-        if (!pending_pool) {
-                DMERR("Couldn't create pending pool.");
+        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
+        if (!tracked_chunk_cache) {
+                DMERR("Couldn't create cache to track chunks in use.");
                 r = -ENOMEM;
                 goto bad5;
         }
@@ -1317,13 +1436,13 @@ static int __init dm_snapshot_init(void)
         if (!ksnapd) {
                 DMERR("Failed to create ksnapd workqueue.");
                 r = -ENOMEM;
-                goto bad6;
+                goto bad_pending_pool;
         }
 
         return 0;
 
- bad6:
-        mempool_destroy(pending_pool);
+ bad_pending_pool:
+        kmem_cache_destroy(tracked_chunk_cache);
  bad5:
         kmem_cache_destroy(pending_cache);
  bad4:
@@ -1352,9 +1471,9 @@ static void __exit dm_snapshot_exit(void)
                 DMERR("origin unregister failed %d", r);
 
         exit_origin_hash();
-        mempool_destroy(pending_pool);
         kmem_cache_destroy(pending_cache);
         kmem_cache_destroy(exception_cache);
+        kmem_cache_destroy(tracked_chunk_cache);
 }
 
 /* Module hooks */
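
Taken together, the snapshot hunks above give every read remapped to the origin a tracked-chunk entry: snapshot_map() registers the chunk with track_chunk() and stashes the entry in map_context->ptr, snapshot_end_io() releases it via stop_tracking_chunk(), and pending_complete() spins on __chunk_is_tracked() before committing an exception, so a copy-out can never race an in-flight read. Bucketing is a simple mask: with DM_TRACKED_CHUNK_HASH_SIZE = 16, chunk 37 hashes to 37 & 15 = 5, landing in bucket 5 of tracked_chunk_hash.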
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 24f9fb73b982..292c15609ae3 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -130,6 +130,10 @@ struct exception_store {
         void *context;
 };
 
+#define DM_TRACKED_CHUNK_HASH_SIZE      16
+#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
+                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
 struct dm_snapshot {
         struct rw_semaphore lock;
         struct dm_target *ti;
@@ -157,6 +161,8 @@ struct dm_snapshot {
         /* The last percentage we notified */
         int last_percent;
 
+        mempool_t *pending_pool;
+
         struct exception_table pending;
         struct exception_table complete;
 
@@ -174,6 +180,11 @@ struct dm_snapshot {
         /* Queue of snapshot writes for ksnapd to flush */
         struct bio_list queued_bios;
         struct work_struct queued_bios_work;
+
+        /* Chunks with outstanding reads */
+        mempool_t *tracked_chunk_pool;
+        spinlock_t tracked_chunk_lock;
+        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 };
 
 /*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 94116eaf4709..798e468103b8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -506,14 +506,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
         rs->max_sectors =
                 min_not_zero(rs->max_sectors, q->max_sectors);
 
-        /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
-         * currently doesn't honor MD's merge_bvec_fn routine.
-         * In this case, we'll force DM to use PAGE_SIZE or
-         * smaller I/O, just to be safe. A better fix is in the
-         * works, but add this for the time being so it will at
-         * least operate correctly.
+        /*
+         * Check if merge fn is supported.
+         * If not we'll force DM to use PAGE_SIZE or
+         * smaller I/O, just to be safe.
          */
-        if (q->merge_bvec_fn)
+
+        if (q->merge_bvec_fn && !ti->type->merge)
                 rs->max_sectors =
                         min_not_zero(rs->max_sectors,
                                      (unsigned int) (PAGE_SIZE >> 9));
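
With 4 KiB pages this fallback caps I/O at PAGE_SIZE >> 9 = 4096 >> 9 = 8 sectors, i.e. a single page, whenever the underlying queue defines merge_bvec_fn; after this change the cap applies only to targets that lack a .merge method, since a target that has one can relay the merge query instead of being throttled.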
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 372369b1cc20..bca448e11878 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,8 +37,8 @@ static DEFINE_SPINLOCK(_minor_lock);
 struct dm_io {
         struct mapped_device *md;
         int error;
-        struct bio *bio;
         atomic_t io_count;
+        struct bio *bio;
         unsigned long start_time;
 };
 
@@ -829,6 +829,49 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
  * CRUD END
  *---------------------------------------------------------------*/
 
+static int dm_merge_bvec(struct request_queue *q,
+                         struct bvec_merge_data *bvm,
+                         struct bio_vec *biovec)
+{
+        struct mapped_device *md = q->queuedata;
+        struct dm_table *map = dm_get_table(md);
+        struct dm_target *ti;
+        sector_t max_sectors;
+        int max_size;
+
+        if (unlikely(!map))
+                return 0;
+
+        ti = dm_table_find_target(map, bvm->bi_sector);
+
+        /*
+         * Find maximum amount of I/O that won't need splitting
+         */
+        max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+                          (sector_t) BIO_MAX_SECTORS);
+        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
+        if (max_size < 0)
+                max_size = 0;
+
+        /*
+         * merge_bvec_fn() returns number of bytes
+         * it can accept at this offset
+         * max is precomputed maximal io size
+         */
+        if (max_size && ti->type->merge)
+                max_size = ti->type->merge(ti, bvm, biovec, max_size);
+
+        /*
+         * Always allow an entire first page
+         */
+        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
+                max_size = biovec->bv_len;
+
+        dm_table_put(map);
+
+        return max_size;
+}
+
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -1032,6 +1075,7 @@ static struct mapped_device *alloc_dev(int minor)
         blk_queue_make_request(md->queue, dm_request);
         blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
         md->queue->unplug_fn = dm_unplug_all;
+        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 
         md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
         if (!md->io_pool)
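
To make dm_merge_bvec()'s arithmetic concrete with illustrative figures (not taken from the patch): if max_io_len() allows 128 sectors at bvm->bi_sector and the probing bio already carries bvm->bi_size = 61440 bytes (60 KiB), then max_size = (128 << 9) - 61440 = 65536 - 61440 = 4096, so one more 4 KiB page may be merged before the bio would need splitting. And because the first page is always allowed, a bio that is still empty (bvm->bi_size >> SECTOR_SHIFT == 0) is never trimmed below biovec->bv_len.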
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 8c03b634e62e..1e59a0b0a78a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -100,12 +100,6 @@ int dm_lock_for_deletion(struct mapped_device *md);
 
 void dm_kobject_uevent(struct mapped_device *md);
 
-/*
- * Dirty log
- */
-int dm_dirty_log_init(void);
-void dm_dirty_log_exit(void);
-
 int dm_kcopyd_init(void);
 void dm_kcopyd_exit(void);
 