author		Linus Torvalds <torvalds@linux-foundation.org>	2012-06-02 20:39:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-06-02 20:39:40 -0400
commit		912afc3616b94c5c4af584972c7f5903b53cf15a (patch)
tree		3edd4596fe53ce5bc8224b1cccd5d34dcac040a7
parent		4fc3acf2918fa158dc651a0c824a23944e956919 (diff)
parent		cc8394d86f045b86ff303d3c9e4ce47d97148951 (diff)
Merge tag 'dm-3.5-changes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm
Pull device-mapper updates from Alasdair G Kergon:
 "Improve multipath's retrying mechanism in some defined circumstances
  and provide a simple reserve/release mechanism for userspace tools to
  access thin provisioning metadata while the pool is in use."

* tag 'dm-3.5-changes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
  dm thin: provide userspace access to pool metadata
  dm thin: use slab mempools
  dm mpath: allow ioctls to trigger pg init
  dm mpath: delay retry of bypassed pg
  dm mpath: reduce size of struct multipath
-rw-r--r--	Documentation/device-mapper/thin-provisioning.txt	|  11
-rw-r--r--	drivers/md/dm-mpath.c					|  47
-rw-r--r--	drivers/md/dm-thin-metadata.c				| 136
-rw-r--r--	drivers/md/dm-thin-metadata.h				|  13
-rw-r--r--	drivers/md/dm-thin.c					| 203
-rw-r--r--	drivers/md/persistent-data/dm-transaction-manager.c	|   2
6 files changed, 322 insertions(+), 90 deletions(-)
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 3370bc4d7b98..f5cfc62b7ad3 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -287,6 +287,17 @@ iii) Messages
 	the current transaction id is when you change it with this
 	compare-and-swap message.
 
+    reserve_metadata_snap
+
+	Reserve a copy of the data mapping btree for use by userland.
+	This allows userland to inspect the mappings as they were when
+	this message was executed. Use the pool's status command to
+	get the root block associated with the metadata snapshot.
+
+    release_metadata_snap
+
+	Release a previously reserved copy of the data mapping btree.
+
 'thin' target
 -------------
 
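Both messages are normally driven with "dmsetup message <pool-dev> 0 <message>".
As a minimal C sketch of the same call path (assuming libdevmapper; the helper
name and the device name are illustrative, not part of this patch):

	/* Compile with -ldevmapper. Sketch only. */
	#include <libdevmapper.h>

	static int send_pool_message(const char *pool_name, const char *msg)
	{
		struct dm_task *dmt;
		int r = 0;

		dmt = dm_task_create(DM_DEVICE_TARGET_MSG);
		if (!dmt)
			return 0;

		/* The message is addressed to sector 0 of the pool target. */
		if (dm_task_set_name(dmt, pool_name) &&
		    dm_task_set_sector(dmt, 0) &&
		    dm_task_set_message(dmt, msg))
			r = dm_task_run(dmt);

		dm_task_destroy(dmt);
		return r;	/* non-zero on success, libdevmapper style */
	}

	/* send_pool_message("pool", "reserve_metadata_snap");
	 * ... inspect the snapshot with userspace tools ...
	 * send_pool_message("pool", "release_metadata_snap"); */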
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f8a692..638dae048b4f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 
@@ -61,11 +62,11 @@ struct multipath {
 	struct list_head list;
 	struct dm_target *ti;
 
-	spinlock_t lock;
-
 	const char *hw_handler_name;
 	char *hw_handler_params;
 
+	spinlock_t lock;
+
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 
@@ -81,16 +82,17 @@ struct multipath {
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 	unsigned repeat_count;		/* I/Os left before calling PS again */
 
-	unsigned queue_io;		/* Must we queue all I/O? */
-	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
-	unsigned saved_queue_if_no_path;/* Saved state during suspension */
+	unsigned queue_io:1;		/* Must we queue all I/O? */
+	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
+	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
+	unsigned queue_size;
 	struct work_struct process_queued_ios;
 	struct list_head queued_ios;
-	unsigned queue_size;
 
 	struct work_struct trigger_event;
 
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 	/*
 	 * Loop through priority groups until we find a valid path.
 	 * First time we skip PGs marked 'bypassed'.
-	 * Second time we only try the ones we skipped.
+	 * Second time we only try the ones we skipped, but set
+	 * pg_init_delay_retry so we do not hammer controllers.
 	 */
 	do {
 		list_for_each_entry(pg, &m->priority_groups, list) {
 			if (pg->bypassed == bypassed)
 				continue;
-			if (!__choose_path_in_pg(m, pg, nr_bytes))
+			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+				if (!bypassed)
+					m->pg_init_delay_retry = 1;
 				return;
+			}
 		}
 	} while (bypassed--);
 
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (!m->queue_size)
-		goto out;
-
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 0);
 
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
 	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
 		__pg_init_all_paths(m);
 
-out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 			   unsigned long arg)
 {
-	struct multipath *m = (struct multipath *) ti->private;
-	struct block_device *bdev = NULL;
-	fmode_t mode = 0;
+	struct multipath *m = ti->private;
+	struct block_device *bdev;
+	fmode_t mode;
 	unsigned long flags;
-	int r = 0;
+	int r;
+
+again:
+	bdev = NULL;
+	mode = 0;
+	r = 0;
 
 	spin_lock_irqsave(&m->lock, flags);
 
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
 		r = scsi_verify_blk_ioctl(NULL, cmd);
 
+	if (r == -EAGAIN && !fatal_signal_pending(current)) {
+		queue_work(kmultipathd, &m->process_queued_ios);
+		msleep(10);
+		goto again;
+	}
+
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
 
@@ -1643,7 +1656,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
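The struct multipath shrink above comes from turning three full-width
"unsigned" flags into single-bit bitfields. A standalone, runnable
illustration of the effect (ordinary userspace C, not kernel code; the
struct names are invented for the demo):

	#include <stdio.h>

	struct flags_wide {		/* as before the patch */
		unsigned queue_io;
		unsigned queue_if_no_path;
		unsigned saved_queue_if_no_path;
	};

	struct flags_packed {		/* as after the patch */
		unsigned queue_io:1;
		unsigned queue_if_no_path:1;
		unsigned saved_queue_if_no_path:1;
	};

	int main(void)
	{
		/* Typically prints "wide: 12, packed: 4" where int is 32 bits. */
		printf("wide: %zu, packed: %zu\n",
		       sizeof(struct flags_wide), sizeof(struct flags_packed));
		return 0;
	}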
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d38865b69..3e2907f0bc46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
 	return 0;
 }
 
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
-				    dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r, inc;
+	struct thin_disk_superblock *disk_super;
+	struct dm_block *copy, *sblock;
+	dm_block_t held_root;
+
+	/*
+	 * Copy the superblock.
+	 */
+	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+			       &sb_validator, &copy, &inc);
+	if (r)
+		return r;
+
+	BUG_ON(!inc);
+
+	held_root = dm_block_location(copy);
+	disk_super = dm_block_data(copy);
+
+	if (le64_to_cpu(disk_super->held_root)) {
+		DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+		dm_tm_dec(pmd->tm, held_root);
+		dm_tm_unlock(pmd->tm, copy);
+		pmd->need_commit = 1;
+
+		return -EBUSY;
+	}
+
+	/*
+	 * Wipe the spacemap since we're not publishing this.
+	 */
+	memset(&disk_super->data_space_map_root, 0,
+	       sizeof(disk_super->data_space_map_root));
+	memset(&disk_super->metadata_space_map_root, 0,
+	       sizeof(disk_super->metadata_space_map_root));
+
+	/*
+	 * Increment the data structures that need to be preserved.
+	 */
+	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+	dm_tm_unlock(pmd->tm, copy);
+
+	/*
+	 * Write the held root into the superblock.
+	 */
+	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+			     &sb_validator, &sblock);
+	if (r) {
+		dm_tm_dec(pmd->tm, held_root);
+		pmd->need_commit = 1;
+		return r;
+	}
+
+	disk_super = dm_block_data(sblock);
+	disk_super->held_root = cpu_to_le64(held_root);
+	dm_bm_unlock(sblock);
+
+	pmd->need_commit = 1;
+
+	return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = __reserve_metadata_snap(pmd);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
 {
 	int r;
 	struct thin_disk_superblock *disk_super;
-	struct dm_block *sblock;
+	struct dm_block *sblock, *copy;
+	dm_block_t held_root;
 
 	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
 			     &sb_validator, &sblock);
@@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
 		return r;
 
 	disk_super = dm_block_data(sblock);
+	held_root = le64_to_cpu(disk_super->held_root);
+	disk_super->held_root = cpu_to_le64(0);
+	pmd->need_commit = 1;
+
+	dm_bm_unlock(sblock);
+
+	if (!held_root) {
+		DMWARN("No pool metadata snapshot found: nothing to release.");
+		return -EINVAL;
+	}
+
+	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+	if (r)
+		return r;
+
+	disk_super = dm_block_data(copy);
+	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+	dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+	return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = __release_metadata_snap(pmd);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+			       dm_block_t *result)
+{
+	int r;
+	struct thin_disk_superblock *disk_super;
+	struct dm_block *sblock;
+
+	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+			    &sb_validator, &sblock);
+	if (r)
+		return r;
+
+	disk_super = dm_block_data(sblock);
 	*result = le64_to_cpu(disk_super->held_root);
 
 	return dm_bm_unlock(sblock);
 }
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-				   dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+			      dm_block_t *result)
 {
 	int r;
 
 	down_read(&pmd->root_lock);
-	r = __get_held_metadata_root(pmd, result);
+	r = __get_metadata_snap(pmd, result);
 	up_read(&pmd->root_lock);
 
 	return r;
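Once reserved, the snapshot's root block is reported in the pool target's
status line. A hedged userspace sketch of extracting it (the assumed field
layout, transaction id, metadata usage, data usage, then the held root or
"-", follows thin-provisioning.txt for this target version and should be
verified against your kernel):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Returns the held root block, 0 if none is held, -1 on parse error. */
	static long long parse_held_root(const char *status)
	{
		unsigned long long id, used_meta, total_meta, used_data, total_data;
		char held[32];

		if (sscanf(status, "%llu %llu/%llu %llu/%llu %31s",
			   &id, &used_meta, &total_meta,
			   &used_data, &total_data, held) != 6)
			return -1;

		return strcmp(held, "-") ? atoll(held) : 0;
	}

	/* parse_held_root("1 100/4096 2048/1048576 3456") == 3456 */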
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e67c96..b88918ccdaf6 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
 
 /*
  * Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps). Userland can access the data structures for READ
+ * operations only. A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes. Release this as soon as you finish with it.
  */
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-				   dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+			      dm_block_t *result);
 
 /*
  * Actions on a single virtual device.
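Taken together, the declarations above define a three-step cycle: reserve,
read the root, release. A minimal in-kernel sketch of a hypothetical caller
(illustrative only, not part of the patch):

	#include "dm-thin-metadata.h"

	static int metadata_snap_cycle(struct dm_pool_metadata *pmd)
	{
		dm_block_t root;
		int r;

		r = dm_pool_reserve_metadata_snap(pmd);	/* -EBUSY if one is held */
		if (r)
			return r;

		r = dm_pool_get_metadata_snap(pmd, &root);
		if (r) {
			dm_pool_release_metadata_snap(pmd);
			return r;
		}

		/* ... userland reads the btree rooted at 'root', READ only ... */

		/* Release promptly: a held snapshot forces extra copy-on-write. */
		return dm_pool_release_metadata_snap(pmd);
	}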
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138ff55a..37fdaf81bd1f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
 	dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
 	return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
 		return NULL;
 
 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
 		(lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-				    struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+						  struct cell_key *key)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct cell **ref)
+		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 
 	BUG_ON(hash > prison->nr_buckets);
 
@@ -273,7 +274,7 @@ out:
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell. This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				     struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				   struct bio_list *inmates)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
 	struct bio_prison *prison = cell->prison;
 	struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
 	unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
 	struct deferred_set shared_read_ds;
 	struct deferred_set all_io_ds;
 
-	struct new_mapping *next_mapping;
+	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
 	mempool_t *endio_hook_pool;
 };
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
 	struct deferred_entry *all_io_entry;
-	struct new_mapping *overwrite_mapping;
+	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
 	struct list_head list;
 
 	unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	int err;
 
 	/*
@@ -759,7 +763,7 @@ struct new_mapping {
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	unsigned long flags;
-	struct new_mapping *m = context;
+	struct dm_thin_new_mapping *m = context;
 	struct pool *pool = m->tc->pool;
 
 	m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	struct new_mapping *m = h->overwrite_mapping;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
 	m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 		       dm_block_t data_block)
 {
 	struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct bio_list bios;
 	struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
 	wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 	struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
 	int r;
 	struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-			     void (*fn)(struct new_mapping *))
+			     void (*fn)(struct dm_thin_new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
 	return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *r = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 			  struct dm_dev *origin, dm_block_t data_origin,
 			  dm_block_t data_dest,
-			  struct cell *cell, struct bio *bio)
+			  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	int r;
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_origin, dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->pool_dev,
 		      data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->origin_dev,
 		      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_block, struct cell *cell,
+			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
 			  struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 		remap_and_issue(tc, bio, data_block);
-
 	} else {
 		int r;
 		struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	int r;
 	unsigned long flags;
 	struct pool *pool = tc->pool;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	struct cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
-	struct new_mapping *m;
+	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
 	if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
-			  struct cell *cell)
+			  struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 			       dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
 	struct cell_key key;
 
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			    struct cell *cell)
+			    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 {
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 		struct thin_c *tc = h->tc;
 
 		/*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
 	kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	ds_init(&pool->all_io_ds);
 
 	pool->next_mapping = NULL;
-	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+						      _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+							 _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
 	return 0;
 }
 
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+	int r;
+
+	r = check_arg_count(argc, 1);
+	if (r)
+		return r;
+
+	r = dm_pool_reserve_metadata_snap(pool->pmd);
+	if (r)
+		DMWARN("reserve_metadata_snap message failed.");
+
+	return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+	int r;
+
+	r = check_arg_count(argc, 1);
+	if (r)
+		return r;
+
+	r = dm_pool_release_metadata_snap(pool->pmd);
+	if (r)
+		DMWARN("release_metadata_snap message failed.");
+
+	return r;
+}
+
 /*
  * Messages supported:
  * create_thin <dev_id>
@@ -2283,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
  * delete <dev_id>
  * trim <dev_id> <new_size_in_sectors>
  * set_transaction_id <current_trans_id> <new_trans_id>
+ * reserve_metadata_snap
+ * release_metadata_snap
  */
 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2302,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 	else if (!strcasecmp(argv[0], "set_transaction_id"))
 		r = process_set_transaction_id_mesg(argc, argv, pool);
 
+	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+	else if (!strcasecmp(argv[0], "release_metadata_snap"))
+		r = process_release_metadata_snap_mesg(argc, argv, pool);
+
 	else
 		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
 
@@ -2361,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 	if (r)
 		return r;
 
-	r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+	r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
 	if (r)
 		return r;
 
@@ -2457,7 +2503,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 1, 0},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2613,9 +2659,9 @@ static int thin_endio(struct dm_target *ti,
 		      union map_info *map_context)
 {
 	unsigned long flags;
-	struct endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = map_context->ptr;
 	struct list_head work;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
 
 	if (h->shared_read_entry) {
@@ -2755,7 +2801,32 @@ static int __init dm_thin_init(void)
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	r = -ENOMEM;
+
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		goto bad_cell_cache;
+
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		goto bad_new_mapping_cache;
+
+	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+	if (!_endio_hook_cache)
+		goto bad_endio_hook_cache;
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);
 
 	return r;
 }
@@ -2764,6 +2835,10 @@ static void dm_thin_exit(void)
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
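The mempool changes in this file all follow one pattern: a named slab cache
per object type, created at module init, with a mempool drawing from it so
objects are exactly sized (rather than rounded up by kmalloc) and a minimum
reserve guarantees forward progress. A condensed sketch of the pattern
(names are illustrative, not from the patch):

	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct example_obj {
		int field;
	};

	static struct kmem_cache *_example_cache;
	static mempool_t *_example_pool;

	static int example_init(void)
	{
		_example_cache = KMEM_CACHE(example_obj, 0);
		if (!_example_cache)
			return -ENOMEM;

		/* Keep at least 16 objects in reserve, allocated from the cache. */
		_example_pool = mempool_create_slab_pool(16, _example_cache);
		if (!_example_pool) {
			kmem_cache_destroy(_example_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_exit(void)
	{
		mempool_destroy(_example_pool);
		kmem_cache_destroy(_example_cache);
	}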
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d38747d7f..400fe144c0cd 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -249,6 +249,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
 
 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 		    struct dm_block_validator *v,
@@ -259,6 +260,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 
 	return dm_bm_read_lock(tm->bm, b, v, blk);
 }
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
 
 int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
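These exports matter because dm-thin-metadata, built into the separate
dm-thin-pool module, now shadows and read-locks metadata blocks directly
for the snapshot feature, so the calls cross a module boundary into
dm-persistent-data. A minimal sketch of such a caller, using only the
signatures visible above (the function itself is illustrative):

	#include "dm-transaction-manager.h"

	static int example_read_block(struct dm_transaction_manager *tm,
				      dm_block_t b, struct dm_block_validator *v)
	{
		struct dm_block *blk;
		int r;

		r = dm_tm_read_lock(tm, b, v, &blk);	/* needs the export above */
		if (r)
			return r;

		/* ... inspect dm_block_data(blk) read-only ... */

		return dm_tm_unlock(tm, blk);
	}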