path: root/drivers/md/dm-snap.c
Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--  drivers/md/dm-snap.c  90
1 file changed, 34 insertions(+), 56 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a143921feaf6..59fc18ae52c2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -79,7 +79,6 @@ struct dm_snapshot {
 
 	/* Chunks with outstanding reads */
 	spinlock_t tracked_chunk_lock;
-	mempool_t *tracked_chunk_pool;
 	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 
 	/* The on disk metadata handler */
@@ -191,35 +190,38 @@ struct dm_snap_tracked_chunk {
 	chunk_t chunk;
 };
 
-static struct kmem_cache *tracked_chunk_cache;
+static void init_tracked_chunk(struct bio *bio)
+{
+	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+	INIT_HLIST_NODE(&c->node);
+}
 
-static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
-						 chunk_t chunk)
+static bool is_bio_tracked(struct bio *bio)
 {
-	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
-							GFP_NOIO);
-	unsigned long flags;
+	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+	return !hlist_unhashed(&c->node);
+}
+
+static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
+{
+	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 
 	c->chunk = chunk;
 
-	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+	spin_lock_irq(&s->tracked_chunk_lock);
 	hlist_add_head(&c->node,
 		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
-	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
-	return c;
+	spin_unlock_irq(&s->tracked_chunk_lock);
 }
 
-static void stop_tracking_chunk(struct dm_snapshot *s,
-				struct dm_snap_tracked_chunk *c)
+static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 {
+	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 	unsigned long flags;
 
 	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
 	hlist_del(&c->node);
 	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
-	mempool_free(c, s->tracked_chunk_pool);
 }
 
 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
@@ -1120,14 +1122,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_pending_pool;
 	}
 
-	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
-							 tracked_chunk_cache);
-	if (!s->tracked_chunk_pool) {
-		ti->error = "Could not allocate tracked_chunk mempool for "
-			    "tracking reads";
-		goto bad_tracked_chunk_pool;
-	}
-
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
 		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
 
@@ -1135,6 +1129,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->private = s;
 	ti->num_flush_requests = num_flush_requests;
+	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
 
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -1183,9 +1178,6 @@ bad_read_metadata:
 	unregister_snapshot(s);
 
 bad_load_and_register:
-	mempool_destroy(s->tracked_chunk_pool);
-
-bad_tracked_chunk_pool:
 	mempool_destroy(s->pending_pool);
 
 bad_pending_pool:
@@ -1290,8 +1282,6 @@ static void snapshot_dtr(struct dm_target *ti)
 		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
 #endif
 
-	mempool_destroy(s->tracked_chunk_pool);
-
 	__free_exceptions(s);
 
 	mempool_destroy(s->pending_pool);
@@ -1577,8 +1567,7 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
 			  s->store->chunk_mask);
 }
 
-static int snapshot_map(struct dm_target *ti, struct bio *bio,
-			union map_info *map_context)
+static int snapshot_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dm_exception *e;
 	struct dm_snapshot *s = ti->private;
@@ -1586,6 +1575,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	chunk_t chunk;
 	struct dm_snap_pending_exception *pe = NULL;
 
+	init_tracked_chunk(bio);
+
 	if (bio->bi_rw & REQ_FLUSH) {
 		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
@@ -1670,7 +1661,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 		}
 	} else {
 		bio->bi_bdev = s->origin->bdev;
-		map_context->ptr = track_chunk(s, chunk);
+		track_chunk(s, bio, chunk);
 	}
 
 out_unlock:
@@ -1691,20 +1682,20 @@ out:
  * If merging is currently taking place on the chunk in question, the
  * I/O is deferred by adding it to s->bios_queued_during_merge.
  */
-static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
-			      union map_info *map_context)
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dm_exception *e;
 	struct dm_snapshot *s = ti->private;
 	int r = DM_MAPIO_REMAPPED;
 	chunk_t chunk;
 
+	init_tracked_chunk(bio);
+
 	if (bio->bi_rw & REQ_FLUSH) {
-		if (!map_context->target_request_nr)
+		if (!dm_bio_get_target_request_nr(bio))
 			bio->bi_bdev = s->origin->bdev;
 		else
 			bio->bi_bdev = s->cow->bdev;
-		map_context->ptr = NULL;
 		return DM_MAPIO_REMAPPED;
 	}
 
@@ -1733,7 +1724,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 		remap_exception(s, e, bio, chunk);
 
 		if (bio_rw(bio) == WRITE)
-			map_context->ptr = track_chunk(s, chunk);
+			track_chunk(s, bio, chunk);
 		goto out_unlock;
 	}
 
@@ -1751,14 +1742,12 @@ out_unlock:
 	return r;
 }
 
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
-			   int error, union map_info *map_context)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct dm_snapshot *s = ti->private;
-	struct dm_snap_tracked_chunk *c = map_context->ptr;
 
-	if (c)
-		stop_tracking_chunk(s, c);
+	if (is_bio_tracked(bio))
+		stop_tracking_chunk(s, bio);
 
 	return 0;
 }
@@ -2127,8 +2116,7 @@ static void origin_dtr(struct dm_target *ti)
 	dm_put_device(ti, dev);
 }
 
-static int origin_map(struct dm_target *ti, struct bio *bio,
-		      union map_info *map_context)
+static int origin_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dm_dev *dev = ti->private;
 	bio->bi_bdev = dev->bdev;
@@ -2193,7 +2181,7 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 7, 1},
+	.version = {1, 8, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
@@ -2206,7 +2194,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 10, 0},
+	.version = {1, 11, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -2220,7 +2208,7 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
 	.name    = dm_snapshot_merge_target_name,
-	.version = {1, 1, 0},
+	.version = {1, 2, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -2281,17 +2269,8 @@ static int __init dm_snapshot_init(void)
 		goto bad_pending_cache;
 	}
 
-	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
-	if (!tracked_chunk_cache) {
-		DMERR("Couldn't create cache to track chunks in use.");
-		r = -ENOMEM;
-		goto bad_tracked_chunk_cache;
-	}
-
 	return 0;
 
-bad_tracked_chunk_cache:
-	kmem_cache_destroy(pending_cache);
 bad_pending_cache:
 	kmem_cache_destroy(exception_cache);
 bad_exception_cache:
@@ -2317,7 +2296,6 @@ static void __exit dm_snapshot_exit(void)
 	exit_origin_hash();
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
-	kmem_cache_destroy(tracked_chunk_cache);
 
 	dm_exception_store_exit();
 }
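
Note: the sketch below is not part of the patch. It is a minimal, hypothetical illustration of the per-bio-data pattern the patch switches to, using only the interfaces that appear in the diff above (ti->per_bio_data_size, dm_per_bio_data(), INIT_HLIST_NODE(), hlist_unhashed()). The example_* names are invented for illustration; a real target would also parse its arguments and remap the bio.

	#include <linux/device-mapper.h>
	#include <linux/list.h>

	/* Hypothetical per-bio state; dm core reserves it in front of each bio. */
	struct example_per_bio {
		struct hlist_node node;	/* stays unhashed unless the bio is tracked */
	};

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* Tell dm core how much per-bio data this target needs. */
		ti->per_bio_data_size = sizeof(struct example_per_bio);
		return 0;
	}

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_per_bio *c =
			dm_per_bio_data(bio, sizeof(struct example_per_bio));

		/* Initialise unconditionally so end_io can tell tracked bios apart. */
		INIT_HLIST_NODE(&c->node);

		/* ... remap bio->bi_bdev to the destination device here ... */
		return DM_MAPIO_REMAPPED;
	}

	static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
	{
		struct example_per_bio *c =
			dm_per_bio_data(bio, sizeof(struct example_per_bio));

		/* Only bios that were hashed onto a list in map need cleanup. */
		if (!hlist_unhashed(&c->node))
			hlist_del(&c->node);
		return 0;
	}

Because the state lives with the bio itself, the constructor no longer needs a slab cache plus mempool, and map/end_io no longer pass a pointer through map_info, which is what allows the union map_info *map_context parameters to be dropped above.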