author		Mike Snitzer <snitzer@redhat.com>	2010-08-11 23:14:08 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2010-08-11 23:14:08 -0400
commit		5ae89a8720c28caf35c4e53711d77df2856c404e (patch)
tree		712ddc158309d7ad77e3c19e70f0ae9fafb94446 /drivers
parent		5ebaee6d290279d1df6ce45d6d54de8cfc473273 (diff)
dm: linear support discard

Allow discards to be passed through to linear mappings if at least one
underlying device supports it.  Discards will be forwarded only to
devices that support them.

A target that supports discards should set num_discard_requests to
indicate how many times each discard request must be submitted to it.

Verify table's underlying devices support discards prior to setting the
associated DM device as capable of discards (via QUEUE_FLAG_DISCARD).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reviewed-by: Joe Thornber <thornber@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
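To illustrate the target-side contract described above, here is a minimal
sketch (not part of this patch; the target name and constructor are
hypothetical) of how a bio-based target opts in to discard support:

	/* hypothetical example target constructor */
	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* one flush request per device, as before */
		ti->num_flush_requests = 1;

		/* opt in: each discard is submitted to this target once */
		ti->num_discard_requests = 1;

		return 0;
	}

A target that leaves num_discard_requests at zero marks the whole table as
not supporting discards (see the dm_table_add_target() hunk below).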
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-linear.c	 1
-rw-r--r--	drivers/md/dm-table.c	44
-rw-r--r--	drivers/md/dm.c		65
-rw-r--r--	drivers/md/dm.h		 1
4 files changed, 99 insertions, 12 deletions
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9200dbf2391a..f043b5f433b2 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	ti->private = lc;
 	return 0;
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bc60ef77a0d8..f9fc07d7a4b9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,6 +54,8 @@ struct dm_table {
 	sector_t *highs;
 	struct dm_target *targets;
 
+	unsigned discards_supported:1;
+
 	/*
 	 * Indicates the rw permissions for the new logical
 	 * device. This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
 	INIT_LIST_HEAD(&t->devices);
 	atomic_set(&t->holders, 0);
+	t->discards_supported = 1;
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 
+	if (!tgt->num_discard_requests)
+		t->discards_supported = 0;
+
 	return 0;
 
 bad:
@@ -1135,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
 
+	if (!dm_table_supports_discards(t))
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
 	dm_table_set_integrity(t);
 
 	/*
@@ -1281,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
 	return t->md;
 }
 
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	if (!t->discards_supported)
+		return 0;
+
+	/*
+	 * Ensure that at least one underlying device supports discards.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting discard must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
+			return 1;
+	}
+
+	return 0;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
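dm_table_supports_discards() above can only discover a target's data
devices through the target's iterate_devices method, with
device_discard_capable() as the callout. As a sketch of what such a method
looks like (modelled on the in-tree linear target of this era; treat the
exact body as an assumption):

	static int linear_iterate_devices(struct dm_target *ti,
					  iterate_devices_callout_fn fn, void *data)
	{
		struct linear_c *lc = ti->private;

		/* hand the single underlying device to the callout */
		return fn(ti, lc->dev, lc->start, ti->len, data);
	}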
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0d4710175885..44aba29154fc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1212,6 +1212,53 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
 	return 0;
 }
 
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+	struct bio *clone, *bio = ci->bio;
+	struct dm_target_io *tio;
+
+	tio = alloc_tio(ci, ti);
+	clone = clone_bio(bio, ci->sector, ci->idx,
+			  bio->bi_vcnt - ci->idx, ci->sector_count,
+			  ci->md->bs);
+	__map_bio(ti, clone, tio);
+	ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+	struct dm_target *ti;
+	sector_t max;
+
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	/*
+	 * Even though the device advertised discard support,
+	 * reconfiguration might have changed that since the
+	 * check was performed.
+	 */
+
+	if (!ti->num_discard_requests)
+		return -EOPNOTSUPP;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
+	if (ci->sector_count > max)
+		/*
+		 * FIXME: Handle a discard that spans two or more targets.
+		 */
+		return -EOPNOTSUPP;
+
+	__clone_and_map_simple(ci, ti);
+
+	return 0;
+}
+
 static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
@@ -1222,27 +1269,21 @@ static int __clone_and_map(struct clone_info *ci)
 	if (unlikely(bio_empty_barrier(bio)))
 		return __clone_and_map_empty_barrier(ci);
 
+	if (unlikely(bio->bi_rw & REQ_DISCARD))
+		return __clone_and_map_discard(ci);
+
 	ti = dm_table_find_target(ci->map, ci->sector);
 	if (!dm_target_is_valid(ti))
 		return -EIO;
 
 	max = max_io_len(ci->md, ci->sector, ti);
 
-	/*
-	 * Allocate a target io object.
-	 */
-	tio = alloc_tio(ci, ti);
-
 	if (ci->sector_count <= max) {
 		/*
 		 * Optimise for the simple case where we can do all of
 		 * the remaining io with a single clone.
 		 */
-		clone = clone_bio(bio, ci->sector, ci->idx,
-				  bio->bi_vcnt - ci->idx, ci->sector_count,
-				  ci->md->bs);
-		__map_bio(ti, clone, tio);
-		ci->sector_count = 0;
+		__clone_and_map_simple(ci, ti);
 
 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
 		/*
@@ -1263,6 +1304,7 @@ static int __clone_and_map(struct clone_info *ci)
 			len += bv_len;
 		}
 
+		tio = alloc_tio(ci, ti);
 		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
 				  ci->md->bs);
 		__map_bio(ti, clone, tio);
@@ -1286,12 +1328,11 @@ static int __clone_and_map(struct clone_info *ci)
 					return -EIO;
 
 				max = max_io_len(ci->md, ci->sector, ti);
-
-				tio = alloc_tio(ci, ti);
 			}
 
 			len = min(remaining, max);
 
+			tio = alloc_tio(ci, ti);
 			clone = split_bvec(bio, ci->sector, ci->idx,
 					   bv->bv_offset + offset, len,
 					   ci->md->bs);
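For completeness, a userspace sketch of how this path is exercised: a
discard submitted with the BLKDISCARD ioctl reaches
__clone_and_map_discard() above, and a range that crosses a target
boundary fails with EOPNOTSUPP until the FIXME is addressed. The device
path and helper name here are assumptions:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* BLKDISCARD */

	/* discard len bytes starting at byte offset start on dev,
	 * e.g. "/dev/mapper/lv0" (hypothetical device) */
	static int discard_range(const char *dev, uint64_t start, uint64_t len)
	{
		uint64_t range[2] = { start, len };
		int fd, ret;

		fd = open(dev, O_WRONLY);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, BLKDISCARD, &range);
		close(fd);
		return ret;
	}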
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 0d7b374c5dc2..0c2dd5f4af76 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -61,6 +61,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_discards(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);