about | summary | refs | log | tree | commit | diff | stats
path: root/drivers
diff options
context:
space:
mode:
authorAlasdair G Kergon <agk@redhat.com>2013-03-01 17:45:47 -0500
committerAlasdair G Kergon <agk@redhat.com>2013-03-01 17:45:47 -0500
commite4c938111f25dbbf2579e65ce4a7cb2d20a59308 (patch)
tree4aa990cb93b7ea3318a1ebde458044c57f87d1f0 /drivers
parent14fe594d679c9ba8c8e3d6ad1a3ed9c0ba336df0 (diff)
dm: refactor bio cloning
Refactor part of the bio splitting and cloning code to try to make it easier to understand.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/dm.c164
1 file changed, 96 insertions(+), 68 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 02079cfccaf5..0890abd9dffa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1087,7 +1087,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
 		      sector_t sector, unsigned short idx,
-		      unsigned short bv_count, unsigned int len)
+		      unsigned short bv_count, unsigned len)
 {
 	struct bio *clone = &tio->clone;
 	unsigned trim = 0;
@@ -1159,17 +1159,23 @@ static int __send_empty_flush(struct clone_info *ci)
 	return 0;
 }
 
-static void __clone_and_map_data_bio(struct clone_info *ci,
-				     struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+				     sector_t sector, int nr_iovecs,
+				     unsigned short idx, unsigned short bv_count,
+				     unsigned offset, unsigned len,
+				     unsigned split_bvec)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
 
-	tio = alloc_tio(ci, ti, bio->bi_max_vecs, 0);
-	clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
-		  ci->sector_count);
+	tio = alloc_tio(ci, ti, nr_iovecs, 0);
+
+	if (split_bvec)
+		clone_split_bio(tio, bio, sector, idx, offset, len);
+	else
+		clone_bio(tio, bio, sector, idx, bv_count, len);
+
 	__map_bio(tio);
-	ci->sector_count = 0;
 }
 
 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1238,12 +1244,69 @@ static int __send_write_same(struct clone_info *ci)
 	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
+/*
+ * Find maximum number of sectors / bvecs we can process with a single bio.
+ */
+static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
+{
+	struct bio *bio = ci->bio;
+	sector_t bv_len, total_len = 0;
+
+	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
+		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
+
+		if (bv_len > max)
+			break;
+
+		max -= bv_len;
+		total_len += bv_len;
+	}
+
+	return total_len;
+}
+
+static int __split_bvec_across_targets(struct clone_info *ci,
+				       struct dm_target *ti, sector_t max)
+{
+	struct bio *bio = ci->bio;
+	struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+	sector_t remaining = to_sector(bv->bv_len);
+	unsigned offset = 0;
+	sector_t len;
+
+	do {
+		if (offset) {
+			ti = dm_table_find_target(ci->map, ci->sector);
+			if (!dm_target_is_valid(ti))
+				return -EIO;
+
+			max = max_io_len(ci->sector, ti);
+		}
+
+		len = min(remaining, max);
+
+		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
+					 bv->bv_offset + offset, len, 1);
+
+		ci->sector += len;
+		ci->sector_count -= len;
+		offset += to_bytes(len);
+	} while (remaining -= len);
+
+	ci->idx++;
+
+	return 0;
+}
+
+/*
+ * Select the correct strategy for processing a non-flush bio.
+ */
 static int __split_and_process_non_flush(struct clone_info *ci)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target *ti;
-	sector_t len = 0, max;
-	struct dm_target_io *tio;
+	sector_t len, max;
+	int idx;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD))
 		return __send_discard(ci);
@@ -1256,74 +1319,39 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 
 	max = max_io_len(ci->sector, ti);
 
+	/*
+	 * Optimise for the simple case where we can do all of
+	 * the remaining io with a single clone.
+	 */
 	if (ci->sector_count <= max) {
-		/*
-		 * Optimise for the simple case where we can do all of
-		 * the remaining io with a single clone.
-		 */
-		__clone_and_map_data_bio(ci, ti);
+		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+					 ci->idx, bio->bi_vcnt - ci->idx, 0,
+					 ci->sector_count, 0);
+		ci->sector_count = 0;
+		return 0;
+	}
 
-	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-		/*
-		 * There are some bvecs that don't span targets.
-		 * Do as many of these as possible.
-		 */
-		int i;
-		sector_t remaining = max;
-		sector_t bv_len;
-
-		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
-			bv_len = to_sector(bio->bi_io_vec[i].bv_len);
-
-			if (bv_len > remaining)
-				break;
-
-			remaining -= bv_len;
-			len += bv_len;
-		}
-
-		tio = alloc_tio(ci, ti, bio->bi_max_vecs, 0);
-		clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len);
-		__map_bio(tio);
+	/*
+	 * There are some bvecs that don't span targets.
+	 * Do as many of these as possible.
+	 */
+	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+		len = __len_within_target(ci, max, &idx);
 
-		ci->sector += len;
-		ci->sector_count -= len;
-		ci->idx = i;
+		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+					 ci->idx, idx - ci->idx, 0, len, 0);
 
-	} else {
-		/*
-		 * Handle a bvec that must be split between two or more targets.
-		 */
-		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-		sector_t remaining = to_sector(bv->bv_len);
-		unsigned int offset = 0;
-
-		do {
-			if (offset) {
-				ti = dm_table_find_target(ci->map, ci->sector);
-				if (!dm_target_is_valid(ti))
-					return -EIO;
-
-				max = max_io_len(ci->sector, ti);
-			}
-
-			len = min(remaining, max);
-
-			tio = alloc_tio(ci, ti, 1, 0);
-			clone_split_bio(tio, bio, ci->sector, ci->idx,
-					bv->bv_offset + offset, len);
-
-			__map_bio(tio);
-
-			ci->sector += len;
-			ci->sector_count -= len;
-			offset += to_bytes(len);
-		} while (remaining -= len);
-
-		ci->idx++;
-	}
+		ci->sector += len;
+		ci->sector_count -= len;
+		ci->idx = idx;
 
-	return 0;
+		return 0;
+	}
+
+	/*
+	 * Handle a bvec that must be split between two or more targets.
+	 */
+	return __split_bvec_across_targets(ci, ti, max);
 }
1328 1356
1329/* 1357/*