author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/md/dm-table.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
        litmus/sched_cedf.c
Diffstat (limited to 'drivers/md/dm-table.c')
 -rw-r--r--  drivers/md/dm-table.c  207
 1 file changed, 132 insertions(+), 75 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9fc07d7a4b9..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -55,6 +55,7 @@ struct dm_table {
 	struct dm_target *targets;
 
 	unsigned discards_supported:1;
+	unsigned integrity_supported:1;
 
 	/*
 	 * Indicates the rw permissions for the new logical
@@ -71,6 +72,8 @@ struct dm_table {
 	void *event_context;
 
 	struct dm_md_mempools *mempools;
+
+	struct list_head target_callbacks;
 };
 
 /*
@@ -204,6 +207,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
+	INIT_LIST_HEAD(&t->target_callbacks);
 	atomic_set(&t->holders, 0);
 	t->discards_supported = 1;
 
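The new target_callbacks list is initialised in dm_table_create() right next to t->devices for the same reason: a struct list_head must go through INIT_LIST_HEAD() before anything calls list_add() or list_for_each_entry() on it, since both follow the prev/next pointers unconditionally. A minimal sketch of the pattern (hypothetical my_* names, kernel <linux/list.h> idiom):

    #include <linux/list.h>

    struct my_cb {                          /* hypothetical element */
            struct list_head list;
    };

    struct my_table {                       /* hypothetical container */
            struct list_head callbacks;
    };

    static void my_table_init(struct my_table *t)
    {
            /* Point prev/next at the head itself; an uninitialised
             * head would make list_add() below dereference garbage. */
            INIT_LIST_HEAD(&t->callbacks);
    }

    static void my_table_register(struct my_table *t, struct my_cb *cb)
    {
            list_add(&cb->list, &t->callbacks);
    }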
@@ -325,15 +329,18 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev,
 
 	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->dm_dev.mode);
+	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
-	if (r)
-		blkdev_put(bdev, d->dm_dev.mode);
-	else
-		d->dm_dev.bdev = bdev;
-	return r;
+
+	r = bd_link_disk_holder(bdev, dm_disk(md));
+	if (r) {
+		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
+		return r;
+	}
+
+	d->dm_dev.bdev = bdev;
+	return 0;
 }
 
 /*
@@ -344,8 +351,8 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
-	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
 	d->dm_dev.bdev = NULL;
 }
 
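Taken together, the two hunks above replace the removed open_by_devnum()/bd_claim_by_disk() pair with blkdev_get_by_dev(..., FMODE_EXCL, holder) plus bd_link_disk_holder(). Two symmetries must hold, and the sketch below makes them explicit (example_* names are illustrative; 'holder' and 'disk' stand in for dm's _claim_ptr and dm_disk(md)): the mode passed to blkdev_get_by_dev() must be repeated exactly at blkdev_put(), and a successful bd_link_disk_holder() must be undone by bd_unlink_disk_holder() before the final put.

    static int example_open(dev_t dev, fmode_t mode, void *holder,
                            struct gendisk *disk, struct block_device **out)
    {
            struct block_device *bdev;
            int r;

            bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, holder);
            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);

            r = bd_link_disk_holder(bdev, disk);
            if (r) {
                    /* Must put with the same mode used to get. */
                    blkdev_put(bdev, mode | FMODE_EXCL);
                    return r;
            }

            *out = bdev;
            return 0;
    }

    static void example_close(struct block_device *bdev, fmode_t mode,
                              struct gendisk *disk)
    {
            bd_unlink_disk_holder(bdev, disk);
            blkdev_put(bdev, mode | FMODE_EXCL);
    }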
@@ -355,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
+	struct request_queue *q;
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
 	sector_t dev_size =
@@ -363,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		limits->logical_block_size >> SECTOR_SHIFT;
 	char b[BDEVNAME_SIZE];
 
+	/*
+	 * Some devices exist without request functions,
+	 * such as loop devices not yet bound to backing files.
+	 * Forbid the use of such devices.
+	 */
+	q = bdev_get_queue(bdev);
+	if (!q || !q->make_request_fn) {
+		DMWARN("%s: %s is not yet initialised: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
+	}
+
 	if (!dev_size)
 		return 0;
 
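The added guard rejects devices whose queue cannot accept bios yet; at this point in the kernel's history an unbound loop device is the canonical example, since it has no make_request_fn until losetup attaches a backing file. Factored out as a predicate, the test would read (hypothetical helper, not part of the patch):

    /* Hypothetical predicate equivalent to the guard added above:
     * a block device is only usable once its queue can accept bios. */
    static bool example_bdev_is_ready(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            return q && q->make_request_fn;
    }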
@@ -486,11 +510,6 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	return 0;
 }
 
-/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 			 sector_t start, sector_t len, void *data)
 {
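The deleted min_not_zero() had become redundant with the shared definition in <linux/kernel.h> (a reasonable reading of why it could simply be dropped), and the local copy was also hazardous: the expansion has no outer parentheses, so operator precedence can split the conditional when the macro is embedded in a larger expression. A worked example of the hazard:

    #define min(a, b) ((a) < (b) ? (a) : (b))

    /* The deleted local macro, verbatim: note the missing outer parens. */
    #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

    /* Expands to: 2 * (4 == 0) ? 3 : ((3 == 0) ? 4 : min(4, 3))
     * '2 * (4 == 0)' is 0 (false), so the whole expression yields 3,
     * not the intended 2 * 3 == 6. */
    int surprising = 2 * min_not_zero(4, 3);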
@@ -522,9 +541,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	 */
 
 	if (q->merge_bvec_fn && !ti->type->merge)
-		limits->max_sectors =
-			min_not_zero(limits->max_sectors,
-				     (unsigned int) (PAGE_SIZE >> 9));
+		blk_limits_max_hw_sectors(limits,
+					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
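blk_limits_max_hw_sectors() absorbs the open-coded min_not_zero() update above. The cap itself is one page expressed in 512-byte sectors: shifting right by 9 divides bytes by 512, so on a 4 KiB-page machine the limit works out to 8 sectors per I/O when the underlying queue has a merge_bvec_fn the target cannot consult. A tiny sketch of the arithmetic (EXAMPLE_SECTOR_SHIFT spelled out for clarity):

    #define EXAMPLE_SECTOR_SHIFT 9          /* 512-byte sectors */

    /* On PAGE_SIZE == 4096: 4096 >> 9 == 8 sectors == one page. */
    static unsigned int example_sectors_per_page(unsigned long page_size)
    {
            return (unsigned int)(page_size >> EXAMPLE_SECTOR_SHIFT);
    }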
@@ -859,7 +877,7 @@ int dm_table_alloc_md_mempools(struct dm_table *t)
 		return -EINVAL;
 	}
 
-	t->mempools = dm_alloc_md_mempools(type);
+	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
 	if (!t->mempools)
 		return -ENOMEM;
 
@@ -926,18 +944,80 @@ static int dm_table_build_index(struct dm_table *t)
 }
 
 /*
+ * Get a disk whose integrity profile reflects the table's profile.
+ * If %match_all is true, all devices' profiles must match.
+ * If %match_all is false, all devices must at least have an
+ * allocated integrity profile; but uninitialized is ok.
+ * Returns NULL if integrity support was inconsistent or unavailable.
+ */
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
+						    bool match_all)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd = NULL;
+	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+
+	list_for_each_entry(dd, devices, list) {
+		template_disk = dd->dm_dev.bdev->bd_disk;
+		if (!blk_get_integrity(template_disk))
+			goto no_integrity;
+		if (!match_all && !blk_integrity_is_initialized(template_disk))
+			continue; /* skip uninitialized profiles */
+		else if (prev_disk &&
+			 blk_integrity_compare(prev_disk, template_disk) < 0)
+			goto no_integrity;
+		prev_disk = template_disk;
+	}
+
+	return template_disk;
+
+no_integrity:
+	if (prev_disk)
+		DMWARN("%s: integrity not set: %s and %s profile mismatch",
+		       dm_device_name(t->md),
+		       prev_disk->disk_name,
+		       template_disk->disk_name);
+	return NULL;
+}
+
+/*
  * Register the mapped device for blk_integrity support if
- * the underlying devices support it.
+ * the underlying devices have an integrity profile.  But all devices
+ * may not have matching profiles (checking all devices isn't reliable
+ * during table load because this table may use other DM device(s) which
+ * must be resumed before they will have an initialized integrity profile).
+ * Stacked DM devices force a 2 stage integrity profile validation:
+ * 1 - during load, validate all initialized integrity profiles match
+ * 2 - during resume, validate all integrity profiles match
  */
 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
+	struct gendisk *template_disk = NULL;
+
+	template_disk = dm_table_get_integrity_disk(t, false);
+	if (!template_disk)
+		return 0;
 
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev))
-			return blk_integrity_register(dm_disk(md), NULL);
+	if (!blk_integrity_is_initialized(dm_disk(md))) {
+		t->integrity_supported = 1;
+		return blk_integrity_register(dm_disk(md), NULL);
+	}
 
+	/*
+	 * If DM device already has an initialized integrity
+	 * profile the new profile should not conflict.
+	 */
+	if (blk_integrity_is_initialized(template_disk) &&
+	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+		DMWARN("%s: conflict with existing integrity profile: "
+		       "%s profile mismatch",
+		       dm_device_name(t->md),
+		       template_disk->disk_name);
+		return 1;
+	}
+
+	/* Preserve existing initialized integrity profile */
+	t->integrity_supported = 1;
 	return 0;
 }
 
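The match_all flag is what implements the two-stage validation described in the new comment. At table load, dm_table_prealloc_integrity() passes false, tolerating devices whose integrity profile is allocated but not yet initialized (a stacked DM device only initializes its profile once it is itself resumed); at resume, dm_table_set_integrity() below passes true and demands fully matching profiles. Condensed, the contract at the two call sites is:

    /* Stage 1 - table load: allocated-but-uninitialized profiles are
     * skipped, so stacked DM devices that haven't resumed yet pass. */
    template_disk = dm_table_get_integrity_disk(t, false);

    /* Stage 2 - resume: every device's profile must be initialized and
     * mutually consistent, otherwise NULL comes back with a DMWARN. */
    template_disk = dm_table_get_integrity_disk(t, true);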
@@ -1091,41 +1171,27 @@ combine_limits:
 
 /*
  * Set the integrity profile for this device if all devices used have
- * matching profiles.
+ * matching profiles.  We're quite deep in the resume path but still
+ * don't know if all devices (particularly DM devices this device
+ * may be stacked on) have matching profiles.  Even if the profiles
+ * don't match we have no way to fail (to resume) at this point.
  */
 static void dm_table_set_integrity(struct dm_table *t)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *prev = NULL, *dd = NULL;
+	struct gendisk *template_disk = NULL;
 
 	if (!blk_get_integrity(dm_disk(t->md)))
 		return;
 
-	list_for_each_entry(dd, devices, list) {
-		if (prev &&
-		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
-					  dd->dm_dev.bdev->bd_disk) < 0) {
-			DMWARN("%s: integrity not set: %s and %s mismatch",
-			       dm_device_name(t->md),
-			       prev->dm_dev.bdev->bd_disk->disk_name,
-			       dd->dm_dev.bdev->bd_disk->disk_name);
-			goto no_integrity;
-		}
-		prev = dd;
+	template_disk = dm_table_get_integrity_disk(t, true);
+	if (!template_disk &&
+	    blk_integrity_is_initialized(dm_disk(t->md))) {
+		DMWARN("%s: device no longer has a valid integrity profile",
+		       dm_device_name(t->md));
+		return;
 	}
-
-	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
-		goto no_integrity;
-
 	blk_integrity_register(dm_disk(t->md),
-			       bdev_get_integrity(prev->dm_dev.bdev));
-
-	return;
-
-no_integrity:
-	blk_integrity_register(dm_disk(t->md), NULL);
-
-	return;
+			       blk_get_integrity(template_disk));
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1136,11 +1202,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 	else
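The no_cluster handling can be deleted because, in this kernel generation, clustering was deprecated as a queue flag and moved into struct queue_limits (my reading of the motivation): once it is a limits field, it is propagated by the limits-stacking machinery, so the q->limits = *limits assignment a few lines up already carries it, and readers use a helper instead of the flag bit. Roughly:

    /* Sketch (assumption: post-QUEUE_FLAG_CLUSTER semantics): what
     * blk_queue_cluster() reads after the limits copy above. */
    static inline unsigned int example_queue_cluster(struct request_queue *q)
    {
            return q->limits.cluster;
    }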
@@ -1234,10 +1295,17 @@ int dm_table_resume_targets(struct dm_table *t)
 	return 0;
 }
 
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
+{
+	list_add(&cb->list, &t->target_callbacks);
+}
+EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
+
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_target_callbacks *cb;
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
@@ -1252,6 +1320,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 			 bdevname(dd->dm_dev.bdev, b));
 	}
 
+	list_for_each_entry(cb, &t->target_callbacks, list)
+		if (cb->congested_fn)
+			r |= cb->congested_fn(cb, bdi_bits);
+
 	return r;
 }
 
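dm_table_add_target_callbacks() lets a target hook queries that dm answers per mapped device; dm_table_any_congested() now ORs each registered congested_fn into its result after polling the underlying devices. A hedged sketch of a target registering one (example_* names are hypothetical; the dm-raid target is, to my knowledge, the intended first user, though that is not shown in this diff):

    #include <linux/device-mapper.h>

    struct example_target {                 /* hypothetical target context */
            struct dm_target_callbacks callbacks;
            /* ... target-private state ... */
    };

    static int example_congested(struct dm_target_callbacks *cb, int bdi_bits)
    {
            struct example_target *et =
                    container_of(cb, struct example_target, callbacks);

            (void)et;       /* consult target-private state here */
            return 0;       /* not congested for any of bdi_bits */
    }

    /* From the target's ctr, once ti->table exists: */
    static void example_register(struct dm_target *ti, struct example_target *et)
    {
            et->callbacks.congested_fn = example_congested;
            dm_table_add_target_callbacks(ti->table, &et->callbacks);
    }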
@@ -1269,24 +1341,6 @@ int dm_table_any_busy_target(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			blk_unplug(q);
-		else
-			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
-	}
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
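dm_table_unplug_all() goes away because this merge straddles the block layer's move from per-queue plugging to per-task plugging, which removed blk_unplug() outright (an inference from the era; the deletion itself is what the hunk shows). Under the new scheme a submitter batches its own bios, so there is nothing for dm to kick on behalf of its component devices. The replacement idiom, for reference:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Per-task plugging: bios submitted inside the plug/unplug pair are
     * batched and flushed when the plug is finished (or on schedule()). */
    static void example_submit_batch(struct bio **bios, int n)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);
            for (i = 0; i < n; i++)
                    submit_bio(READ, bios[i]);  /* era-appropriate 2-arg form */
            blk_finish_plug(&plug);
    }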
@@ -1309,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
 		return 0;
 
 	/*
-	 * Ensure that at least one underlying device supports discards.
+	 * Unless any target used by the table set discards_supported,
+	 * require at least one underlying device to support discards.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
 	 * supporting discard must provide.
@@ -1317,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
+		if (ti->discards_supported)
+			return 1;
+
 		if (ti->type->iterate_devices &&
 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
 			return 1;
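With the new early return, a target can claim discard support even when no underlying device reports it, e.g. because the target services discards itself. A target opts in from its constructor; a hedged sketch (hypothetical target, and num_discard_requests is an assumption about the era-typical companion field, not something this hunk touches):

    static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
    {
            /* Make dm_table_supports_discards() return true for this
             * table regardless of the underlying devices. */
            ti->discards_supported = 1;
            ti->num_discard_requests = 1;   /* assumption: companion field */

            return 0;
    }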
@@ -1334,4 +1392,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);