Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--	drivers/md/dm-table.c	| 187
1 file changed, 148 insertions(+), 39 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 451c3bb176d..bc04518e9d8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,7 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #define DM_MSG_PREFIX "table"
 
@@ -54,7 +54,6 @@ struct dm_table {
 	sector_t *highs;
 	struct dm_target *targets;
 
-	unsigned discards_supported:1;
 	unsigned integrity_supported:1;
 
 	/*
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
 		return NULL;
 
 	size = nmemb * elem_size;
-	addr = vmalloc(size);
-	if (addr)
-		memset(addr, 0, size);
+	addr = vzalloc(size);
 
 	return addr;
 }
+EXPORT_SYMBOL(dm_vcalloc);
 
 /*
  * highs, and targets are managed as dynamic arrays during a
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 	INIT_LIST_HEAD(&t->devices);
 	INIT_LIST_HEAD(&t->target_callbacks);
 	atomic_set(&t->holders, 0);
-	t->discards_supported = 1;
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t)
 {
 	atomic_inc(&t->holders);
 }
+EXPORT_SYMBOL(dm_table_get);
 
 void dm_table_put(struct dm_table *t)
 {
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t)
 	smp_mb__before_atomic_dec();
 	atomic_dec(&t->holders);
 }
+EXPORT_SYMBOL(dm_table_put);
 
 /*
  * Checks to see if we need to extend highs or targets.
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
  * Add a device to the list, or just increment the usage count if
  * it's already present.
  */
-static int __table_get_device(struct dm_table *t, struct dm_target *ti,
-			      const char *path, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+		  struct dm_dev **result)
 {
 	int r;
 	dev_t uninitialized_var(dev);
 	struct dm_dev_internal *dd;
 	unsigned int major, minor;
+	struct dm_table *t = ti->table;
 
 	BUG_ON(!t);
 
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	*result = &dd->dm_dev;
 	return 0;
 }
+EXPORT_SYMBOL(dm_get_device);
 
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 			 sector_t start, sector_t len, void *data)
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	 * If not we'll force DM to use PAGE_SIZE or
 	 * smaller I/O, just to be safe.
 	 */
-
-	if (q->merge_bvec_fn && !ti->type->merge)
+	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
 		blk_limits_max_hw_sectors(limits,
 					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
-		  struct dm_dev **result)
-{
-	return __table_get_device(ti->table, ti, path, mode, result);
-}
-
-
 /*
- * Decrement a devices use count and remove it if necessary.
+ * Decrement a device's use count and remove it if necessary.
  */
 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 		kfree(dd);
 	}
 }
+EXPORT_SYMBOL(dm_put_device);
 
 /*
  * Checks to see if the target joins onto the end of the table.
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 
-	if (!tgt->num_discard_requests)
-		t->discards_supported = 0;
+	if (!tgt->num_discard_requests && tgt->discards_supported)
+		DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+		       dm_device_name(t->md), type);
 
 	return 0;
 
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return r;
 }
 
+/*
+ * Target argument parsing helpers.
+ */
+static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+			     unsigned *value, char **error, unsigned grouped)
+{
+	const char *arg_str = dm_shift_arg(arg_set);
+
+	if (!arg_str ||
+	    (sscanf(arg_str, "%u", value) != 1) ||
+	    (*value < arg->min) ||
+	    (*value > arg->max) ||
+	    (grouped && arg_set->argc < *value)) {
+		*error = arg->error;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+		unsigned *value, char **error)
+{
+	return validate_next_arg(arg, arg_set, value, error, 0);
+}
+EXPORT_SYMBOL(dm_read_arg);
+
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+		      unsigned *value, char **error)
+{
+	return validate_next_arg(arg, arg_set, value, error, 1);
+}
+EXPORT_SYMBOL(dm_read_arg_group);
+
+const char *dm_shift_arg(struct dm_arg_set *as)
+{
+	char *r;
+
+	if (as->argc) {
+		as->argc--;
+		r = *as->argv;
+		as->argv++;
+		return r;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(dm_shift_arg);
+
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+{
+	BUG_ON(as->argc < num_args);
+	as->argc -= num_args;
+	as->argv += num_args;
+}
+EXPORT_SYMBOL(dm_consume_args);
+
 static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
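The argument-parsing helpers introduced in the hunk above are meant to be called from a target constructor while it walks its argv. The fragment below is only an illustrative sketch of that intended use, not part of this patch: the feature name and dm_arg bounds are hypothetical, and struct dm_arg is assumed to be the {min, max, error} triple that validate_next_arg() dereferences.

/*
 * Hypothetical ctr fragment (assumes <linux/device-mapper.h>): parses a
 * trailing "<#feature args> <feature>..." group, as dm targets commonly do.
 */
static int example_parse_features(struct dm_target *ti, struct dm_arg_set *as)
{
	static struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};
	unsigned num_features;
	const char *arg_name;
	int r;

	/* Read the count and check that at least that many arguments follow. */
	r = dm_read_arg_group(_args, as, &num_features, &ti->error);
	if (r)
		return r;

	while (num_features--) {
		arg_name = dm_shift_arg(as);

		if (!strcasecmp(arg_name, "example_feature"))	/* hypothetical flag */
			continue;

		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	return 0;
}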
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t)
 	t->event_fn(t->event_context);
 	mutex_unlock(&_event_lock);
 }
+EXPORT_SYMBOL(dm_table_event);
 
 sector_t dm_table_get_size(struct dm_table *t)
 {
 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
 }
+EXPORT_SYMBOL(dm_table_get_size);
 
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 {
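dm_table_event() gains an export above because targets call it to report state changes (for example a failed mirror leg or path) to user space waiting on the device's event counter. It takes a mutex, so it cannot be called from interrupt context and is normally deferred to a workqueue. A hedged sketch of that pattern; the context structure and names are hypothetical:

/* Hypothetical deferred event trigger; ti is saved by the target's ctr. */
struct example_event_ctx {
	struct dm_target *ti;
	struct work_struct trigger_event;
};

static void example_trigger_event(struct work_struct *work)
{
	struct example_event_ctx *ec =
		container_of(work, struct example_event_ctx, trigger_event);

	dm_table_event(ec->ti->table);
}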
@@ -1184,19 +1238,72 @@ static void dm_table_set_integrity(struct dm_table *t)
 		return;
 
 	template_disk = dm_table_get_integrity_disk(t, true);
-	if (!template_disk &&
-	    blk_integrity_is_initialized(dm_disk(t->md))) {
+	if (template_disk)
+		blk_integrity_register(dm_disk(t->md),
+				       blk_get_integrity(template_disk));
+	else if (blk_integrity_is_initialized(dm_disk(t->md)))
 		DMWARN("%s: device no longer has a valid integrity profile",
 		       dm_device_name(t->md));
-		return;
+	else
+		DMWARN("%s: unable to establish an integrity profile",
+		       dm_device_name(t->md));
+}
+
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	unsigned flush = (*(unsigned *)data);
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	/*
+	 * Require at least one underlying device to support flushes.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting flushes must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!ti->num_flush_requests)
+			continue;
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+			return 1;
+	}
+
+	return 0;
+}
+
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	/* Ensure that all targets supports discard_zeroes_data. */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (ti->discard_zeroes_data_unsupported)
+			return 0;
 	}
-	blk_integrity_register(dm_disk(t->md),
-			       blk_get_integrity(template_disk));
+
+	return 1;
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
+	unsigned flush = 0;
+
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
@@ -1207,6 +1314,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
+	if (dm_table_supports_flush(t, REQ_FLUSH)) {
+		flush |= REQ_FLUSH;
+		if (dm_table_supports_flush(t, REQ_FUA))
+			flush |= REQ_FUA;
+	}
+	blk_queue_flush(q, flush);
+
+	if (!dm_table_discard_zeroes_data(t))
+		q->limits.discard_zeroes_data = 0;
+
 	dm_table_set_integrity(t);
 
 	/*
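dm_table_supports_flush() only consults targets that advertise flushes through ti->num_flush_requests and that implement iterate_devices, so a pass-through target has to provide both for REQ_FLUSH/REQ_FUA to survive the probing above. A sketch under those assumptions; the target and its callbacks are hypothetical, not something this patch adds:

/*
 * Hypothetical single-device pass-through target
 * (assumes <linux/device-mapper.h> and <linux/slab.h>).
 */
struct example_ctx {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct example_ctx *ec;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return -EINVAL;
	}

	ti->num_flush_requests = 1;	/* ask dm core to hand us empty flush requests */
	ti->private = ec;
	return 0;
}

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_ctx *ec = ti->private;

	/* Lets dm_table_supports_flush() and friends probe the underlying queue. */
	return fn(ti, ec->dev, 0, ti->len, data);
}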
@@ -1237,6 +1354,7 @@ fmode_t dm_table_get_mode(struct dm_table *t)
 {
 	return t->mode;
 }
+EXPORT_SYMBOL(dm_table_get_mode);
 
 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
 {
@@ -1345,6 +1463,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
 }
+EXPORT_SYMBOL(dm_table_get_md);
 
 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
@@ -1359,19 +1478,19 @@ bool dm_table_supports_discards(struct dm_table *t)
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	if (!t->discards_supported)
-		return 0;
-
 	/*
 	 * Unless any target used by the table set discards_supported,
 	 * require at least one underlying device to support discards.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
-	 * supporting discard must provide.
+	 * supporting discard selectively must provide.
 	 */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
+		if (!ti->num_discard_requests)
+			continue;
+
 		if (ti->discards_supported)
 			return 1;
 
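With the checks above, a table reports discard support only when some target sets num_discard_requests and either claims discards_supported itself or, via iterate_devices, sits on at least one discard-capable device. A hypothetical ctr fragment showing the two knobs a target would set (illustrative only, not part of this patch):

/* Hypothetical ctr fragment. */
ti->num_discard_requests = 1;	/* willing to have REQ_DISCARD bios mapped to it */

/*
 * Optional: claim discard support even when no underlying device
 * advertises it, e.g. because the target completes discards itself.
 */
ti->discards_supported = 1;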
@@ -1382,13 +1501,3 @@ bool dm_table_supports_discards(struct dm_table *t)
 
 	return 0;
 }
-
-EXPORT_SYMBOL(dm_vcalloc);
-EXPORT_SYMBOL(dm_get_device);
-EXPORT_SYMBOL(dm_put_device);
-EXPORT_SYMBOL(dm_table_event);
-EXPORT_SYMBOL(dm_table_get_size);
-EXPORT_SYMBOL(dm_table_get_mode);
-EXPORT_SYMBOL(dm_table_get_md);
-EXPORT_SYMBOL(dm_table_put);
-EXPORT_SYMBOL(dm_table_get);