Diffstat (limited to 'drivers/md/dm-table.c')
 drivers/md/dm-table.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 57 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aae..100368eb7991 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+			sector_t start, sector_t len, void *data)
+{
+	unsigned *num_devices = data;
+
+	(*num_devices)++;
+
+	return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+	struct dm_target *uninitialized_var(ti);
+	unsigned i = 0, num_devices = 0;
+
+	while (i < dm_table_get_num_targets(table)) {
+		ti = dm_table_get_target(table, i++);
+
+		if (!ti->type->iterate_devices)
+			return false;
+
+		ti->type->iterate_devices(ti, count_device, &num_devices);
+		if (num_devices)
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
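The new predicate is deliberately non-static, so core dm code outside dm-table.c can consult it when deciding how to handle a table. A minimal caller sketch (the function below and its policy are hypothetical, for illustration only; they are not part of this patch):

	/* Hypothetical policy check: refuse to replace a table that has
	 * data devices with one that has none, since no queue limits
	 * could then be stacked from an underlying device. */
	static int check_table_swap(struct dm_table *live, struct dm_table *next)
	{
		if (!dm_table_has_no_data_devices(live) &&
		    dm_table_has_no_data_devices(next))
			return -EINVAL;
		return 0;
	}

Because a target without an iterate_devices method makes the question unanswerable, the predicate errs on the side of reporting false.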
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
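Factoring the loop out of dm_table_is_nonrot() into dm_table_all_devices_attribute() turns "do all underlying devices have attribute X?" into a one-line query for any iterate_devices_callout_fn. A sketch of how a further predicate would slot in (device_io_stat is a hypothetical name; blk_queue_io_stat() is the stock queue-flag test from blkdev.h):

	/* Hypothetical predicate with the iterate_devices_callout_fn
	 * signature: true if the underlying queue gathers I/O statistics. */
	static int device_io_stat(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		return q && blk_queue_io_stat(q);
	}

A call such as dm_table_all_devices_attribute(t, device_io_stat) would then return true only if every target supports iterate_devices and every data device passes the test.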
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	dm_table_set_integrity(t);
 
 	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool. Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios
 	 * are processed by request-based dm, which refers to the queue
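Note the inversion in the ADD_RANDOM hunk: dm_table_all_devices_attribute() asks whether all devices satisfy the callout, so passing device_is_not_random makes it return true exactly when no underlying device has QUEUE_FLAG_ADD_RANDOM set. Spelled out with a named intermediate (an illustrative restatement, not code from this patch):

	/* Clear ADD_RANDOM only when the dm queue currently advertises it
	 * but none of the underlying devices can feed the entropy pool. */
	bool no_device_is_random =
		dm_table_all_devices_attribute(t, device_is_not_random);

	if (blk_queue_add_random(q) && no_device_is_random)
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);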