author    Milan Broz <mbroz@redhat.com>    2012-09-26 18:45:43 -0400
committer Alasdair G Kergon <agk@redhat.com>    2012-09-26 18:45:43 -0400
commit    c3c4555edd10dbc0b388a0125b9c50de5e79af05 (patch)
tree      6beec93488391802ad897ebeb4c208af432a5791 /drivers
parent    ba1cbad93dd47223b1f3b8edd50dd9ef2abcb2ed (diff)
dm table: clear add_random unless all devices have it set
Always clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
have it set. Otherwise devices with predictable characteristics may
contribute entropy.

QUEUE_FLAG_ADD_RANDOM specifies whether or not queue IO timings
contribute to the random pool.

For bio-based targets this flag is always 0 because such devices have
no real queue. For request-based devices this flag was always set to 1
by default.

Now set it according to the flags on underlying devices: if there is at
least one device which should not contribute, set the flag to zero. If
a device, such as fast SSD storage, is not suitable for supplying
entropy, a request-based queue stacked over it will not be either.

Because the checking logic is exactly the same as for the rotational
flag, share the iteration function with device_is_nonrot().

Signed-off-by: Milan Broz <mbroz@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
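The patch hinges on one pattern: a per-device predicate handed to a single
shared iterator that answers "does every device in the table satisfy this?".
Below is a minimal, self-contained user-space C sketch of that pattern; every
structure and name in it is an illustrative stand-in, not the kernel's actual
types, and it checks each device directly rather than routing through a
target's iterate_devices hook as the real code does.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's dm_dev/dm_table (not real types). */
struct dev {
	const char *name;
	bool nonrot;		/* non-rotational device, e.g. an SSD */
	bool add_random;	/* IO timings may feed the entropy pool */
};

struct table {
	const struct dev *devs;
	size_t ndevs;
};

typedef bool (*dev_predicate)(const struct dev *d);

/* Shared iterator, analogous to dm_table_all_devices_attribute():
 * true only if every device in the table satisfies the predicate. */
static bool all_devices(const struct table *t, dev_predicate pred)
{
	for (size_t i = 0; i < t->ndevs; i++)
		if (!pred(&t->devs[i]))
			return false;
	return true;
}

/* Per-attribute predicates, analogous to device_is_nonrot() and
 * device_is_not_random(). Note the inverted sense of the second:
 * it is true for devices that should NOT contribute entropy. */
static bool dev_is_nonrot(const struct dev *d)     { return d->nonrot; }
static bool dev_is_not_random(const struct dev *d) { return !d->add_random; }

int main(void)
{
	const struct dev devs[] = {
		{ "ssd0", .nonrot = true,  .add_random = false },
		{ "hdd0", .nonrot = false, .add_random = true  },
	};
	const struct table t = { devs, sizeof(devs) / sizeof(devs[0]) };

	/* Mixed table: not all devices are non-rotational, and not all
	 * of them are entropy-free, so both checks come back false. */
	printf("all nonrot:      %d\n", all_devices(&t, dev_is_nonrot));
	printf("none add_random: %d\n", all_devices(&t, dev_is_not_random));
	return 0;
}

In the patch itself the predicate is applied through each target's
iterate_devices hook instead of a flat loop, which is what lets one helper
serve both the rotational and the add_random checks.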
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-table.c	26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aae..77b90ae66991 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1354,17 +1354,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1396,7 +1404,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1413,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	dm_table_set_integrity(t);
 
 	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool.  Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios
 	 * are processed by request-based dm, which refers to the queue
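One detail worth noting in dm_table_set_restrictions() above: QUEUE_FLAG_NONROT
is both set and cleared according to the devices' consensus, while
QUEUE_FLAG_ADD_RANDOM is only ever cleared, never set, so a bio-based queue
that starts with the flag at 0 stays at 0. A small, self-contained sketch of
that shape follows; the flag bits and helper names are invented for
illustration and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Invented flag bits for illustration; not the kernel's values. */
#define Q_NONROT     (1u << 0)
#define Q_ADD_RANDOM (1u << 1)

/* Mirrors the shape of the flag handling in dm_table_set_restrictions():
 * NONROT follows the devices' consensus in both directions, while
 * ADD_RANDOM can only be taken away once it is absent underneath. */
static void set_restrictions(unsigned *qflags,
			     bool all_nonrot, bool none_add_random)
{
	if (all_nonrot)
		*qflags |= Q_NONROT;
	else
		*qflags &= ~Q_NONROT;

	if ((*qflags & Q_ADD_RANDOM) && none_add_random)
		*qflags &= ~Q_ADD_RANDOM;
}

int main(void)
{
	/* Request-based queue: ADD_RANDOM defaults to set. */
	unsigned qflags = Q_ADD_RANDOM;

	/* Table stacked over SSDs only: non-rotational, and no device
	 * is a suitable entropy source, so ADD_RANDOM is cleared. */
	set_restrictions(&qflags, true, true);
	printf("nonrot=%d add_random=%d\n",
	       !!(qflags & Q_NONROT), !!(qflags & Q_ADD_RANDOM));
	return 0;
}

Clearing-only is the conservative direction: device-mapper never force-enables
entropy contribution on a stack whose underlying devices have opted out, which
is exactly the behaviour the commit message asks for.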