diff options
author | Mikulas Patocka <mpatocka@redhat.com> | 2017-04-18 16:51:46 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2017-04-24 12:04:31 -0400 |
commit | 3c12016910061c2a19d985fba7f7dec19d6a3a09 (patch) | |
tree | a14e713808ce97a29ab9daa99b628bb89334cc4b | |
parent | cc7e394024770d4bfd8463fab1a9e2e262a7d7c1 (diff) |
dm table: replace while loops with for loops
Also remove some unnecessary use of uninitialized_var().
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r-- | drivers/md/dm-table.c | 63 |
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b0600840e734..7fb29db478cd 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -373,7 +373,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, | |||
373 | */ | 373 | */ |
374 | dev_t dm_get_dev_t(const char *path) | 374 | dev_t dm_get_dev_t(const char *path) |
375 | { | 375 | { |
376 | dev_t uninitialized_var(dev); | 376 | dev_t dev; |
377 | struct block_device *bdev; | 377 | struct block_device *bdev; |
378 | 378 | ||
379 | bdev = lookup_bdev(path); | 379 | bdev = lookup_bdev(path); |
@@ -627,13 +627,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, | |||
627 | 627 | ||
628 | struct dm_target *uninitialized_var(ti); | 628 | struct dm_target *uninitialized_var(ti); |
629 | struct queue_limits ti_limits; | 629 | struct queue_limits ti_limits; |
630 | unsigned i = 0; | 630 | unsigned i; |
631 | 631 | ||
632 | /* | 632 | /* |
633 | * Check each entry in the table in turn. | 633 | * Check each entry in the table in turn. |
634 | */ | 634 | */ |
635 | while (i < dm_table_get_num_targets(table)) { | 635 | for (i = 0; i < dm_table_get_num_targets(table); i++) { |
636 | ti = dm_table_get_target(table, i++); | 636 | ti = dm_table_get_target(table, i); |
637 | 637 | ||
638 | blk_set_stacking_limits(&ti_limits); | 638 | blk_set_stacking_limits(&ti_limits); |
639 | 639 | ||
@@ -854,11 +854,11 @@ static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, | |||
854 | static bool dm_table_supports_dax(struct dm_table *t) | 854 | static bool dm_table_supports_dax(struct dm_table *t) |
855 | { | 855 | { |
856 | struct dm_target *ti; | 856 | struct dm_target *ti; |
857 | unsigned i = 0; | 857 | unsigned i; |
858 | 858 | ||
859 | /* Ensure that all targets support DAX. */ | 859 | /* Ensure that all targets support DAX. */ |
860 | while (i < dm_table_get_num_targets(t)) { | 860 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
861 | ti = dm_table_get_target(t, i++); | 861 | ti = dm_table_get_target(t, i); |
862 | 862 | ||
863 | if (!ti->type->direct_access) | 863 | if (!ti->type->direct_access) |
864 | return false; | 864 | return false; |
@@ -1010,11 +1010,11 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t) | |||
1010 | 1010 | ||
1011 | struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) | 1011 | struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) |
1012 | { | 1012 | { |
1013 | struct dm_target *uninitialized_var(ti); | 1013 | struct dm_target *ti; |
1014 | unsigned i = 0; | 1014 | unsigned i; |
1015 | 1015 | ||
1016 | while (i < dm_table_get_num_targets(t)) { | 1016 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1017 | ti = dm_table_get_target(t, i++); | 1017 | ti = dm_table_get_target(t, i); |
1018 | if (dm_target_is_wildcard(ti->type)) | 1018 | if (dm_target_is_wildcard(ti->type)) |
1019 | return ti; | 1019 | return ti; |
1020 | } | 1020 | } |
@@ -1321,15 +1321,16 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev, | |||
1321 | */ | 1321 | */ |
1322 | bool dm_table_has_no_data_devices(struct dm_table *table) | 1322 | bool dm_table_has_no_data_devices(struct dm_table *table) |
1323 | { | 1323 | { |
1324 | struct dm_target *uninitialized_var(ti); | 1324 | struct dm_target *ti; |
1325 | unsigned i = 0, num_devices = 0; | 1325 | unsigned i, num_devices; |
1326 | 1326 | ||
1327 | while (i < dm_table_get_num_targets(table)) { | 1327 | for (i = 0; i < dm_table_get_num_targets(table); i++) { |
1328 | ti = dm_table_get_target(table, i++); | 1328 | ti = dm_table_get_target(table, i); |
1329 | 1329 | ||
1330 | if (!ti->type->iterate_devices) | 1330 | if (!ti->type->iterate_devices) |
1331 | return false; | 1331 | return false; |
1332 | 1332 | ||
1333 | num_devices = 0; | ||
1333 | ti->type->iterate_devices(ti, count_device, &num_devices); | 1334 | ti->type->iterate_devices(ti, count_device, &num_devices); |
1334 | if (num_devices) | 1335 | if (num_devices) |
1335 | return false; | 1336 | return false; |
@@ -1344,16 +1345,16 @@ bool dm_table_has_no_data_devices(struct dm_table *table) | |||
1344 | int dm_calculate_queue_limits(struct dm_table *table, | 1345 | int dm_calculate_queue_limits(struct dm_table *table, |
1345 | struct queue_limits *limits) | 1346 | struct queue_limits *limits) |
1346 | { | 1347 | { |
1347 | struct dm_target *uninitialized_var(ti); | 1348 | struct dm_target *ti; |
1348 | struct queue_limits ti_limits; | 1349 | struct queue_limits ti_limits; |
1349 | unsigned i = 0; | 1350 | unsigned i; |
1350 | 1351 | ||
1351 | blk_set_stacking_limits(limits); | 1352 | blk_set_stacking_limits(limits); |
1352 | 1353 | ||
1353 | while (i < dm_table_get_num_targets(table)) { | 1354 | for (i = 0; i < dm_table_get_num_targets(table); i++) { |
1354 | blk_set_stacking_limits(&ti_limits); | 1355 | blk_set_stacking_limits(&ti_limits); |
1355 | 1356 | ||
1356 | ti = dm_table_get_target(table, i++); | 1357 | ti = dm_table_get_target(table, i); |
1357 | 1358 | ||
1358 | if (!ti->type->iterate_devices) | 1359 | if (!ti->type->iterate_devices) |
1359 | goto combine_limits; | 1360 | goto combine_limits; |
@@ -1435,7 +1436,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, | |||
1435 | static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) | 1436 | static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) |
1436 | { | 1437 | { |
1437 | struct dm_target *ti; | 1438 | struct dm_target *ti; |
1438 | unsigned i = 0; | 1439 | unsigned i; |
1439 | 1440 | ||
1440 | /* | 1441 | /* |
1441 | * Require at least one underlying device to support flushes. | 1442 | * Require at least one underlying device to support flushes. |
@@ -1443,8 +1444,8 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) | |||
1443 | * so we need to use iterate_devices here, which targets | 1444 | * so we need to use iterate_devices here, which targets |
1444 | * supporting flushes must provide. | 1445 | * supporting flushes must provide. |
1445 | */ | 1446 | */ |
1446 | while (i < dm_table_get_num_targets(t)) { | 1447 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1447 | ti = dm_table_get_target(t, i++); | 1448 | ti = dm_table_get_target(t, i); |
1448 | 1449 | ||
1449 | if (!ti->num_flush_bios) | 1450 | if (!ti->num_flush_bios) |
1450 | continue; | 1451 | continue; |
@@ -1504,10 +1505,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t, | |||
1504 | iterate_devices_callout_fn func) | 1505 | iterate_devices_callout_fn func) |
1505 | { | 1506 | { |
1506 | struct dm_target *ti; | 1507 | struct dm_target *ti; |
1507 | unsigned i = 0; | 1508 | unsigned i; |
1508 | 1509 | ||
1509 | while (i < dm_table_get_num_targets(t)) { | 1510 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1510 | ti = dm_table_get_target(t, i++); | 1511 | ti = dm_table_get_target(t, i); |
1511 | 1512 | ||
1512 | if (!ti->type->iterate_devices || | 1513 | if (!ti->type->iterate_devices || |
1513 | !ti->type->iterate_devices(ti, func, NULL)) | 1514 | !ti->type->iterate_devices(ti, func, NULL)) |
@@ -1528,10 +1529,10 @@ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *de | |||
1528 | static bool dm_table_supports_write_same(struct dm_table *t) | 1529 | static bool dm_table_supports_write_same(struct dm_table *t) |
1529 | { | 1530 | { |
1530 | struct dm_target *ti; | 1531 | struct dm_target *ti; |
1531 | unsigned i = 0; | 1532 | unsigned i; |
1532 | 1533 | ||
1533 | while (i < dm_table_get_num_targets(t)) { | 1534 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1534 | ti = dm_table_get_target(t, i++); | 1535 | ti = dm_table_get_target(t, i); |
1535 | 1536 | ||
1536 | if (!ti->num_write_same_bios) | 1537 | if (!ti->num_write_same_bios) |
1537 | return false; | 1538 | return false; |
@@ -1555,7 +1556,7 @@ static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, | |||
1555 | static bool dm_table_supports_discards(struct dm_table *t) | 1556 | static bool dm_table_supports_discards(struct dm_table *t) |
1556 | { | 1557 | { |
1557 | struct dm_target *ti; | 1558 | struct dm_target *ti; |
1558 | unsigned i = 0; | 1559 | unsigned i; |
1559 | 1560 | ||
1560 | /* | 1561 | /* |
1561 | * Unless any target used by the table set discards_supported, | 1562 | * Unless any target used by the table set discards_supported, |
@@ -1564,8 +1565,8 @@ static bool dm_table_supports_discards(struct dm_table *t) | |||
1564 | * so we need to use iterate_devices here, which targets | 1565 | * so we need to use iterate_devices here, which targets |
1565 | * supporting discard selectively must provide. | 1566 | * supporting discard selectively must provide. |
1566 | */ | 1567 | */ |
1567 | while (i < dm_table_get_num_targets(t)) { | 1568 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1568 | ti = dm_table_get_target(t, i++); | 1569 | ti = dm_table_get_target(t, i); |
1569 | 1570 | ||
1570 | if (!ti->num_discard_bios) | 1571 | if (!ti->num_discard_bios) |
1571 | continue; | 1572 | continue; |