diff options
author | Heinz Mauelshagen <heinzm@redhat.com> | 2016-07-19 08:03:51 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2016-07-19 11:37:07 -0400 |
commit | 094f394df6a33f959888d445b362a9086823a2fb (patch) | |
tree | 19ba09b0dfa5c516904a1c5eceb019abd9a86eef /drivers/md/dm-raid.c | |
parent | 9c72bad1f31af96d9012025639552cd5732bb0a5 (diff) |
dm raid: address checkpatch.pl complaints
Use 'unsigned int' where appropriate.
Return negative errors.
Correct an indentation issue.
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r-- | drivers/md/dm-raid.c | 42 |
1 file changed, 21 insertions, 21 deletions
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 0aaf4ef7152c..10c136f789b9 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -256,10 +256,10 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) | |||
256 | static struct raid_type { | 256 | static struct raid_type { |
257 | const char *name; /* RAID algorithm. */ | 257 | const char *name; /* RAID algorithm. */ |
258 | const char *descr; /* Descriptor text for logging. */ | 258 | const char *descr; /* Descriptor text for logging. */ |
259 | const unsigned parity_devs; /* # of parity devices. */ | 259 | const unsigned int parity_devs; /* # of parity devices. */ |
260 | const unsigned minimal_devs; /* minimal # of devices in set. */ | 260 | const unsigned int minimal_devs;/* minimal # of devices in set. */ |
261 | const unsigned level; /* RAID level. */ | 261 | const unsigned int level; /* RAID level. */ |
262 | const unsigned algorithm; /* RAID algorithm. */ | 262 | const unsigned int algorithm; /* RAID algorithm. */ |
263 | } raid_types[] = { | 263 | } raid_types[] = { |
264 | {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */}, | 264 | {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */}, |
265 | {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */}, | 265 | {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */}, |
@@ -665,9 +665,9 @@ static void rs_set_new(struct raid_set *rs) | |||
665 | } | 665 | } |
666 | 666 | ||
667 | static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type, | 667 | static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type, |
668 | unsigned raid_devs) | 668 | unsigned int raid_devs) |
669 | { | 669 | { |
670 | unsigned i; | 670 | unsigned int i; |
671 | struct raid_set *rs; | 671 | struct raid_set *rs; |
672 | 672 | ||
673 | if (raid_devs <= raid_type->parity_devs) { | 673 | if (raid_devs <= raid_type->parity_devs) { |
@@ -920,9 +920,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
920 | */ | 920 | */ |
921 | static int validate_raid_redundancy(struct raid_set *rs) | 921 | static int validate_raid_redundancy(struct raid_set *rs) |
922 | { | 922 | { |
923 | unsigned i, rebuild_cnt = 0; | 923 | unsigned int i, rebuild_cnt = 0; |
924 | unsigned rebuilds_per_group = 0, copies; | 924 | unsigned int rebuilds_per_group = 0, copies; |
925 | unsigned group_size, last_group_start; | 925 | unsigned int group_size, last_group_start; |
926 | 926 | ||
927 | for (i = 0; i < rs->md.raid_disks; i++) | 927 | for (i = 0; i < rs->md.raid_disks; i++) |
928 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || | 928 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || |
@@ -1030,12 +1030,12 @@ too_many: | |||
1030 | * [raid10_format <near|far|offset>] Layout algorithm. (Default: near) | 1030 | * [raid10_format <near|far|offset>] Layout algorithm. (Default: near) |
1031 | */ | 1031 | */ |
1032 | static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, | 1032 | static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, |
1033 | unsigned num_raid_params) | 1033 | unsigned int num_raid_params) |
1034 | { | 1034 | { |
1035 | int value, raid10_format = ALGORITHM_RAID10_DEFAULT; | 1035 | int value, raid10_format = ALGORITHM_RAID10_DEFAULT; |
1036 | unsigned raid10_copies = 2; | 1036 | unsigned int raid10_copies = 2; |
1037 | unsigned i, write_mostly = 0; | 1037 | unsigned int i, write_mostly = 0; |
1038 | unsigned region_size = 0; | 1038 | unsigned int region_size = 0; |
1039 | sector_t max_io_len; | 1039 | sector_t max_io_len; |
1040 | const char *arg, *key; | 1040 | const char *arg, *key; |
1041 | struct raid_dev *rd; | 1041 | struct raid_dev *rd; |
@@ -1447,7 +1447,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev) | |||
1447 | if (rs->raid10_copies < 2 || | 1447 | if (rs->raid10_copies < 2 || |
1448 | delta_disks < 0) { | 1448 | delta_disks < 0) { |
1449 | rs->ti->error = "Bogus raid10 data copies or delta disks"; | 1449 | rs->ti->error = "Bogus raid10 data copies or delta disks"; |
1450 | return EINVAL; | 1450 | return -EINVAL; |
1451 | } | 1451 | } |
1452 | 1452 | ||
1453 | dev_sectors *= rs->raid10_copies; | 1453 | dev_sectors *= rs->raid10_copies; |
@@ -1474,7 +1474,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev) | |||
1474 | return 0; | 1474 | return 0; |
1475 | bad: | 1475 | bad: |
1476 | rs->ti->error = "Target length not divisible by number of data devices"; | 1476 | rs->ti->error = "Target length not divisible by number of data devices"; |
1477 | return EINVAL; | 1477 | return -EINVAL; |
1478 | } | 1478 | } |
1479 | 1479 | ||
1480 | /* Setup recovery on @rs */ | 1480 | /* Setup recovery on @rs */ |
@@ -2511,7 +2511,7 @@ static int rs_setup_takeover(struct raid_set *rs) | |||
2511 | /* raid1 -> raid10_near layout */ | 2511 | /* raid1 -> raid10_near layout */ |
2512 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, | 2512 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, |
2513 | rs->raid_disks); | 2513 | rs->raid_disks); |
2514 | else | 2514 | else |
2515 | return -EINVAL; | 2515 | return -EINVAL; |
2516 | 2516 | ||
2517 | } | 2517 | } |
@@ -2758,12 +2758,12 @@ static void configure_discard_support(struct raid_set *rs) | |||
2758 | * enforce recreation based on the passed in table parameters. | 2758 | * enforce recreation based on the passed in table parameters. |
2759 | * | 2759 | * |
2760 | */ | 2760 | */ |
2761 | static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | 2761 | static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
2762 | { | 2762 | { |
2763 | int r; | 2763 | int r; |
2764 | bool resize; | 2764 | bool resize; |
2765 | struct raid_type *rt; | 2765 | struct raid_type *rt; |
2766 | unsigned num_raid_params, num_raid_devs; | 2766 | unsigned int num_raid_params, num_raid_devs; |
2767 | sector_t calculated_dev_sectors; | 2767 | sector_t calculated_dev_sectors; |
2768 | struct raid_set *rs = NULL; | 2768 | struct raid_set *rs = NULL; |
2769 | const char *arg; | 2769 | const char *arg; |
@@ -3299,7 +3299,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, | |||
3299 | } | 3299 | } |
3300 | } | 3300 | } |
3301 | 3301 | ||
3302 | static int raid_message(struct dm_target *ti, unsigned argc, char **argv) | 3302 | static int raid_message(struct dm_target *ti, unsigned int argc, char **argv) |
3303 | { | 3303 | { |
3304 | struct raid_set *rs = ti->private; | 3304 | struct raid_set *rs = ti->private; |
3305 | struct mddev *mddev = &rs->md; | 3305 | struct mddev *mddev = &rs->md; |
@@ -3351,7 +3351,7 @@ static int raid_iterate_devices(struct dm_target *ti, | |||
3351 | iterate_devices_callout_fn fn, void *data) | 3351 | iterate_devices_callout_fn fn, void *data) |
3352 | { | 3352 | { |
3353 | struct raid_set *rs = ti->private; | 3353 | struct raid_set *rs = ti->private; |
3354 | unsigned i; | 3354 | unsigned int i; |
3355 | int r = 0; | 3355 | int r = 0; |
3356 | 3356 | ||
3357 | for (i = 0; !r && i < rs->md.raid_disks; i++) | 3357 | for (i = 0; !r && i < rs->md.raid_disks; i++) |
@@ -3368,7 +3368,7 @@ static int raid_iterate_devices(struct dm_target *ti, | |||
3368 | static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) | 3368 | static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) |
3369 | { | 3369 | { |
3370 | struct raid_set *rs = ti->private; | 3370 | struct raid_set *rs = ti->private; |
3371 | unsigned chunk_size = rs->md.chunk_sectors << 9; | 3371 | unsigned int chunk_size = rs->md.chunk_sectors << 9; |
3372 | struct r5conf *conf = rs->md.private; | 3372 | struct r5conf *conf = rs->md.private; |
3373 | 3373 | ||
3374 | blk_limits_io_min(limits, chunk_size); | 3374 | blk_limits_io_min(limits, chunk_size); |