Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r-- drivers/md/dm-raid.c | 79 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 53 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1e..c2907d836e4e 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include "md.h"
 #include "raid1.h"
@@ -37,7 +38,7 @@ struct raid_dev {
 	 */
 	struct dm_dev *meta_dev;
 	struct dm_dev *data_dev;
-	struct mdk_rdev_s rdev;
+	struct md_rdev rdev;
 };
 
 /*
@@ -57,7 +58,7 @@ struct raid_set {
 
 	uint64_t print_flags;
 
-	struct mddev_s md;
+	struct mddev md;
 	struct raid_type *raid_type;
 	struct dm_target_callbacks callbacks;
 
@@ -449,7 +450,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				rs->ti->error = "write_mostly option is only valid for RAID1";
 				return -EINVAL;
 			}
-			if (value > rs->md.raid_disks) {
+			if (value >= rs->md.raid_disks) {
 				rs->ti->error = "Invalid write_mostly drive index given";
 				return -EINVAL;
 			}
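The comparison change above fixes an off-by-one: write_mostly takes a zero-based drive index, so with raid_disks devices the valid indices run from 0 to raid_disks - 1, and raid_disks itself must be rejected. A minimal standalone sketch of the corrected bound (hypothetical helper, not part of the patch):

	#include <stdio.h>

	/* Hypothetical helper: a zero-based index is valid only below raid_disks. */
	static int drive_index_valid(long value, int raid_disks)
	{
		return value >= 0 && value < raid_disks; /* i.e. reject value >= raid_disks */
	}

	int main(void)
	{
		/* With 4 drives, 3 is the last valid index; 4 is out of range. */
		printf("%d %d\n", drive_index_valid(3, 4), drive_index_valid(4, 4)); /* prints: 1 0 */
		return 0;
	}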
@@ -594,7 +595,7 @@ struct dm_raid_superblock {
 	/* Always set to 0 when writing. */
 } __packed;
 
-static int read_disk_sb(mdk_rdev_t *rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size)
 {
 	BUG_ON(!rdev->sb_page);
 
@@ -611,9 +612,9 @@ static int read_disk_sb(mdk_rdev_t *rdev, int size)
 	return 0;
 }
 
-static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 {
-	mdk_rdev_t *r, *t;
+	struct md_rdev *r, *t;
 	uint64_t failed_devices;
 	struct dm_raid_superblock *sb;
 
@@ -651,7 +652,7 @@ static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
  *
  * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
  */
-static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 {
 	int ret;
 	struct dm_raid_superblock *sb;
@@ -689,7 +690,7 @@ static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
 	return (events_sb > events_refsb) ? 1 : 0;
 }
 
-static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 {
 	int role;
 	struct raid_set *rs = container_of(mddev, struct raid_set, md);
@@ -698,7 +699,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 	struct dm_raid_superblock *sb;
 	uint32_t new_devs = 0;
 	uint32_t rebuilds = 0;
-	mdk_rdev_t *r, *t;
+	struct md_rdev *r, *t;
 	struct dm_raid_superblock *sb2;
 
 	sb = page_address(rdev->sb_page);
@@ -809,7 +810,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 	return 0;
 }
 
-static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct dm_raid_superblock *sb = page_address(rdev->sb_page);
 
@@ -849,8 +850,8 @@ static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 {
 	int ret;
-	mdk_rdev_t *rdev, *freshest, *tmp;
-	mddev_t *mddev = &rs->md;
+	struct md_rdev *rdev, *freshest, *tmp;
+	struct mddev *mddev = &rs->md;
 
 	freshest = NULL;
 	rdev_for_each(rdev, tmp, mddev) {
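For context, the tri-state return of super_load() ("1 if use rdev, 0 if use refdev, -Exxx otherwise", per the comment above) feeds the freshest-superblock selection in the rdev_for_each() loop of analyse_superblocks(). A sketch of the assumed caller pattern (paraphrased, not verbatim from dm-raid.c):

	/* Assumed caller logic: keep whichever device's superblock
	 * records the newest event count. */
	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev) {
		ret = super_load(rdev, freshest);
		if (ret == 1)		/* rdev's superblock has newer events */
			freshest = rdev;
		else if (ret < 0)	/* superblock unreadable or invalid */
			return ret;
		/* ret == 0: the current freshest is still the newest */
	}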
@@ -1004,7 +1005,7 @@ static void raid_dtr(struct dm_target *ti)
 static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
 {
 	struct raid_set *rs = ti->private;
-	mddev_t *mddev = &rs->md;
+	struct mddev *mddev = &rs->md;
 
 	mddev->pers->make_request(mddev, bio);
 
@@ -1017,30 +1018,56 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 	struct raid_set *rs = ti->private;
 	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
 	unsigned sz = 0;
-	int i;
+	int i, array_in_sync = 0;
 	sector_t sync;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
 		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
 
-		for (i = 0; i < rs->md.raid_disks; i++) {
-			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
-				DMEMIT("D");
-			else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
-				DMEMIT("A");
-			else
-				DMEMIT("a");
-		}
-
 		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
 			sync = rs->md.curr_resync_completed;
 		else
 			sync = rs->md.recovery_cp;
 
-		if (sync > rs->md.resync_max_sectors)
+		if (sync >= rs->md.resync_max_sectors) {
+			array_in_sync = 1;
 			sync = rs->md.resync_max_sectors;
+		} else {
+			/*
+			 * The array may be doing an initial sync, or it may
+			 * be rebuilding individual components.  If all the
+			 * devices are In_sync, then it is the array that is
+			 * being initialized.
+			 */
+			for (i = 0; i < rs->md.raid_disks; i++)
+				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+					array_in_sync = 1;
+		}
+		/*
+		 * Status characters:
+		 *  'D' = Dead/Failed device
+		 *  'a' = Alive but not in-sync
+		 *  'A' = Alive and in-sync
+		 */
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
+				DMEMIT("D");
+			else if (!array_in_sync ||
+				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
+				DMEMIT("a");
+			else
+				DMEMIT("A");
+		}
 
+		/*
+		 * In-sync ratio:
+		 *  The in-sync ratio shows the progress of:
+		 *   - Initializing the array
+		 *   - Rebuilding a subset of devices of the array
+		 *  The user can distinguish between the two by referring
+		 *  to the status characters.
+		 */
 		DMEMIT(" %llu/%llu",
 		       (unsigned long long) sync,
 		       (unsigned long long) rs->md.resync_max_sectors);
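To make the new STATUSTYPE_INFO format concrete: a hypothetical 5-device raid5_ls set rebuilding its third member (all numbers made up) might report, via dmsetup status:

	0 4194304 raid raid5_ls 5 AAaAA 524288/1048576

Here 'AAaAA' marks device 2 as alive but not yet in-sync, and 524288/1048576 is the in-sync ratio; because one device is out of sync, the ratio describes the rebuild of that component rather than array initialization.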
@@ -1097,7 +1124,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			       rs->md.bitmap_info.max_write_behind);
 
 	if (rs->print_flags & DMPF_STRIPE_CACHE) {
-		raid5_conf_t *conf = rs->md.private;
+		struct r5conf *conf = rs->md.private;
 
 		/* convert from kiB to sectors */
 		DMEMIT(" stripe_cache %d",
@@ -1146,7 +1173,7 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct raid_set *rs = ti->private;
 	unsigned chunk_size = rs->md.chunk_sectors << 9;
-	raid5_conf_t *conf = rs->md.private;
+	struct r5conf *conf = rs->md.private;
 
 	blk_limits_io_min(limits, chunk_size);
 	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
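The io_opt hint above advertises one full stripe of data to the block layer. A standalone worked example with assumed values (128-sector chunk, 5-drive raid5, hence max_degraded = 1; none of these numbers come from the diff):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed values, not taken from the diff. */
		unsigned chunk_sectors = 128;              /* 64 KiB chunk */
		unsigned chunk_size = chunk_sectors << 9;  /* sectors to bytes: 65536 */
		int raid_disks = 5, max_degraded = 1;      /* raid5 gives one disk to parity */

		/* io_min: one chunk; io_opt: one full data stripe. */
		printf("io_min = %u\n", chunk_size);                               /* 65536 */
		printf("io_opt = %u\n", chunk_size * (raid_disks - max_degraded)); /* 262144 */
		return 0;
	}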