-rw-r--r--  drivers/md/raid1.c | 99
1 file changed, 78 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ea1f1eb93c77..14a8fe0349c7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1072,9 +1072,7 @@ abort:
 
 static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 
 	if (bio->bi_size)
 		return 1;
@@ -1087,10 +1085,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 	 * or re-read if the read failed.
 	 * We don't do much here, just schedule handling by raid1d
 	 */
-	if (!uptodate) {
-		md_error(r1_bio->mddev,
-			 conf->mirrors[r1_bio->read_disk].rdev);
-	} else
+	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 	reschedule_retry(r1_bio);
 	return 0;
@@ -1134,27 +1129,89 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 
 	bio = r1_bio->bios[r1_bio->read_disk];
 
-/*
-	if (r1_bio->sector == 0) printk("First sync write startss\n");
-*/
+
 	/*
 	 * schedule writes
 	 */
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
-		/*
-		 * There is no point trying a read-for-reconstruct as
-		 * reconstruct is about to be aborted
+		/* ouch - failed to read all of that.
+		 * Try some synchronous reads of other devices to get
+		 * good data, much like with normal read errors.  Only
+		 * read into the pages we already have so they we don't
+		 * need to re-issue the read request.
+		 * We don't need to freeze the array, because being in an
+		 * active sync request, there is no normal IO, and
+		 * no overlapping syncs.
 		 */
-		char b[BDEVNAME_SIZE];
-		printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
-		       " for block %llu\n",
-		       bdevname(bio->bi_bdev,b),
-		       (unsigned long long)r1_bio->sector);
-		md_done_sync(mddev, r1_bio->sectors, 0);
-		put_buf(r1_bio);
-		return;
+		sector_t sect = r1_bio->sector;
+		int sectors = r1_bio->sectors;
+		int idx = 0;
+
+		while(sectors) {
+			int s = sectors;
+			int d = r1_bio->read_disk;
+			int success = 0;
+			mdk_rdev_t *rdev;
+
+			if (s > (PAGE_SIZE>>9))
+				s = PAGE_SIZE >> 9;
+			do {
+				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
+					rdev = conf->mirrors[d].rdev;
+					if (sync_page_io(rdev->bdev,
+							 sect + rdev->data_offset,
+							 s<<9,
+							 bio->bi_io_vec[idx].bv_page,
+							 READ)) {
+						success = 1;
+						break;
+					}
+				}
+				d++;
+				if (d == conf->raid_disks)
+					d = 0;
+			} while (!success && d != r1_bio->read_disk);
+
+			if (success) {
+				/* write it back and re-read */
+				set_bit(R1BIO_Uptodate, &r1_bio->state);
+				while (d != r1_bio->read_disk) {
+					if (d == 0)
+						d = conf->raid_disks;
+					d--;
+					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+						continue;
+					rdev = conf->mirrors[d].rdev;
+					if (sync_page_io(rdev->bdev,
+							 sect + rdev->data_offset,
+							 s<<9,
+							 bio->bi_io_vec[idx].bv_page,
+							 WRITE) == 0 ||
+					    sync_page_io(rdev->bdev,
+							 sect + rdev->data_offset,
+							 s<<9,
+							 bio->bi_io_vec[idx].bv_page,
+							 READ) == 0) {
+						md_error(mddev, rdev);
+					}
+				}
+			} else {
+				char b[BDEVNAME_SIZE];
+				/* Cannot read from anywhere, array is toast */
+				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+				       " for block %llu\n",
+				       bdevname(bio->bi_bdev,b),
+				       (unsigned long long)r1_bio->sector);
+				md_done_sync(mddev, r1_bio->sectors, 0);
+				put_buf(r1_bio);
+				return;
+			}
+			sectors -= s;
+			sect += s;
+			idx ++;
+		}
 	}
-
 	atomic_set(&r1_bio->remaining, 1);
 	for (i = 0; i < disks ; i++) {
 		wbio = r1_bio->bios[i];
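
Note: the recovery strategy that the new while(sectors) loop adds to sync_request_write() can be summarised outside the kernel as follows. This is a minimal, user-space sketch only: read_chunk(), write_chunk(), fail_device(), MIRRORS and CHUNK_SECTORS are hypothetical stand-ins for sync_page_io(), md_error(), conf->raid_disks and PAGE_SIZE>>9, and none of this is md code.

/*
 * Standalone sketch of the per-page recovery loop in the patch above.
 * A read that failed during resync is retried synchronously on each
 * mirror in turn; once a good copy is found, the mirrors that were
 * tried (and failed, or were skipped) are rewritten with the good data
 * and re-read to verify, and any mirror that cannot be corrected is
 * marked faulty.  All device I/O here is simulated.
 */
#include <stdio.h>
#include <string.h>

#define MIRRORS        3
#define CHUNK_SECTORS  8           /* models PAGE_SIZE >> 9 for 4K pages */
#define TOTAL_SECTORS  32

/* Hypothetical device model: mirror 0 has a bad region below sector 16. */
static int read_chunk(int disk, long sect, char *buf)
{
	if (disk == 0 && sect < 16)
		return 0;               /* simulated media error */
	memset(buf, 'A' + disk, CHUNK_SECTORS * 512);
	return 1;
}

static int write_chunk(int disk, long sect, const char *buf)
{
	(void)disk; (void)sect; (void)buf;
	return 1;                       /* assume rewrites succeed */
}

static void fail_device(int disk)
{
	fprintf(stderr, "marking mirror %d faulty\n", disk);
}

/* Returns 0 if some chunk could not be read from any mirror. */
static int fix_sync_read_error(int read_disk, long start, long sectors)
{
	char page[CHUNK_SECTORS * 512];
	long sect = start;

	while (sectors) {
		long s = sectors > CHUNK_SECTORS ? CHUNK_SECTORS : sectors;
		int d = read_disk, success = 0;

		/* Try the original read_disk first, then the other mirrors. */
		do {
			if (read_chunk(d, sect, page)) {
				success = 1;
				break;
			}
			d = (d + 1) % MIRRORS;
		} while (d != read_disk);

		if (!success)
			return 0;       /* cannot read from anywhere */

		/* Walk back from the mirror that succeeded to read_disk,
		 * rewriting the good page and verifying with a re-read. */
		while (d != read_disk) {
			d = (d == 0 ? MIRRORS : d) - 1;
			if (!write_chunk(d, sect, page) ||
			    !read_chunk(d, sect, page))
				fail_device(d);
		}

		sectors -= s;
		sect += s;
	}
	return 1;
}

int main(void)
{
	if (!fix_sync_read_error(0, 0, TOTAL_SECTORS))
		fprintf(stderr, "unrecoverable read error\n");
	return 0;
}

The detail the sketch preserves is the backward walk from the mirror that finally returned good data toward the original read_disk, so every mirror tried before it (including read_disk itself) is rewritten and re-verified before the resync writes are scheduled.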
