diff options
Diffstat (limited to 'drivers/md/raid10.c')
| -rw-r--r-- | drivers/md/raid10.c | 29 |
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 59d4daa5f4c7..6ddae2501b9a 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error) | |||
| 490 | sector_t first_bad; | 490 | sector_t first_bad; |
| 491 | int bad_sectors; | 491 | int bad_sectors; |
| 492 | 492 | ||
| 493 | set_bit(R10BIO_Uptodate, &r10_bio->state); | 493 | /* |
| 494 | * Do not set R10BIO_Uptodate if the current device is | ||
| 495 | * rebuilding or Faulty. This is because we cannot use | ||
| 496 | * such device for properly reading the data back (we could | ||
| 497 | * potentially use it, if the current write would have fallen | ||
| 498 | * before rdev->recovery_offset, but for simplicity we don't | ||
| 499 | * check this here.) | ||
| 500 | */ | ||
| 501 | if (test_bit(In_sync, &rdev->flags) && | ||
| 502 | !test_bit(Faulty, &rdev->flags)) | ||
| 503 | set_bit(R10BIO_Uptodate, &r10_bio->state); | ||
| 494 | 504 | ||
| 495 | /* Maybe we can clear some bad blocks. */ | 505 | /* Maybe we can clear some bad blocks. */ |
| 496 | if (is_badblock(rdev, | 506 | if (is_badblock(rdev, |
| @@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf) | |||
| 1055 | wake_up(&conf->wait_barrier); | 1065 | wake_up(&conf->wait_barrier); |
| 1056 | } | 1066 | } |
| 1057 | 1067 | ||
| 1058 | static void freeze_array(struct r10conf *conf) | 1068 | static void freeze_array(struct r10conf *conf, int extra) |
| 1059 | { | 1069 | { |
| 1060 | /* stop syncio and normal IO and wait for everything to | 1070 | /* stop syncio and normal IO and wait for everything to |
| 1061 | * go quiet. | 1071 | * go quiet. |
| 1062 | * We increment barrier and nr_waiting, and then | 1072 | * We increment barrier and nr_waiting, and then |
| 1063 | * wait until nr_pending match nr_queued+1 | 1073 | * wait until nr_pending match nr_queued+extra |
| 1064 | * This is called in the context of one normal IO request | 1074 | * This is called in the context of one normal IO request |
| 1065 | * that has failed. Thus any sync request that might be pending | 1075 | * that has failed. Thus any sync request that might be pending |
| 1066 | * will be blocked by nr_pending, and we need to wait for | 1076 | * will be blocked by nr_pending, and we need to wait for |
| 1067 | * pending IO requests to complete or be queued for re-try. | 1077 | * pending IO requests to complete or be queued for re-try. |
| 1068 | * Thus the number queued (nr_queued) plus this request (1) | 1078 | * Thus the number queued (nr_queued) plus this request (extra) |
| 1069 | * must match the number of pending IOs (nr_pending) before | 1079 | * must match the number of pending IOs (nr_pending) before |
| 1070 | * we continue. | 1080 | * we continue. |
| 1071 | */ | 1081 | */ |
| @@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf) | |||
| 1073 | conf->barrier++; | 1083 | conf->barrier++; |
| 1074 | conf->nr_waiting++; | 1084 | conf->nr_waiting++; |
| 1075 | wait_event_lock_irq_cmd(conf->wait_barrier, | 1085 | wait_event_lock_irq_cmd(conf->wait_barrier, |
| 1076 | conf->nr_pending == conf->nr_queued+1, | 1086 | conf->nr_pending == conf->nr_queued+extra, |
| 1077 | conf->resync_lock, | 1087 | conf->resync_lock, |
| 1078 | flush_pending_writes(conf)); | 1088 | flush_pending_writes(conf)); |
| 1079 | 1089 | ||
| @@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
| 1837 | * we wait for all outstanding requests to complete. | 1847 | * we wait for all outstanding requests to complete. |
| 1838 | */ | 1848 | */ |
| 1839 | synchronize_sched(); | 1849 | synchronize_sched(); |
| 1840 | raise_barrier(conf, 0); | 1850 | freeze_array(conf, 0); |
| 1841 | lower_barrier(conf); | 1851 | unfreeze_array(conf); |
| 1842 | clear_bit(Unmerged, &rdev->flags); | 1852 | clear_bit(Unmerged, &rdev->flags); |
| 1843 | } | 1853 | } |
| 1844 | md_integrity_add_rdev(rdev, mddev); | 1854 | md_integrity_add_rdev(rdev, mddev); |
| @@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 2612 | r10_bio->devs[slot].bio = NULL; | 2622 | r10_bio->devs[slot].bio = NULL; |
| 2613 | 2623 | ||
| 2614 | if (mddev->ro == 0) { | 2624 | if (mddev->ro == 0) { |
| 2615 | freeze_array(conf); | 2625 | freeze_array(conf, 1); |
| 2616 | fix_read_error(conf, mddev, r10_bio); | 2626 | fix_read_error(conf, mddev, r10_bio); |
| 2617 | unfreeze_array(conf); | 2627 | unfreeze_array(conf); |
| 2618 | } else | 2628 | } else |
| @@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev) | |||
| 3609 | if (mddev->queue) { | 3619 | if (mddev->queue) { |
| 3610 | blk_queue_max_discard_sectors(mddev->queue, | 3620 | blk_queue_max_discard_sectors(mddev->queue, |
| 3611 | mddev->chunk_sectors); | 3621 | mddev->chunk_sectors); |
| 3612 | blk_queue_max_write_same_sectors(mddev->queue, | 3622 | blk_queue_max_write_same_sectors(mddev->queue, 0); |
| 3613 | mddev->chunk_sectors); | ||
| 3614 | blk_queue_io_min(mddev->queue, chunk_size); | 3623 | blk_queue_io_min(mddev->queue, chunk_size); |
| 3615 | if (conf->geo.raid_disks % conf->geo.near_copies) | 3624 | if (conf->geo.raid_disks % conf->geo.near_copies) |
| 3616 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); | 3625 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); |
