diff options
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/md/md.c        | 35
-rw-r--r--   drivers/md/raid1.c     |  8
-rw-r--r--   drivers/md/raid10.c    | 19
-rw-r--r--   drivers/md/raid5.c     |  6
-rw-r--r--   drivers/md/raid6main.c |  6
5 files changed, 45 insertions, 29 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 177d2a7d7cea..fa608a1a5c20 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3241,12 +3241,13 @@ static void md_do_sync(mddev_t *mddev)
 	mddev_t *mddev2;
 	unsigned int currspeed = 0,
 		 window;
-	sector_t max_sectors,j;
+	sector_t max_sectors,j, io_sectors;
 	unsigned long mark[SYNC_MARKS];
 	sector_t mark_cnt[SYNC_MARKS];
 	int last_mark,m;
 	struct list_head *tmp;
 	sector_t last_check;
+	int skipped = 0;
 
 	/* just incase thread restarts... */
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -3312,7 +3313,7 @@ static void md_do_sync(mddev_t *mddev)
 
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		/* resync follows the size requested by the personality,
-		 * which default to physical size, but can be virtual size
+		 * which defaults to physical size, but can be virtual size
 		 */
 		max_sectors = mddev->resync_max_sectors;
 	else
@@ -3331,9 +3332,10 @@ static void md_do_sync(mddev_t *mddev)
 		j = mddev->recovery_cp;
 	else
 		j = 0;
+	io_sectors = 0;
 	for (m = 0; m < SYNC_MARKS; m++) {
 		mark[m] = jiffies;
-		mark_cnt[m] = j;
+		mark_cnt[m] = io_sectors;
 	}
 	last_mark = 0;
 	mddev->resync_mark = mark[last_mark];
@@ -3358,21 +3360,29 @@ static void md_do_sync(mddev_t *mddev)
 	}
 
 	while (j < max_sectors) {
-		int sectors;
+		sector_t sectors;
 
-		sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
-		if (sectors < 0) {
+		skipped = 0;
+		sectors = mddev->pers->sync_request(mddev, j, &skipped,
+						    currspeed < sysctl_speed_limit_min);
+		if (sectors == 0) {
 			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
 			goto out;
 		}
-		atomic_add(sectors, &mddev->recovery_active);
+
+		if (!skipped) { /* actual IO requested */
+			io_sectors += sectors;
+			atomic_add(sectors, &mddev->recovery_active);
+		}
+
 		j += sectors;
 		if (j>1) mddev->curr_resync = j;
 
-		if (last_check + window > j || j == max_sectors)
+
+		if (last_check + window > io_sectors || j == max_sectors)
 			continue;
 
-		last_check = j;
+		last_check = io_sectors;
 
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
 		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
@@ -3386,7 +3396,7 @@ static void md_do_sync(mddev_t *mddev)
 			mddev->resync_mark = mark[next];
 			mddev->resync_mark_cnt = mark_cnt[next];
 			mark[next] = jiffies;
-			mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
+			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
 			last_mark = next;
 		}
 
@@ -3413,7 +3423,8 @@ static void md_do_sync(mddev_t *mddev)
 		mddev->queue->unplug_fn(mddev->queue);
 		cond_resched();
 
-		currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
+		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
+			/((jiffies-mddev->resync_mark)/HZ +1) +1;
 
 		if (currspeed > sysctl_speed_limit_min) {
 			if ((currspeed > sysctl_speed_limit_max) ||
@@ -3433,7 +3444,7 @@ static void md_do_sync(mddev_t *mddev)
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
-	mddev->pers->sync_request(mddev, max_sectors, 1);
+	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
 	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
 	    mddev->curr_resync > 2 &&
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3f1280bbaf39..3c5c916cb09e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1010,7 +1010,7 @@ static int init_resync(conf_t *conf)
  * that can be installed to exclude normal IO requests.
  */
 
-static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	conf_t *conf = mddev_to_conf(mddev);
 	mirror_info_t *mirror;
@@ -1023,7 +1023,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
-			return -ENOMEM;
+			return 0;
 
 	max_sector = mddev->size << 1;
 	if (sector_nr >= max_sector) {
@@ -1107,8 +1107,8 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 		/* There is nowhere to write, so all non-sync
 		 * drives must be failed - so we are finished
 		 */
-		int rv = max_sector - sector_nr;
-		md_done_sync(mddev, rv, 1);
+		sector_t rv = max_sector - sector_nr;
+		*skipped = 1;
 		put_buf(r1_bio);
 		rdev_dec_pending(conf->mirrors[disk].rdev, mddev);
 		return rv;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index bfc9f52f0ecf..8476515bfdc7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1321,7 +1321,7 @@ static int init_resync(conf_t *conf)
  *
  */
 
-static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	conf_t *conf = mddev_to_conf(mddev);
 	r10bio_t *r10_bio;
@@ -1335,7 +1335,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
-			return -ENOMEM;
+			return 0;
 
  skipped:
 	max_sector = mddev->size << 1;
@@ -1343,15 +1343,15 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 		max_sector = mddev->resync_max_sectors;
 	if (sector_nr >= max_sector) {
 		close_sync(conf);
+		*skipped = 1;
 		return sectors_skipped;
 	}
 	if (chunks_skipped >= conf->raid_disks) {
 		/* if there has been nothing to do on any drive,
 		 * then there is nothing to do at all..
 		 */
-		sector_t sec = max_sector - sector_nr;
-		md_done_sync(mddev, sec, 1);
-		return sec + sectors_skipped;
+		*skipped = 1;
+		return (max_sector - sector_nr) + sectors_skipped;
 	}
 
 	/* make sure whole request will fit in a chunk - if chunks
@@ -1565,17 +1565,22 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 		}
 	}
 
+	if (sectors_skipped)
+		/* pretend they weren't skipped, it makes
+		 * no important difference in this case
+		 */
+		md_done_sync(mddev, sectors_skipped, 1);
+
 	return sectors_skipped + nr_sectors;
  giveup:
 	/* There is nowhere to write, so all non-sync
 	 * drives must be failed, so try the next chunk...
 	 */
 	{
-	int sec = max_sector - sector_nr;
+	sector_t sec = max_sector - sector_nr;
 	sectors_skipped += sec;
 	chunks_skipped ++;
 	sector_nr = max_sector;
-	md_done_sync(mddev, sec, 1);
 	goto skipped;
 	}
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 677ce49078da..1ce3f5aaa984 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1477,7 +1477,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 }
 
 /* FIXME go_faster isn't used */
-static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 	struct stripe_head *sh;
@@ -1500,8 +1500,8 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
 	 * nothing we can do.
 	 */
 	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		int rv = (mddev->size << 1) - sector_nr;
-		md_done_sync(mddev, rv, 1);
+		sector_t rv = (mddev->size << 1) - sector_nr;
+		*skipped = 1;
 		return rv;
 	}
 
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index fede16c4e8f3..d9c385496dc5 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1636,7 +1636,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 }
 
 /* FIXME go_faster isn't used */
-static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
 	struct stripe_head *sh;
@@ -1659,8 +1659,8 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
 	 * nothing we can do.
 	 */
 	if (mddev->degraded >= 2 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		int rv = (mddev->size << 1) - sector_nr;
-		md_done_sync(mddev, rv, 1);
+		sector_t rv = (mddev->size << 1) - sector_nr;
+		*skipped = 1;
 		return rv;
 	}
 