author    Lars Ellenberg <lars.ellenberg@linbit.com>    2010-05-14 13:08:55 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2010-05-17 20:02:36 -0400
commit    bb3d000cb99aa0924b78c1ae5f5943484527868a
tree      276a13a6f1484ae255725d0faab55ff071b68a98
parent    45bb912bd5ea4d2b3a270a93cbdf767a0e2df6f5
drbd: allow resync requests to be larger than max_segment_size
This should allow for better background resync performance.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
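For context: the patch keys the resync request size off the protocol version negotiated at connect time. Peers speaking protocol 94 or newer (drbd 8.3.8) can receive one large request and split it into multiple bios locally, so the sender is no longer capped by its local queue's max_segment_size; older peers keep the old limit. A minimal standalone sketch of that gate follows; the peer struct and the constant values are assumptions for illustration, not drbd's definitions (the real DRBD_MAX_SEGMENT_SIZE and agreed_pro_version come from the drbd headers and the connection handshake).

/* Illustrative sketch only, not part of the patch: the version gate
 * introduced in the second hunk below, reduced to a standalone helper. */
#include <stdio.h>

#define DRBD_MAX_SEGMENT_SIZE	(1U << 15)	/* assumed: 32 KiB */
#define QUEUE_MAX_SEGMENT_SIZE	(1U << 12)	/* assumed: PAGE_SIZE, the lvm/md case */

struct peer {
	int agreed_pro_version;	/* negotiated at connect time */
};

/* Peers at protocol >= 94 (drbd 8.3.8) can split one large request
 * into multiple bios themselves, so we no longer cap resync requests
 * at the local queue's max_segment_size for them. */
static unsigned int resync_request_size(const struct peer *p)
{
	return p->agreed_pro_version < 94 ?
		QUEUE_MAX_SEGMENT_SIZE : DRBD_MAX_SEGMENT_SIZE;
}

int main(void)
{
	struct peer old_peer = { .agreed_pro_version = 93 };
	struct peer new_peer = { .agreed_pro_version = 94 };

	printf("pro 93: %u bytes\n", resync_request_size(&old_peer));
	printf("pro 94: %u bytes\n", resync_request_size(&new_peer));
	return 0;
}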
Diffstat (limited to 'drivers')
 drivers/block/drbd/drbd_worker.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d771b1e0424b..91085c1ab52f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -462,7 +462,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+	int max_segment_size;
 	int number, i, size, pe, mx;
 	int align, queued, sndbuf;
 
@@ -488,6 +488,11 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		return 1;
 	}
 
+	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
+	 * if it should be necessary */
+	max_segment_size = mdev->agreed_pro_version < 94 ?
+		queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
 	mdev->c_sync_rate = calc_resync_rate(mdev);
 	number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
 	pe = atomic_read(&mdev->rs_pending_cnt);
@@ -552,12 +557,6 @@ next_sector:
 		 *
 		 * Additionally always align bigger requests, in order to
 		 * be prepared for all stripe sizes of software RAIDs.
-		 *
-		 * we _do_ care about the agreed-upon q->max_segment_size
-		 * here, as splitting up the requests on the other side is more
-		 * difficult. the consequence is, that on lvm and md and other
-		 * "indirect" devices, this is dead code, since
-		 * q->max_segment_size will be PAGE_SIZE.
 		 */
 		align = 1;
 		for (;;) {
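The comment deleted in the last hunk is the point of the patch: it argued the sender must respect the agreed q->max_segment_size because splitting requests on the other side is hard, which made the alignment logic dead code on lvm, md, and other "indirect" devices where max_segment_size is PAGE_SIZE. With multi-bio EEs the receiver can perform that split itself. A hedged sketch of such a split, using hypothetical names and assumed sizes; drbd's real multi-bio EE handling lives in drbd_receiver.c:

/* Sketch only, not drbd code: how a receiving side with multi-bio
 * support might split one large resync request into per-bio chunks
 * no bigger than its own queue limit. */
#include <stdio.h>
#include <stddef.h>

/* Walk [0, total) in steps of at most max_seg bytes, printing each
 * chunk; returns how many bios a request of this size would need. */
static size_t split_request(unsigned long long total, size_t max_seg)
{
	unsigned long long off = 0;
	size_t n = 0;

	while (off < total) {
		size_t len = (total - off < max_seg) ?
			(size_t)(total - off) : max_seg;
		printf("bio %zu: offset=%llu len=%zu\n", n, off, len);
		off += len;
		n++;
	}
	return n;
}

int main(void)
{
	/* assumed numbers: a 32 KiB resync request against a queue
	 * whose max_segment_size is one 4 KiB page (the lvm/md case) */
	size_t bios = split_request(32768, 4096);

	printf("request needs %zu bios\n", bios);
	return 0;
}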