author    Lars Ellenberg <lars.ellenberg@linbit.com>    2010-11-05 05:04:07 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2011-03-10 05:19:01 -0500
commit    e65f440d474d7d6a6fd8a2c844e851d8c96ed9c5 (patch)
tree      92fb1b9fea75ac13154dbc844a4153ea6d07ab30
parent    9bd28d3c90c80c7ec46085de281b38f67331da41 (diff)
drbd: factor out drbd_rs_number_requests
Preparation patch to be able to use the auto-throttling resync controller
for online-verify requests as well.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
 drivers/block/drbd/drbd_worker.c | 61 ++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 32 deletions(-)
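The fixed-rate branch of the new helper is easiest to see with concrete units. Below is a minimal standalone sketch of that arithmetic, assuming BM_BLOCK_SHIFT is 12 (4 KiB bitmap granularity) and SLEEP_TIME is HZ/10 (the resync worker runs every 100 ms), as in the DRBD sources of this period; the HZ value and the configured rate are illustrative only, not taken from the patch.

/* Sketch of the fixed-rate branch of drbd_rs_number_requests().
 * Assumptions (not part of this patch): BM_BLOCK_SHIFT == 12 and
 * SLEEP_TIME == HZ/10, i.e. one worker tick every 100 ms. */
#include <stdio.h>

#define HZ             250                   /* example jiffy rate */
#define SLEEP_TIME     (HZ / 10)             /* 100 ms in jiffies */
#define BM_BLOCK_SHIFT 12
#define BM_BLOCK_SIZE  (1 << BM_BLOCK_SHIFT) /* 4096 bytes per resync request */

int main(void)
{
	int rate_kb = 10240; /* configured sync rate: 10 MiB/s in KiB/s */

	/* Requests per 100 ms tick, chosen so that
	 * number * 4 KiB * 10 ticks/s == rate_kb KiB/s. */
	int number = SLEEP_TIME * rate_kb / ((BM_BLOCK_SIZE / 1024) * HZ);

	printf("%d requests per tick -> %d KiB/s\n",
	       number, number * (BM_BLOCK_SIZE / 1024) * (HZ / SLEEP_TIME));
	return 0;
}

With a 10 MiB/s configured rate this hands out 256 requests per tick: 256 requests * 4 KiB * 10 ticks/s = 10240 KiB/s, so the division in the helper is just this identity solved for the request count.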
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 971e9b3b13b..6d111c8515f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -492,6 +492,32 @@ int drbd_rs_controller(struct drbd_conf *mdev)
 	return req_sect;
 }
 
+int drbd_rs_number_requests(struct drbd_conf *mdev)
+{
+	int number;
+	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
+		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+	} else {
+		mdev->c_sync_rate = mdev->sync_conf.rate;
+		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
+	}
+
+	/* Throttle resync on lower level disk activity, which may also be
+	 * caused by application IO on Primary/SyncTarget.
+	 * Keep this after the call to drbd_rs_controller, as that assumes
+	 * to be called as precisely as possible every SLEEP_TIME,
+	 * and would be confused otherwise. */
+	if (number && drbd_rs_should_slow_down(mdev)) {
+		mdev->c_sync_rate = 1;
+		number = 0;
+	}
+
+	/* ignore the amount of pending requests, the resync controller should
+	 * throttle down to incoming reply rate soon enough anyways. */
+	return number;
+}
+
 int w_make_resync_request(struct drbd_conf *mdev,
 			  struct drbd_work *w, int cancel)
 {
@@ -499,7 +525,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
 	int max_segment_size;
-	int number, rollback_i, size, pe, mx;
+	int number, rollback_i, size;
 	int align, queued, sndbuf;
 	int i = 0;
 
@@ -537,39 +563,10 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
 		mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
 
-	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
-		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
-		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
-	} else {
-		mdev->c_sync_rate = mdev->sync_conf.rate;
-		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
-	}
-
-	/* Throttle resync on lower level disk activity, which may also be
-	 * caused by application IO on Primary/SyncTarget.
-	 * Keep this after the call to drbd_rs_controller, as that assumes
-	 * to be called as precisely as possible every SLEEP_TIME,
-	 * and would be confused otherwise. */
-	if (drbd_rs_should_slow_down(mdev))
+	number = drbd_rs_number_requests(mdev);
+	if (number == 0)
 		goto requeue;
 
-	mutex_lock(&mdev->data.mutex);
-	if (mdev->data.socket)
-		mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
-	else
-		mx = 1;
-	mutex_unlock(&mdev->data.mutex);
-
-	/* For resync rates >160MB/sec, allow more pending RS requests */
-	if (number > mx)
-		mx = number;
-
-	/* Limit the number of pending RS requests to no more than the peer's receive buffer */
-	pe = atomic_read(&mdev->rs_pending_cnt);
-	if ((pe + number) > mx) {
-		number = mx - pe;
-	}
-
 	for (i = 0; i < number; i++) {
 		/* Stop generating RS requests, when half of the send buffer is filled */
 		mutex_lock(&mdev->data.mutex);
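One detail of the controller branch worth spelling out is the shift in `number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9)`: drbd_rs_controller() returns a per-tick budget in 512-byte sectors, while each resync request covers one bitmap block. A small sketch, assuming BM_BLOCK_SHIFT == 12 as above; the budget value is made up for illustration.

/* Why ">> (BM_BLOCK_SHIFT - 9)": converting a sector budget into a
 * request count. Assumes 512-byte sectors (shift 9) and 4 KiB bitmap
 * blocks (BM_BLOCK_SHIFT == 12), so the shift divides by 8 sectors
 * per request. The budget below is an invented example, not DRBD output. */
#include <stdio.h>

#define BM_BLOCK_SHIFT 12
#define SECTOR_SHIFT    9  /* 512-byte sectors */

int main(void)
{
	unsigned int budget_sectors = 2048; /* e.g. 1 MiB worth of sectors */

	/* sectors -> requests: 2048 >> 3 == 256 requests of 4 KiB each */
	unsigned int number = budget_sectors >> (BM_BLOCK_SHIFT - SECTOR_SHIFT);

	printf("%u sectors -> %u requests of %u bytes\n",
	       budget_sectors, number, 1u << BM_BLOCK_SHIFT);
	return 0;
}

The same units explain the write-back to mdev->c_sync_rate in that branch: number requests per tick, times BM_BLOCK_SIZE/1024 KiB per request, times HZ/SLEEP_TIME ticks per second, yields the effective rate in KiB/s that the controller branch reports.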