author	Lars Ellenberg <lars.ellenberg@linbit.com>	2014-01-31 08:55:12 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2014-07-10 12:35:04 -0400
commit	506afb6248af577eb702c73f3da52a12f4c56a38 (patch)
tree	d4339c873049cff5224e3b449ae8c6c1b8ed3357 /drivers/block/drbd/drbd_worker.c
parent	659b2e3bb8b149f5f7e2f8551599044b715bcc21 (diff)
drbd: improve resync request throttling due to sendbuf size
If we throttle resync because the socket sendbuffer is filling up, tell
TCP about it, so it may expand the sendbuffer for us.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
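In essence, the patch compares sk_wmem_queued against half of sk_sndbuf and, once over that threshold, sets SOCK_NOSPACE before backing off. A minimal sketch of that pattern (illustration only; drbd_rs_should_requeue is a hypothetical name, not part of the patch):

	#include <net/sock.h>	/* struct sock, sk_wmem_queued, sk_sndbuf */
	#include <linux/net.h>	/* struct socket, SOCK_NOSPACE */

	/* Hypothetical helper distilling the pattern of this patch: back off
	 * once half of the send buffer is queued, and set SOCK_NOSPACE so
	 * TCP autotuning may grow sk_sndbuf before the next attempt. */
	static int drbd_rs_should_requeue(struct sock *sk)
	{
		if (sk->sk_wmem_queued <= sk->sk_sndbuf / 2)
			return 0;	/* still room: keep generating requests */
		if (sk->sk_socket)
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return 1;	/* caller should requeue and retry later */
	}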
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r--	drivers/block/drbd/drbd_worker.c	23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 6532a697cf49..0b5e4294acf9 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -592,7 +592,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
 	int max_bio_size;
 	int number, rollback_i, size;
-	int align, queued, sndbuf;
+	int align, requeue = 0;
 	int i = 0;
 
 	if (unlikely(cancel))
@@ -619,17 +619,22 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 		goto requeue;
 
 	for (i = 0; i < number; i++) {
-		/* Stop generating RS requests, when half of the send buffer is filled */
+		/* Stop generating RS requests when half of the send buffer is filled,
+		 * but notify TCP that we'd like to have more space. */
 		mutex_lock(&connection->data.mutex);
 		if (connection->data.socket) {
-			queued = connection->data.socket->sk->sk_wmem_queued;
-			sndbuf = connection->data.socket->sk->sk_sndbuf;
-		} else {
-			queued = 1;
-			sndbuf = 0;
-		}
+			struct sock *sk = connection->data.socket->sk;
+			int queued = sk->sk_wmem_queued;
+			int sndbuf = sk->sk_sndbuf;
+			if (queued > sndbuf / 2) {
+				requeue = 1;
+				if (sk->sk_socket)
+					set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			}
+		} else
+			requeue = 1;
 		mutex_unlock(&connection->data.mutex);
-		if (queued > sndbuf / 2)
+		if (requeue)
 			goto requeue;
 
 next_sector:
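Why the SOCK_NOSPACE bit matters here: TCP only considers growing a socket's send buffer when the sender has signalled that it ran out of space. Roughly (an approximation of the tcp_check_space() gate in net/ipv4/tcp_input.c from kernels of this era, not a verbatim quote):

	/* When write memory is freed, TCP expands sk_sndbuf only if the
	 * sender advertised buffer pressure via SOCK_NOSPACE. */
	static void tcp_check_space(struct sock *sk)
	{
		if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
			sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
			if (sk->sk_socket &&
			    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
				tcp_new_space(sk);	/* may expand sk_sndbuf */
		}
	}

Without the bit, a sender that merely backs off never registers demand and sk_sndbuf stays where it is; setting it turns the throttle into a request for more buffer. Note also the new requeue flag: sk->sk_socket may only be dereferenced while connection->data.mutex pins the socket, so the decision (and the set_bit) now happens under the lock and only the flag escapes, whereas the old code copied queued/sndbuf out and compared after unlocking.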