author    Philipp Reisner <philipp.reisner@linbit.com>  2010-10-27 08:33:00 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2011-03-10 05:34:48 -0500
commit    73a01a18b9c28a0fab1131ece5b0a9bc00a879b8
tree      36ed1ea31bd18def918717906a58355a77b39cba  /drivers/block/drbd/drbd_req.c
parent    67531718d8f1259f01ab84c2aa25f7b03c7afd46
drbd: New packet for Ahead/Behind mode: P_OUT_OF_SYNC
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
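For readers following the diff below: in Ahead/Behind mode the primary stops shipping write data to a congested peer; instead it marks the written region out of sync in its own bitmap (drbd_set_out_of_sync) and queues a small P_OUT_OF_SYNC notification via queue_for_send_oos / w_send_oos, presumably so the peer can mark the same region out of sync on its side. The sketch below is a minimal, self-contained C model of the remote/send_oos decision the patch adds to drbd_make_request_common(); the reduced enums and the helper decide_write_path() are illustrative stand-ins, not kernel code, and the single return value simplifies the kernel path, which evaluates remote and send_oos independently.

#include <stdio.h>
#include <stdbool.h>

/* Reduced stand-ins for the DRBD connection and peer-disk states named in
 * the diff; only their relative ordering matters for this model. */
enum conn_state { C_STANDALONE, C_CONNECTED, C_AHEAD };
enum pdsk_state { D_DISKLESS, D_INCONSISTENT, D_UP_TO_DATE };
enum write_path { WP_LOCAL_ONLY, WP_MIRROR, WP_SEND_OOS };

/* Hypothetical helper mirroring the write-path condition from the diff:
 * mirror the data while connected below C_AHEAD with a usable peer disk,
 * otherwise, in C_AHEAD, only send an out-of-sync notification. */
static enum write_path decide_write_path(enum conn_state conn, enum pdsk_state pdsk)
{
	bool remote = (pdsk == D_UP_TO_DATE ||
		       (pdsk >= D_INCONSISTENT &&
			conn >= C_CONNECTED && conn < C_AHEAD));
	bool send_oos = (conn == C_AHEAD && pdsk >= D_INCONSISTENT);

	if (remote)
		return WP_MIRROR;
	if (send_oos)
		return WP_SEND_OOS;
	return WP_LOCAL_ONLY;
}

int main(void)
{
	printf("connected, peer uptodate:  %d\n",
	       decide_write_path(C_CONNECTED, D_UP_TO_DATE));  /* 1 = WP_MIRROR */
	printf("ahead, peer inconsistent:  %d\n",
	       decide_write_path(C_AHEAD, D_INCONSISTENT));     /* 2 = WP_SEND_OOS */
	printf("standalone, peer diskless: %d\n",
	       decide_write_path(C_STANDALONE, D_DISKLESS));    /* 0 = WP_LOCAL_ONLY */
	return 0;
}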
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--  drivers/block/drbd/drbd_req.c  44
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 60288fb3c4d7..a8d1ff2bda27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -142,7 +142,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 
 	/* before we can signal completion to the upper layers,
 	 * we may need to close the current epoch */
-	if (mdev->state.conn >= C_CONNECTED &&
+	if (mdev->state.conn >= C_CONNECTED && mdev->state.conn < C_AHEAD &&
 	    req->epoch == mdev->newest_tle->br_number)
 		queue_barrier(mdev);
 
@@ -545,6 +545,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		break;
 
+	case queue_for_send_oos:
+		req->rq_state |= RQ_NET_QUEUED;
+		req->w.cb = w_send_oos;
+		drbd_queue_work(&mdev->data.work, &req->w);
+		break;
+
+	case oos_handed_to_network:
+		/* actually the same */
 	case send_canceled:
 		/* treat it the same */
 	case send_failed:
@@ -756,7 +764,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	const sector_t sector = bio->bi_sector;
 	struct drbd_tl_epoch *b = NULL;
 	struct drbd_request *req;
-	int local, remote;
+	int local, remote, send_oos = 0;
 	int err = -EIO;
 	int ret = 0;
 
@@ -820,8 +828,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	}
 
 	remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
-			    (mdev->state.pdsk == D_INCONSISTENT &&
-			     mdev->state.conn >= C_CONNECTED));
+			    (mdev->state.pdsk >= D_INCONSISTENT &&
+			     mdev->state.conn >= C_CONNECTED &&
+			     mdev->state.conn < C_AHEAD));
+	send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD &&
+		    mdev->state.pdsk >= D_INCONSISTENT);
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -835,7 +846,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	 * but there is a race between testing the bit and pointer outside the
 	 * spinlock, and grabbing the spinlock.
 	 * if we lost that race, we retry. */
-	if (rw == WRITE && remote &&
+	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
@@ -860,11 +871,15 @@ allocate_barrier:
 		goto fail_free_complete;
 	}
 
-	if (remote) {
+	if (remote || send_oos) {
 		remote = (mdev->state.pdsk == D_UP_TO_DATE ||
-			  (mdev->state.pdsk == D_INCONSISTENT &&
-			   mdev->state.conn >= C_CONNECTED));
-		if (!remote)
+			  (mdev->state.pdsk >= D_INCONSISTENT &&
+			   mdev->state.conn >= C_CONNECTED &&
+			   mdev->state.conn < C_AHEAD));
+		send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD &&
+			    mdev->state.pdsk >= D_INCONSISTENT);
+
+		if (!(remote || send_oos))
 			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
 		if (!(local || remote)) {
 			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
@@ -877,7 +892,7 @@ allocate_barrier:
 		mdev->unused_spare_tle = b;
 		b = NULL;
 	}
-	if (rw == WRITE && remote &&
+	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
@@ -900,7 +915,7 @@ allocate_barrier:
 	 * barrier packet. To get the write ordering right, we only have to
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
-	if (remote && mdev->unused_spare_tle &&
+	if ((remote || send_oos) && mdev->unused_spare_tle &&
 	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
 		_tl_add_barrier(mdev, mdev->unused_spare_tle);
 		mdev->unused_spare_tle = NULL;
@@ -948,8 +963,11 @@ allocate_barrier:
 				? queue_for_net_write
 				: queue_for_net_read);
 	}
+	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
+		_req_mod(req, queue_for_send_oos);
 
-	if (remote && mdev->net_conf->on_congestion != OC_BLOCK) {
+	if (remote &&
+	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
 		int congested = 0;
 
 		if (mdev->net_conf->cong_fill &&
@@ -964,6 +982,8 @@ allocate_barrier:
 		}
 
 		if (congested) {
+			queue_barrier(mdev);
+
 			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
 				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
 			else /*mdev->net_conf->on_congestion == OC_DISCONNECT */