Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--	drivers/block/drbd/drbd_req.c	48
1 file changed, 24 insertions(+), 24 deletions(-)
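For orientation: every hunk below rewrites mdev->X as mdev->tconn->X for the transfer-log pointers, the out-of-sequence request list and req_lock. A minimal sketch of the per-connection structure this implies follows; the real declarations live in drbd_int.h and are not part of this diff, so the member layout shown here is an assumption, not the actual definition.

/*
 * Hypothetical sketch only -- illustrates the relocation this patch relies
 * on: transfer-log state and req_lock are assumed to have moved into the
 * per-connection object, reached from the device via mdev->tconn.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct drbd_tl_epoch;				/* transfer-log epoch, defined elsewhere */

struct drbd_tconn {
	spinlock_t req_lock;				/* was mdev->req_lock */
	struct drbd_tl_epoch *newest_tle;		/* was mdev->newest_tle */
	struct drbd_tl_epoch *oldest_tle;		/* was mdev->oldest_tle */
	struct drbd_tl_epoch *unused_spare_tle;		/* was mdev->unused_spare_tle */
	struct list_head out_of_sequence_requests;	/* was in struct drbd_conf */
	/* ... other per-connection state ... */
};

struct drbd_conf {
	struct drbd_tconn *tconn;			/* back-pointer to the connection */
	/* ... remaining per-device state ... */
};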
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c871ef2414fa..74179f7986e1 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -120,7 +120,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	if (test_bit(CREATE_BARRIER, &mdev->flags))
 		return;
 
-	b = mdev->newest_tle;
+	b = mdev->tconn->newest_tle;
 	b->w.cb = w_send_barrier;
 	/* inc_ap_pending done here, so we won't
 	 * get imbalanced on connection loss.
@@ -144,7 +144,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 	 */
 	if (mdev->state.conn >= C_CONNECTED &&
 	    (s & RQ_NET_SENT) != 0 &&
-	    req->epoch == mdev->newest_tle->br_number)
+	    req->epoch == mdev->tconn->newest_tle->br_number)
 		queue_barrier(mdev);
 
 	/* we need to do the conflict detection stuff,
@@ -516,10 +516,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * just after it grabs the req_lock */
 		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
 
-		req->epoch = mdev->newest_tle->br_number;
+		req->epoch = mdev->tconn->newest_tle->br_number;
 
 		/* increment size of current epoch */
-		mdev->newest_tle->n_writes++;
+		mdev->tconn->newest_tle->n_writes++;
 
 		/* queue work item to send data */
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -528,7 +528,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->tconn->data.work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
-		if (mdev->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
+		if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
 			queue_barrier(mdev);
 
 		break;
@@ -693,7 +693,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			 * this is bad, because if the connection is lost now,
 			 * we won't be able to clean them up... */
 			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
-			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+			list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
 		}
 		if ((req->rq_state & RQ_NET_MASK) != 0) {
 			req->rq_state |= RQ_NET_DONE;
@@ -834,7 +834,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * spinlock, and grabbing the spinlock.
 	 * if we lost that race, we retry. */
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
 		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
@@ -846,7 +846,7 @@ allocate_barrier:
 	}
 
 	/* GOOD, everything prepared, grab the spin_lock */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 
 	if (is_susp(mdev->state)) {
 		/* If we got suspended, use the retry mechanism of
@@ -854,7 +854,7 @@ allocate_barrier:
 		   bio. In the next call to drbd_make_request
 		   we sleep in inc_ap_bio() */
 		ret = 1;
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto fail_free_complete;
 	}
 
@@ -867,21 +867,21 @@ allocate_barrier:
 			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
 		if (!(local || remote)) {
 			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			goto fail_free_complete;
 		}
 	}
 
-	if (b && mdev->unused_spare_tle == NULL) {
-		mdev->unused_spare_tle = b;
+	if (b && mdev->tconn->unused_spare_tle == NULL) {
+		mdev->tconn->unused_spare_tle = b;
 		b = NULL;
 	}
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
 		 * while we were grabbing the spinlock */
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto allocate_barrier;
 	}
 
@@ -899,10 +899,10 @@ allocate_barrier:
 	 * barrier packet. To get the write ordering right, we only have to
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
-	if ((remote || send_oos) && mdev->unused_spare_tle &&
+	if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
 	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-		_tl_add_barrier(mdev, mdev->unused_spare_tle);
-		mdev->unused_spare_tle = NULL;
+		_tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+		mdev->tconn->unused_spare_tle = NULL;
 	} else {
 		D_ASSERT(!(remote && rw == WRITE &&
 			   test_bit(CREATE_BARRIER, &mdev->flags)));
@@ -934,7 +934,7 @@ allocate_barrier:
 	if (rw == WRITE && _req_conflicts(req))
 		goto fail_conflicting;
 
-	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+	list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
 
 	/* NOTE remote first: to get the concurrent write detection right,
 	 * we must register the request before start of local IO. */
@@ -975,7 +975,7 @@ allocate_barrier:
 		}
 	}
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */
 
 	if (local) {
@@ -1008,7 +1008,7 @@ fail_conflicting:
 	 * pretend that it was successfully served right now.
 	 */
 	_drbd_end_io_acct(mdev, req);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (remote)
 		dec_ap_pending(mdev);
 	/* THINK: do we want to fail it (-EIO), or pretend success?
@@ -1188,10 +1188,10 @@ void request_timer_fn(unsigned long data)
 	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
 		return; /* Recurring timer stopped */
 
-	spin_lock_irq(&mdev->req_lock);
-	le = &mdev->oldest_tle->requests;
+	spin_lock_irq(&mdev->tconn->req_lock);
+	le = &mdev->tconn->oldest_tle->requests;
 	if (list_empty(le)) {
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		mod_timer(&mdev->request_timer, jiffies + et);
 		return;
 	}
@@ -1210,5 +1210,5 @@ void request_timer_fn(unsigned long data)
 		mod_timer(&mdev->request_timer, req->start_time + et);
 	}
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }