author     Philipp Reisner <philipp.reisner@linbit.com>  2011-01-19 08:16:30 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>  2011-08-29 05:30:15 -0400
commit     87eeee41f8740451b61a1e7d37a494333a906861 (patch)
tree       f7b328aa626ed5153ec5cf5b9dbd94c23676b6a8 /drivers/block/drbd/drbd_receiver.c
parent     31890f4ab299c4116cf0a104ca9ce4f9ca2c5da0 (diff)
drbd: moved req_lock and transfer log from mdev to tconn
sed -i \
    -e 's/mdev->req_lock/mdev->tconn->req_lock/g' \
    -e 's/mdev->unused_spare_tle/mdev->tconn->unused_spare_tle/g' \
    -e 's/mdev->newest_tle/mdev->tconn->newest_tle/g' \
    -e 's/mdev->oldest_tle/mdev->tconn->oldest_tle/g' \
    -e 's/mdev->out_of_sequence_requests/mdev->tconn->out_of_sequence_requests/g' \
    *.[ch]

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
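The relocation behind the rename, as a minimal C sketch (an assumption for
orientation only: member types follow the drbd_int.h of this era, and the
real structs carry many more fields, elided here):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct drbd_tl_epoch;	/* transfer-log epoch, defined in drbd_int.h */

	/* Sketch only: one tconn is shared by all volumes of a connection. */
	struct drbd_tconn {
		spinlock_t req_lock;			/* was mdev->req_lock */
		struct drbd_tl_epoch *unused_spare_tle;	/* transfer log, also moved */
		struct drbd_tl_epoch *newest_tle;
		struct drbd_tl_epoch *oldest_tle;
		struct list_head out_of_sequence_requests;
	};

	struct drbd_conf {				/* one per minor device */
		struct drbd_tconn *tconn;		/* mdev->req_lock becomes
							 * mdev->tconn->req_lock */
	};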
Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 96 ++++++++++++++++----------------
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 27a8363510dd..af968a0bae07 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -210,9 +210,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
 		drbd_free_net_ee(mdev, e);
@@ -269,7 +269,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
@@ -371,9 +371,9 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	int count = 0;
 	int is_net = list == &mdev->net_ee;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_splice_init(list, &work_list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &work_list, w.list) {
 		drbd_free_some_ee(mdev, e, is_net);
@@ -399,10 +399,10 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	struct drbd_epoch_entry *e, *t;
 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	list_splice_init(&mdev->done_ee, &work_list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
 		drbd_free_net_ee(mdev, e);
@@ -429,18 +429,18 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	 * and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 	}
 }
 
 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 {
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	_drbd_wait_ee_list_empty(mdev, head);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 /* see also kernel_accept; which is only present since 2.6.18.
@@ -1452,9 +1452,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	e->w.cb = e_end_resync_block;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add(&e->w.list, &mdev->sync_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1462,9 +1462,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	drbd_free_ee(mdev, e);
 fail:
@@ -1498,9 +1498,9 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 
 	sector = be64_to_cpu(p->sector);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (unlikely(!req))
 		return false;
 
@@ -1574,11 +1574,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	/* we delete from the conflict detection hash _after_ we sent out the
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (mdev->tconn->net_conf->two_primaries) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		D_ASSERT(!drbd_interval_empty(&e->i));
 		drbd_remove_interval(&mdev->epoch_entries, &e->i);
 		drbd_clear_interval(&e->i);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 	} else
 		D_ASSERT(drbd_interval_empty(&e->i));
 
@@ -1595,11 +1595,11 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
 	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
 	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	D_ASSERT(!drbd_interval_empty(&e->i));
 	drbd_remove_interval(&mdev->epoch_entries, &e->i);
 	drbd_clear_interval(&e->i);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	dec_unacked(mdev);
 
@@ -1718,7 +1718,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	/* I'm the receiver, I do hold a net_cnt reference. */
 	if (!mdev->tconn->net_conf->two_primaries) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 	} else {
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
@@ -1765,7 +1765,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
 			goto out_interrupted;
 
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 
 		drbd_insert_interval(&mdev->epoch_entries, &e->i);
 
@@ -1805,7 +1805,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			e->w.cb = e_send_discard_ack;
 			list_add_tail(&e->w.list, &mdev->done_ee);
 
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 
 			/* we could probably send that P_DISCARD_ACK ourselves,
 			 * but I don't like the receiver using the msock */
@@ -1820,13 +1820,13 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				drbd_remove_interval(&mdev->epoch_entries, &e->i);
 				drbd_clear_interval(&e->i);
 
-				spin_unlock_irq(&mdev->req_lock);
+				spin_unlock_irq(&mdev->tconn->req_lock);
 
 				finish_wait(&mdev->misc_wait, &wait);
 				goto out_interrupted;
 			}
 
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			if (first) {
 				first = 0;
 				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
@@ -1837,13 +1837,13 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				D_ASSERT(have_unacked == 0);
 			}
 			schedule();
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 		}
 		finish_wait(&mdev->misc_wait, &wait);
 	}
 
 	list_add(&e->w.list, &mdev->active_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	switch (mdev->tconn->net_conf->wire_protocol) {
 	case DRBD_PROT_C:
@@ -1874,11 +1874,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
 	drbd_remove_interval(&mdev->epoch_entries, &e->i);
 	drbd_clear_interval(&e->i);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (e->flags & EE_CALL_AL_COMPLETE_IO)
 		drbd_al_complete_io(mdev, e->i.sector);
 
@@ -2122,18 +2122,18 @@ submit_for_resync:
 
 submit:
 	inc_unacked(mdev);
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add_tail(&e->w.list, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
@@ -3183,10 +3183,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
 	}
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
  retry:
 	os = ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	/* peer says his disk is uptodate, while we think it is inconsistent,
 	 * and this happens while we think we have a sync going on. */
@@ -3270,7 +3270,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		}
 	}
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.i != os.i)
 		goto retry;
 	clear_bit(CONSIDER_RESYNC, &mdev->flags);
@@ -3284,7 +3284,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(mdev);
 		drbd_uuid_new_current(mdev);
@@ -3294,7 +3294,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	}
 	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
 	ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (rv < SS_SUCCESS) {
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3772,11 +3772,11 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	drbd_free_sock(mdev);
 
 	/* wait for current activity to cease. */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
 	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	/* We do not have data structures that would allow us to
 	 * get the rs_pending_cnt down to 0 again.
@@ -3828,7 +3828,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
 		drbd_try_outdate_peer_async(mdev);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 	if (os.conn >= C_UNCONNECTED) {
 		/* Do not restart in case we are C_DISCONNECTING */
@@ -3836,7 +3836,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 		ns.conn = C_UNCONNECTED;
 		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (os.conn == C_DISCONNECTING) {
 		wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
@@ -4245,14 +4245,14 @@ validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
 	struct drbd_request *req;
 	struct bio_and_error m;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	req = find_request(mdev, root, id, sector, missing_ok, func);
 	if (unlikely(!req)) {
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		return false;
 	}
 	__req_mod(req, what, &m);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -4518,9 +4518,9 @@ int drbd_asender(struct drbd_thread *thi)
 			goto reconnect;
 		/* to avoid race with newly queued ACKs */
 		set_bit(SIGNAL_ASENDER, &mdev->flags);
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		empty = list_empty(&mdev->done_ee);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		/* new ack may have been queued right here,
 		 * but then there is also a signal pending,
 		 * and we start over... */
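Every hunk above has the same shape: the request lock is now reached through
the connection object, so a single lock serializes the transfer log and the
epoch-entry lists for all volumes of one connection. A hypothetical post-patch
caller, as a sketch (frob_done_ee is illustrative, not a DRBD function):

	/* Illustration of the new access pattern; not part of the patch. */
	static void frob_done_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
	{
		spin_lock_irq(&mdev->tconn->req_lock);	/* per-connection, not per-device */
		list_add_tail(&e->w.list, &mdev->done_ee);
		spin_unlock_irq(&mdev->tconn->req_lock);
	}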