Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
 drivers/block/drbd/drbd_receiver.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e5686a81f42c..e13134f83fae 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2599,7 +2599,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 
 	if (abs(hg) >= 2) {
 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
-		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
+		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+					BM_LOCKED_SET_ALLOWED))
 			return C_MASK;
 	}
 
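
Note: this hunk (and the receive_uuids hunk below) moves to a drbd_bitmap_io() that takes an explicit bitmap-locking flag, so each caller declares which bit operations stay allowed while the bulk operation holds the bitmap lock. A minimal userspace sketch of that pattern follows; all names, values, and signatures here are hypothetical stand-ins, not the DRBD declarations in drbd_int.h.

/* Sketch only: models "declare what is allowed while the bitmap is locked
 * for bulk I/O".  Hypothetical names/values, not the kernel API. */
#include <stdio.h>

enum bm_flag {
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,
	/* while locked, only testing bits is allowed */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET,
	/* while locked, setting (and testing) bits is still allowed */
	BM_LOCKED_SET_ALLOWED  = BM_DONT_CLEAR,
};

struct bitmap {
	enum bm_flag flags;	/* what is forbidden while locked */
	const char *why;	/* who locked it, for diagnostics */
};

static void bm_lock(struct bitmap *b, const char *why, enum bm_flag flags)
{
	b->why = why;
	b->flags = flags;
}

static void bm_unlock(struct bitmap *b)
{
	b->why = NULL;
	b->flags = 0;
}

/* A concurrent set-bit path can now check whether it violates the policy. */
static void bm_set_bit(struct bitmap *b, unsigned long bit)
{
	if (b->why && (b->flags & BM_DONT_SET))
		fprintf(stderr, "set_bit(%lu) while bitmap locked by '%s'\n",
			bit, b->why);
	/* ... actually set the bit ... */
}

/* Bulk operation: lock with an explicit "allowed while locked" policy. */
static int bitmap_io(struct bitmap *b, int (*io_fn)(struct bitmap *),
		     const char *why, enum bm_flag flags)
{
	int rv;

	bm_lock(b, why, flags);
	rv = io_fn(b);		/* e.g. set all bits and write them out */
	bm_unlock(b);
	return rv;
}

static int bmio_set_n_write(struct bitmap *b)
{
	bm_set_bit(b, 0);	/* allowed: locked with BM_LOCKED_SET_ALLOWED */
	return 0;
}

int main(void)
{
	struct bitmap b = { 0 };

	/* mirrors the shape of the sync_handshake call site above */
	return bitmap_io(&b, bmio_set_n_write,
			 "set_n_write from sync_handshake",
			 BM_LOCKED_SET_ALLOWED);
}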
@@ -3053,7 +3054,8 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (skip_initial_sync) {
 		dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
 		drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
-			"clear_n_write from receive_uuids");
+			"clear_n_write from receive_uuids",
+			BM_LOCKED_TEST_ALLOWED);
 		_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
 		_drbd_uuid_set(mdev, UI_BITMAP, 0);
 		_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
@@ -3494,7 +3496,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
 	int ok = false;
 	struct p_header80 *h = &mdev->data.rbuf.header.h80;
 
-	/* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
+	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
+	/* you are supposed to send additional out-of-sync information
+	 * if you actually set bits during this phase */
 
 	/* maybe we should use some per thread scratch page,
 	 * and allocate that during initial device creation? */
@@ -3568,7 +3572,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
 
 	ok = true;
  out:
-	/* drbd_bm_unlock(mdev); by intention no lock */
+	drbd_bm_unlock(mdev);
 	if (ok && mdev->state.conn == C_WF_BITMAP_S)
 		drbd_start_resync(mdev, C_SYNC_SOURCE);
 	free_page((unsigned long) buffer);
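
Note: the two receive_bitmap() hunks above replace the old "intentionally no bm_lock" comments with a real drbd_bm_lock()/drbd_bm_unlock() pair, held across the whole bulk receive and released at the shared out: label so every early exit still unlocks. A small userspace sketch of that shape; the helpers (bm_lock, bm_unlock, recv_chunk, apply_chunk) are hypothetical stand-ins, not the DRBD API.

/* Sketch only: hold the lock for the whole bulk receive, release it on the
 * common cleanup path.  Hypothetical helpers, not the kernel functions. */
#include <stdbool.h>
#include <stdlib.h>

enum bm_flag { BM_LOCKED_SET_ALLOWED = 0x1 };

static void bm_lock(const char *why, enum bm_flag flags) { (void)why; (void)flags; }
static void bm_unlock(void) { }
static int  recv_chunk(void *buf)  { (void)buf; return 0; }	/* <= 0: done or error */
static bool apply_chunk(void *buf) { (void)buf; return true; }

static bool receive_bitmap_model(void)
{
	bool ok = false;
	void *buffer;

	/* only setting bits is expected while we hold the lock */
	bm_lock("receive bitmap", BM_LOCKED_SET_ALLOWED);

	buffer = malloc(4096);
	if (!buffer)
		goto out;		/* early exit still unlocks below */

	while (recv_chunk(buffer) > 0) {
		if (!apply_chunk(buffer))
			goto out;
	}

	ok = true;
out:
	bm_unlock();			/* single unlock for every path */
	free(buffer);
	return ok;
}

int main(void) { return receive_bitmap_model() ? 0 : 1; }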
@@ -3817,7 +3821,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 
 	fp = FP_DONT_CARE;
 	if (get_ldev(mdev)) {
-		drbd_bitmap_io(mdev, &drbd_bm_write, "write from disconnect");
 		fp = mdev->ldev->dc.fencing;
 		put_ldev(mdev);
 	}
@@ -3846,6 +3849,10 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 		drbd_request_state(mdev, NS(conn, C_STANDALONE));
 	}
 
+	/* serialize with bitmap writeout triggered by the state change,
+	 * if any. */
+	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
 	/* tcp_close and release of sendpage pages can be deferred. I don't
 	 * want to use SO_LINGER, because apparently it can be deferred for
 	 * more than 20 seconds (longest time I checked).
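
Note: the two drbd_disconnect() hunks drop the synchronous bitmap write from the disconnect path itself; the writeout is instead triggered by the state change, and disconnect only waits for the BITMAP_IO flag to clear before tearing down the connection. Below is a userspace model of that ordering using a pthread condition variable in place of the kernel waitqueue; the names and plumbing are assumptions for illustration, not kernel code.

/* Sketch only: "wait until the pending bitmap writeout is done", modelled
 * with pthreads instead of wait_event()/test_bit().  Compile with -pthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  misc_wait = PTHREAD_COND_INITIALIZER;
static bool bitmap_io_pending;		/* models the BITMAP_IO flag bit */

/* models the worker that the state change queued */
static void *bitmap_io_worker(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend to write out the bitmap */

	pthread_mutex_lock(&lock);
	bitmap_io_pending = false;	/* clear the flag ... */
	pthread_cond_broadcast(&misc_wait);	/* ... and wake the waiter */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* the state change "queues" the bitmap writeout */
	bitmap_io_pending = true;
	pthread_create(&t, NULL, bitmap_io_worker, NULL);

	/* disconnect path: serialize with that writeout before teardown,
	 * i.e. wait until the flag is clear */
	pthread_mutex_lock(&lock);
	while (bitmap_io_pending)
		pthread_cond_wait(&misc_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("bitmap writeout finished before teardown");
	return 0;
}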