path: root/drivers/block/drbd/drbd_main.c
author     Lars Ellenberg <lars.ellenberg@linbit.com>      2010-12-17 15:14:23 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>    2011-03-10 05:43:35 -0500
commit     5a22db8968a69bec835d1ed9a96ab3381719e0c0
tree       6dd29c3cec008a2f846f54a02cdb139c4ce94be3  /drivers/block/drbd/drbd_main.c
parent     f735e3635430c6d1c319664d82b34376e3f9aa17
drbd: serialize sending of resync uuid with pending w_send_oos
To improve the latency of IO requests during bitmap exchange, we recently
allowed writes while waiting for the bitmap, sending "set out-of-sync"
information packets for any newly dirtied bits.

We have to make sure that the new resync-uuid does not overtake these
"set oos" packets. Once the resync-uuid is received, the sync target
starts the resync process, and expects the bitmap to only be cleared,
not re-set.

If we use this protocol extension, we queue the generation and sending
of the resync-uuid on the worker, which naturally serializes with all
previously queued packets.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
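The ordering guarantee rests on the FIFO behaviour of the single sender worker:
whatever is queued later is sent later. The userspace sketch below is not DRBD
code (the work_item/queue_work/drain_queue names and the packet labels are
illustrative only); it merely models why queuing the sync-uuid packet on the
same worker as the pending "set out-of-sync" packets keeps it from overtaking
them.

/*
 * Minimal userspace model of the ordering argument above -- NOT the DRBD
 * worker implementation.  It only shows that funnelling both the
 * "set out-of-sync" packets and the sync-uuid packet through one FIFO
 * work queue prevents the uuid from overtaking earlier packets.
 */
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	void (*cb)(struct work_item *);
	const char *what;
};

struct work_queue {
	struct work_item *head, **tail;
};

static void queue_init(struct work_queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

/* append at the tail: later submissions run strictly after earlier ones */
static void queue_work(struct work_queue *q, struct work_item *w)
{
	w->next = NULL;
	*q->tail = w;
	q->tail = &w->next;
}

/* the single worker drains in FIFO order, preserving submission order */
static void drain_queue(struct work_queue *q)
{
	struct work_item *w;

	while ((w = q->head)) {
		q->head = w->next;
		if (!q->head)
			q->tail = &q->head;
		w->cb(w);
		free(w);
	}
}

static void send_packet(struct work_item *w)
{
	printf("sending %s\n", w->what);
}

static struct work_item *mk(const char *what)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		exit(1);
	w->cb = send_packet;
	w->what = what;
	return w;
}

int main(void)
{
	struct work_queue q;

	queue_init(&q);
	/* writes during bitmap exchange queue "set out-of-sync" packets */
	queue_work(&q, mk("set out-of-sync (bit 42)"));
	queue_work(&q, mk("set out-of-sync (bit 99)"));
	/* the sync uuid is queued on the same worker, so it is sent last */
	queue_work(&q, mk("P_SYNC_UUID"));
	drain_queue(&q);
	return 0;
}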
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r--  drivers/block/drbd/drbd_main.c  22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4da6f11cc82e..2190064d59bd 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1387,6 +1387,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		spin_unlock_irq(&mdev->req_lock);
 	}
 
+	/* Became sync source. With protocol >= 96, we still need to send out
+	 * the sync uuid now. Need to do that before any drbd_send_state, or
+	 * the other side may go "paused sync" before receiving the sync uuids,
+	 * which is unexpected. */
+	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+	    mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
+		drbd_gen_and_send_sync_uuid(mdev);
+		put_ldev(mdev);
+	}
+
 	/* Do not change the order of the if above and the two below... */
 	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
 		drbd_send_uuids(mdev);
@@ -1980,12 +1991,17 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
 	return _drbd_send_uuids(mdev, 8);
 }
 
-
-int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
+int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
 {
 	struct p_rs_uuid p;
+	u64 uuid;
+
+	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
 
-	p.uuid = cpu_to_be64(val);
+	get_random_bytes(&uuid, sizeof(u64));
+	drbd_uuid_set(mdev, UI_BITMAP, uuid);
+	drbd_md_sync(mdev);
+	p.uuid = cpu_to_be64(uuid);
 
 	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
 			     (struct p_header80 *)&p, sizeof(p));