author		Lars Ellenberg <lars.ellenberg@linbit.com>	2013-11-22 06:32:01 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2014-07-10 12:35:11 -0400
commit		e5f891b2234dbab8c8797111a61519d0728ef855 (patch)
tree		743808e1d2252876c0b0f3550c6dba70f7e8e779 /drivers/block
parent		e37d2438d8e5e4c1225cf94d45347fa207835447 (diff)
drbd: gather detailed timing statistics for drbd_requests
Record (in jiffies) how much time a request spends in which stages.

Followup commits will use and present this additional timing information
so we can better locate and tackle the root causes of latency spikes,
or present the backlog for asynchronous replication.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
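[Annotation] The measurement pattern behind the patch is the standard kernel one: store jiffies into a field when a stage begins, subtract later, and convert only for presentation. A minimal sketch of that pattern (hypothetical helper, not part of this patch):

	#include <linux/jiffies.h>

	/* Hypothetical helper showing the jiffies-delta pattern the patch
	 * applies per request stage; unsigned subtraction keeps the delta
	 * correct across jiffies wraparound. */
	static inline unsigned int stage_elapsed_ms(unsigned long stage_start_jif)
	{
		return jiffies_to_msecs(jiffies - stage_start_jif);
	}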
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/drbd/drbd_int.h		54
-rw-r--r--	drivers/block/drbd/drbd_main.c		 7
-rw-r--r--	drivers/block/drbd/drbd_req.c		93
-rw-r--r--	drivers/block/drbd/drbd_worker.c	 3
4 files changed, 119 insertions(+), 38 deletions(-)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3f8281bbea53..08fa2dc8cdba 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -317,7 +317,59 @@ struct drbd_request {
 
 	struct list_head tl_requests; /* ring list in the transfer log */
 	struct bio *master_bio;       /* master bio pointer */
-	unsigned long start_time;
+
+	/* for generic IO accounting */
+	unsigned long start_jif;
+
+	/* for DRBD internal statistics */
+
+	/* Minimal set of time stamps to determine if we wait for activity log
+	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
+	 * we don't expect a DRBD request to be stalled for several months.
+	 */
+
+	/* before actual request processing */
+	unsigned long in_actlog_jif;
+
+	/* local disk */
+	unsigned long pre_submit_jif;
+
+	/* per connection */
+	unsigned long pre_send_jif;
+	unsigned long acked_jif;
+	unsigned long net_done_jif;
+
+	/* Possibly even more detail to track each phase:
+	 *  master_completion_jif
+	 *      how long did it take to complete the master bio
+	 *      (application visible latency)
+	 *  allocated_jif
+	 *      how long the master bio was blocked until we finally allocated
+	 *      a tracking struct
+	 *  in_actlog_jif
+	 *      how long did we wait for activity log transactions
+	 *
+	 *  net_queued_jif
+	 *      when did we finally queue it for sending
+	 *  pre_send_jif
+	 *      when did we start sending it
+	 *  post_send_jif
+	 *      how long did we block in the network stack trying to send it
+	 *  acked_jif
+	 *      when did we receive (or fake, in protocol A) a remote ACK
+	 *  net_done_jif
+	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
+	 *      or decide, e.g. on connection loss, that we no longer expect
+	 *      anything from this peer for this request.
+	 *
+	 *  pre_submit_jif
+	 *  post_sub_jif
+	 *      when did we start submitting to the lower level device,
+	 *      and how long did we block in that submit function
+	 *  local_completion_jif
+	 *      how long did it take the lower level device to complete this request
+	 */
+
 
 	/* once it hits 0, we may complete the master_bio */
 	atomic_t completion_ref;
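[Annotation] Taken together, the new stamps let a consumer break a request's life into per-stage latencies. A hedged sketch of such a consumer (the helper and its reporting format are hypothetical; the follow-up commits mentioned in the commit message do the real presentation):

	#include <linux/jiffies.h>
	#include <linux/printk.h>

	/* Hypothetical consumer of the stamps above; not part of this patch.
	 * Only meaningful once the request has reached the respective stages. */
	static void drbd_req_print_timing(const struct drbd_request *req)
	{
		/* wait for an activity log transaction, then local submit delay */
		pr_info("actlog wait: %ums, submit delay: %ums\n",
			jiffies_to_msecs(req->in_actlog_jif - req->start_jif),
			jiffies_to_msecs(req->pre_submit_jif - req->in_actlog_jif));
	}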
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 3ab74619c8eb..0baec7a3fa81 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -29,6 +29,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/drbd.h>
 #include <asm/uaccess.h>
 #include <asm/types.h>
@@ -264,7 +265,7 @@ bail:
 
 /**
  * _tl_restart() - Walks the transfer log, and applies an action to all requests
- * @device:	DRBD device.
+ * @connection:	DRBD connection to operate on.
  * @what:       The action/event to perform with all request objects
  *
  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
@@ -2228,7 +2229,7 @@ static void do_retry(struct work_struct *ws)
 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
 		struct drbd_device *device = req->device;
 		struct bio *bio = req->master_bio;
-		unsigned long start_time = req->start_time;
+		unsigned long start_jif = req->start_jif;
 		bool expected;
 
 		expected =
@@ -2263,7 +2264,7 @@ static void do_retry(struct work_struct *ws)
 		/* We are not just doing generic_make_request(),
 		 * as we want to keep the start_time information. */
 		inc_ap_bio(device);
-		__drbd_make_request(device, bio, start_time);
+		__drbd_make_request(device, bio, start_jif);
 	}
 }
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3824d5c737e6..1319beab1b37 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -52,7 +52,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request
 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
 {
 	int rw = bio_data_dir(req->master_bio);
-	unsigned long duration = jiffies - req->start_time;
+	unsigned long duration = jiffies - req->start_jif;
 	int cpu;
 	cpu = part_stat_lock();
 	part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
@@ -66,7 +66,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 {
 	struct drbd_request *req;
 
-	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
 	if (!req)
 		return NULL;
 
@@ -366,14 +366,18 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 		atomic_inc(&req->completion_ref);
 	}
 
-	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
+	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
 		atomic_inc(&req->completion_ref);
+	}
 
 	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
 		kref_get(&req->kref); /* wait for the DONE */
 
-	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
-		atomic_add(req->i.size >> 9, &device->ap_in_flight);
+	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
+		/* potentially already completed in the asender thread */
+		if (!(s & RQ_NET_DONE))
+			atomic_add(req->i.size >> 9, &device->ap_in_flight);
+	}
 
 	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
 		atomic_inc(&req->completion_ref);
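[Annotation] Two details in this hunk: ap_in_flight is accounted in 512-byte sectors while the request interval size is in bytes (hence the >> 9), and the new !(s & RQ_NET_DONE) guard avoids adding a request the asender thread already completed. The unit conversion in isolation (hypothetical helper name, not from this patch):

	/* Interval sizes are bytes; ap_in_flight counts 512-byte sectors.
	 * Shifting right by 9 divides by 512 (512 == 1 << 9). */
	static inline unsigned int drbd_bytes_to_sectors(unsigned int bytes)
	{
		return bytes >> 9;	/* e.g. 4096 bytes -> 8 sectors */
	}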
@@ -401,15 +405,18 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
 		dec_ap_pending(device);
 		++c_put;
+		req->acked_jif = jiffies;
 	}
 
 	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
 		++c_put;
 
-	if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
-		if (req->rq_state & RQ_NET_SENT)
+	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
+		if (s & RQ_NET_SENT)
 			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
-		++k_put;
+		if (s & RQ_EXP_BARR_ACK)
+			++k_put;
+		req->net_done_jif = jiffies;
 	}
 
 	/* potentially complete and destroy */
@@ -449,6 +456,19 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request
 			bdevname(device->ldev->backing_bdev, b));
 }
 
+/* Helper for HANDED_OVER_TO_NETWORK.
+ * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
+ * Is it also still "PENDING"?
+ * --> If so, clear PENDING and set NET_OK below.
+ * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
+ * (and we must not set RQ_NET_OK) */
+static inline bool is_pending_write_protocol_A(struct drbd_request *req)
+{
+	return (req->rq_state &
+		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
+		== (RQ_WRITE|RQ_NET_PENDING);
+}
+
 /* obviously this could be coded as many single functions
  * instead of one huge switch,
  * or by putting the code directly in the respective locations
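[Annotation] The helper's single compare works because the mask selects all four relevant bits while the expected value sets only two: a request qualifies exactly when RQ_WRITE and RQ_NET_PENDING are set and both ACK-expectation bits are clear. The same idiom as a standalone, compilable sketch (illustrative flag values, not DRBD's real RQ_* definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative flag values only; DRBD's real RQ_* bits differ. */
	enum {
		RQ_WRITE           = 1 << 0,
		RQ_NET_PENDING     = 1 << 1,
		RQ_EXP_WRITE_ACK   = 1 << 2,	/* protocol C */
		RQ_EXP_RECEIVE_ACK = 1 << 3,	/* protocol B */
	};

	static bool is_pending_write_protocol_A(unsigned int rq_state)
	{
		/* WRITE and PENDING must be set, both ACK-expectation bits clear */
		return (rq_state &
			(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
			== (RQ_WRITE|RQ_NET_PENDING);
	}

	int main(void)
	{
		printf("%d\n", is_pending_write_protocol_A(RQ_WRITE|RQ_NET_PENDING));			/* 1 */
		printf("%d\n", is_pending_write_protocol_A(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK));	/* 0 */
		return 0;
	}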
@@ -627,18 +647,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 	case HANDED_OVER_TO_NETWORK:
 		/* assert something? */
-		if (bio_data_dir(req->master_bio) == WRITE &&
-		    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
+		if (is_pending_write_protocol_A(req))
 			/* this is what is dangerous about protocol A:
 			 * pretend it was successfully written on the peer. */
-			if (req->rq_state & RQ_NET_PENDING)
-				mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
-			/* else: neg-ack was faster... */
-			/* it is still not yet RQ_NET_DONE until the
-			 * corresponding epoch barrier got acked as well,
-			 * so we know what to dirty on connection loss */
-		}
-		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
+			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
+						RQ_NET_SENT|RQ_NET_OK);
+		else
+			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
+		/* It is still not yet RQ_NET_DONE until the
+		 * corresponding epoch barrier got acked as well,
+		 * so we know what to dirty on connection loss. */
 		break;
 
 	case OOS_HANDED_TO_NETWORK:
@@ -1037,6 +1055,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 	 * stable storage, and this is a WRITE, we may not even submit
 	 * this bio. */
 	if (get_ldev(device)) {
+		req->pre_submit_jif = jiffies;
 		if (drbd_insert_fault(device,
 				      rw == WRITE ? DRBD_FAULT_DT_WR
 				    : rw == READ  ? DRBD_FAULT_DT_RD
@@ -1063,7 +1082,7 @@ static void drbd_queue_write(struct drbd_device *device, struct drbd_request *re
  * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
  */
 static struct drbd_request *
-drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time)
+drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
 {
 	const int rw = bio_data_dir(bio);
 	struct drbd_request *req;
@@ -1078,7 +1097,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 		bio_endio(bio, -ENOMEM);
 		return ERR_PTR(-ENOMEM);
 	}
-	req->start_time = start_time;
+	req->start_jif = start_jif;
 
 	if (!get_ldev(device)) {
 		bio_put(req->private_bio);
@@ -1095,6 +1114,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 			return NULL;
 		}
 		req->rq_state |= RQ_IN_ACT_LOG;
+		req->in_actlog_jif = jiffies;
 	}
 
 	return req;
@@ -1197,9 +1217,9 @@ out:
 	complete_master_bio(device, &m);
 }
 
-void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time)
+void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
 {
-	struct drbd_request *req = drbd_request_prepare(device, bio, start_time);
+	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
 	if (IS_ERR_OR_NULL(req))
 		return;
 	drbd_send_and_submit(device, req);
@@ -1218,6 +1238,7 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom
 				continue;
 
 			req->rq_state |= RQ_IN_ACT_LOG;
+			req->in_actlog_jif = jiffies;
 		}
 
 		list_del_init(&req->tl_requests);
@@ -1240,7 +1261,6 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
 			wake = 1;
 		if (err)
 			continue;
-		req->rq_state |= RQ_IN_ACT_LOG;
 		list_move_tail(&req->tl_requests, pending);
 	}
 	spin_unlock_irq(&device->al_lock);
@@ -1302,6 +1322,8 @@ skip_fast_path:
 	drbd_al_begin_io_commit(device);
 
 	list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
+		req->rq_state |= RQ_IN_ACT_LOG;
+		req->in_actlog_jif = jiffies;
 		list_del_init(&req->tl_requests);
 		drbd_send_and_submit(device, req);
 	}
@@ -1311,9 +1333,12 @@ skip_fast_path:
 	 * requests to cold extents. In that case, prepare one request
 	 * in blocking mode. */
 	list_for_each_entry_safe(req, tmp, &incoming, tl_requests) {
+		bool was_cold;
 		list_del_init(&req->tl_requests);
-		req->rq_state |= RQ_IN_ACT_LOG;
-		if (!drbd_al_begin_io_prepare(device, &req->i)) {
+		was_cold = drbd_al_begin_io_prepare(device, &req->i);
+		if (!was_cold) {
+			req->rq_state |= RQ_IN_ACT_LOG;
+			req->in_actlog_jif = jiffies;
 			/* Corresponding extent was hot after all? */
 			drbd_send_and_submit(device, req);
 		} else {
@@ -1330,9 +1355,9 @@ skip_fast_path:
 void drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
-	unsigned long start_time;
+	unsigned long start_jif;
 
-	start_time = jiffies;
+	start_jif = jiffies;
 
 	/*
 	 * what we "blindly" assume:
@@ -1340,7 +1365,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
 	inc_ap_bio(device);
-	__drbd_make_request(device, bio, start_time);
+	__drbd_make_request(device, bio, start_jif);
 }
 
 /* This is called by bio_add_page().
@@ -1453,13 +1478,13 @@ void request_timer_fn(unsigned long data)
 	 * to expire twice (worst case) to become effective. Good enough.
 	 */
 	if (ent && req_peer &&
-	    time_after(now, req_peer->start_time + ent) &&
+	    time_after(now, req_peer->start_jif + ent) &&
 	    !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
 		drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
 		_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
 	}
 	if (dt && req_disk &&
-	    time_after(now, req_disk->start_time + dt) &&
+	    time_after(now, req_disk->start_jif + dt) &&
 	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
 		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
 		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
@@ -1467,10 +1492,10 @@ void request_timer_fn(unsigned long data)
 
 	/* Reschedule timer for the nearest not already expired timeout.
 	 * Fallback to now + min(effective network timeout, disk timeout). */
-	ent = (ent && req_peer && time_before(now, req_peer->start_time + ent))
-		? req_peer->start_time + ent : now + et;
-	dt = (dt && req_disk && time_before(now, req_disk->start_time + dt))
-		? req_disk->start_time + dt : now + et;
+	ent = (ent && req_peer && time_before(now, req_peer->start_jif + ent))
+		? req_peer->start_jif + ent : now + et;
+	dt = (dt && req_disk && time_before(now, req_disk->start_jif + dt))
+		? req_disk->start_jif + dt : now + et;
 	nt = time_before(ent, dt) ? ent : dt;
 	spin_unlock_irq(&connection->resource->req_lock);
 	mod_timer(&device->request_timer, nt);
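[Annotation] All the checks in request_timer_fn() use time_after()/time_before()/time_in_range() rather than plain comparisons, so they remain correct when jiffies wraps around. A minimal sketch of that idiom (hypothetical helper; the timeout is supplied by the caller in jiffies):

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* Wraparound-safe: true once 'timeout' jiffies have passed since
	 * start_jif was stamped.  Hypothetical helper, not from this patch. */
	static inline bool drbd_request_expired(unsigned long start_jif,
						unsigned long timeout)
	{
		return time_after(jiffies, start_jif + timeout);
	}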
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 3978d9ec6f00..0ff8f4637741 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1368,6 +1368,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
 		req_mod(req, SEND_CANCELED);
 		return 0;
 	}
+	req->pre_send_jif = jiffies;
 
 	/* this time, no connection->send.current_epoch_writes++;
 	 * If it was sent, it was the closing barrier for the last
@@ -1398,6 +1399,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
 		req_mod(req, SEND_CANCELED);
 		return 0;
 	}
+	req->pre_send_jif = jiffies;
 
 	re_init_if_first_write(connection, req->epoch);
 	maybe_send_barrier(connection, req->epoch);
@@ -1426,6 +1428,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
 		req_mod(req, SEND_CANCELED);
 		return 0;
 	}
+	req->pre_send_jif = jiffies;
 
 	/* Even read requests may close a write epoch,
 	 * if there was any yet. */
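[Annotation] With pre_send_jif now stamped in all three sender paths, and acked_jif/net_done_jif recorded in mod_rq_state() above, per-request network latencies fall out as simple deltas; hypothetically:

	#include <linux/jiffies.h>

	/* Hypothetical: ack round trip for one request, meaningful once both
	 * stamps exist (pre_send_jif from the worker, acked_jif on the ACK). */
	static inline unsigned int req_ack_latency_ms(const struct drbd_request *req)
	{
		return jiffies_to_msecs(req->acked_jif - req->pre_send_jif);
	}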