author    Andreas Gruenbacher <agruen@linbit.com>        2011-02-04 09:57:48 -0500
committer Philipp Reisner <philipp.reisner@linbit.com>   2011-09-28 04:26:42 -0400
commit    db830c464b69e26ea4d371e38bb2320c99c82f41 (patch)
tree      a19f1e4432d48830d2895f28e2e93566e3aa46cb
parent    6c852beca185b18e89ad7783ab15793c0911f86b (diff)
drbd: Local variable renames: e -> peer_req
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
-rw-r--r--	drivers/block/drbd/drbd_int.h		  4
-rw-r--r--	drivers/block/drbd/drbd_main.c		 36
-rw-r--r--	drivers/block/drbd/drbd_nl.c		 18
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	256
-rw-r--r--	drivers/block/drbd/drbd_worker.c	197
5 files changed, 259 insertions, 252 deletions
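The patch is a mechanical rename: every local variable and parameter of type struct drbd_peer_request * that used to be called "e" is now called "peer_req", and a few statements are re-wrapped where the longer name pushes them past 80 columns. As a rough before/after sketch of the pattern, taken from the drbd_int.h hunk below (the closing lines of the helper are reconstructed from context, not quoted verbatim):

/* before: the peer request is just "e" */
static inline int drbd_ee_has_active_page(struct drbd_peer_request *e)
{
	struct page *page = e->pages;

	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

/* after: same logic, only the name changes */
static inline int drbd_ee_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;

	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}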
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c7504579c46e..302ccc6d9432 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1701,9 +1701,9 @@ static inline int drbd_bio_has_active_page(struct bio *bio)
 	return 0;
 }
 
-static inline int drbd_ee_has_active_page(struct drbd_peer_request *e)
+static inline int drbd_ee_has_active_page(struct drbd_peer_request *peer_req)
 {
-	struct page *page = e->pages;
+	struct page *page = peer_req->pages;
 	page_chain_for_each(page) {
 		if (page_count(page) > 1)
 			return 1;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 3bc900f48f96..7728d1613406 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2429,17 +2429,17 @@ int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
 
 /**
  * drbd_send_ack() - Sends an ack packet
- * @mdev:	DRBD device.
- * @cmd:	Packet command code.
- * @e:		Epoch entry.
+ * @mdev:	DRBD device
+ * @cmd:	packet command code
+ * @peer_req:	peer request
  */
 int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
-		  struct drbd_peer_request *e)
+		  struct drbd_peer_request *peer_req)
 {
 	return _drbd_send_ack(mdev, cmd,
-			      cpu_to_be64(e->i.sector),
-			      cpu_to_be32(e->i.size),
-			      e->block_id);
+			      cpu_to_be64(peer_req->i.sector),
+			      cpu_to_be32(peer_req->i.size),
+			      peer_req->block_id);
 }
 
 /* This function misuses the block_id field to signal if the blocks
@@ -2641,10 +2641,12 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 	return 1;
 }
 
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_peer_request *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+			    struct drbd_peer_request *peer_req)
 {
-	struct page *page = e->pages;
-	unsigned len = e->i.size;
+	struct page *page = peer_req->pages;
+	unsigned len = peer_req->i.size;
+
 	/* hint all but last page with MSG_MORE */
 	page_chain_for_each(page) {
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
@@ -2747,7 +2749,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
  * C_SYNC_SOURCE -> C_SYNC_TARGET  (P_RS_DATA_REPLY)
  */
 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
-		    struct drbd_peer_request *e)
+		    struct drbd_peer_request *peer_req)
 {
 	int ok;
 	struct p_data p;
@@ -2757,9 +2759,11 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
 	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
 		crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
 
-	prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
-	p.sector = cpu_to_be64(e->i.sector);
-	p.block_id = e->block_id;
+	prepare_header(mdev, &p.head, cmd, sizeof(p) -
+		       sizeof(struct p_header80) +
+		       dgs + peer_req->i.size);
+	p.sector = cpu_to_be64(peer_req->i.sector);
+	p.block_id = peer_req->block_id;
 	p.seq_num = 0;  /* unused */
 
 	/* Only called by our kernel thread.
@@ -2772,11 +2776,11 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
 	ok = sizeof(p) == drbd_send(mdev, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
 	if (ok && dgs) {
 		dgb = mdev->tconn->int_dig_out;
-		drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, e, dgb);
+		drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb);
 		ok = dgs == drbd_send(mdev, mdev->tconn->data.socket, dgb, dgs, 0);
 	}
 	if (ok)
-		ok = _drbd_send_zc_ee(mdev, e);
+		ok = _drbd_send_zc_ee(mdev, peer_req);
 
 	drbd_put_data_sock(mdev);
 
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index ee00ffa04653..e30d52ba3fcf 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2445,7 +2445,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
 
 void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 		   const char *seen_hash, const char *calc_hash,
-		   const struct drbd_peer_request *e)
+		   const struct drbd_peer_request *peer_req)
 {
 	struct cn_msg *cn_reply;
 	struct drbd_nl_cfg_reply *reply;
@@ -2453,7 +2453,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 	struct page *page;
 	unsigned len;
 
-	if (!e)
+	if (!peer_req)
 		return;
 	if (!reason || !reason[0])
 		return;
@@ -2472,8 +2472,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 				GFP_NOIO);
 
 	if (!cn_reply) {
-		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
-			(unsigned long long)e->i.sector, e->i.size);
+		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, "
+			"sector %llu, size %u\n",
+			(unsigned long long)peer_req->i.sector,
+			peer_req->i.size);
 		return;
 	}
 
@@ -2483,15 +2485,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 	tl = tl_add_str(tl, T_dump_ee_reason, reason);
 	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
 	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
-	tl = tl_add_int(tl, T_ee_sector, &e->i.sector);
-	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
+	tl = tl_add_int(tl, T_ee_sector, &peer_req->i.sector);
+	tl = tl_add_int(tl, T_ee_block_id, &peer_req->block_id);
 
 	/* dump the first 32k */
-	len = min_t(unsigned, e->i.size, 32 << 10);
+	len = min_t(unsigned, peer_req->i.size, 32 << 10);
 	put_unaligned(T_ee_data, tl++);
 	put_unaligned(len, tl++);
 
-	page = e->pages;
+	page = peer_req->pages;
 	page_chain_for_each(page) {
 		void *d = kmap_atomic(page, KM_USER0);
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6ba94febfab1..3a9cd31e094b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -189,7 +189,7 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct list_head *le, *tle;
 
 	/* The EEs are always appended to the end of the list. Since
@@ -198,8 +198,8 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
 	   stop to examine the list... */
 
 	list_for_each_safe(le, tle, &mdev->net_ee) {
-		e = list_entry(le, struct drbd_peer_request, w.list);
-		if (drbd_ee_has_active_page(e))
+		peer_req = list_entry(le, struct drbd_peer_request, w.list);
+		if (drbd_ee_has_active_page(peer_req))
 			break;
 		list_move(le, to_be_freed);
 	}
@@ -208,14 +208,14 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 {
 	LIST_HEAD(reclaimed);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 
 	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_net_ee(mdev, e);
+	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+		drbd_free_net_ee(mdev, peer_req);
 }
 
 /**
@@ -313,15 +313,15 @@ struct drbd_peer_request *
 drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
 	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct page *page;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
 		return NULL;
 
-	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
-	if (!e) {
+	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+	if (!peer_req) {
 		if (!(gfp_mask & __GFP_NOWARN))
 			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
 		return NULL;
@@ -331,45 +331,45 @@ drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
 	if (!page)
 		goto fail;
 
-	drbd_clear_interval(&e->i);
-	e->i.size = data_size;
-	e->i.sector = sector;
-	e->i.local = false;
-	e->i.waiting = false;
+	drbd_clear_interval(&peer_req->i);
+	peer_req->i.size = data_size;
+	peer_req->i.sector = sector;
+	peer_req->i.local = false;
+	peer_req->i.waiting = false;
 
-	e->epoch = NULL;
-	e->mdev = mdev;
-	e->pages = page;
-	atomic_set(&e->pending_bios, 0);
-	e->flags = 0;
+	peer_req->epoch = NULL;
+	peer_req->mdev = mdev;
+	peer_req->pages = page;
+	atomic_set(&peer_req->pending_bios, 0);
+	peer_req->flags = 0;
 	/*
 	 * The block_id is opaque to the receiver.  It is not endianness
 	 * converted, and sent back to the sender unchanged.
 	 */
-	e->block_id = id;
+	peer_req->block_id = id;
 
-	return e;
+	return peer_req;
 
  fail:
-	mempool_free(e, drbd_ee_mempool);
+	mempool_free(peer_req, drbd_ee_mempool);
 	return NULL;
 }
 
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
 		       int is_net)
 {
-	if (e->flags & EE_HAS_DIGEST)
-		kfree(e->digest);
-	drbd_pp_free(mdev, e->pages, is_net);
-	D_ASSERT(atomic_read(&e->pending_bios) == 0);
-	D_ASSERT(drbd_interval_empty(&e->i));
-	mempool_free(e, drbd_ee_mempool);
+	if (peer_req->flags & EE_HAS_DIGEST)
+		kfree(peer_req->digest);
+	drbd_pp_free(mdev, peer_req->pages, is_net);
+	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+	D_ASSERT(drbd_interval_empty(&peer_req->i));
+	mempool_free(peer_req, drbd_ee_mempool);
 }
 
 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 {
 	LIST_HEAD(work_list);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 	int count = 0;
 	int is_net = list == &mdev->net_ee;
 
@@ -377,8 +377,8 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &work_list, w.list) {
-		drbd_free_some_ee(mdev, e, is_net);
+	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+		drbd_free_some_ee(mdev, peer_req, is_net);
 		count++;
 	}
 	return count;
@@ -398,7 +398,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 {
 	LIST_HEAD(work_list);
 	LIST_HEAD(reclaimed);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
 
 	spin_lock_irq(&mdev->tconn->req_lock);
@@ -406,17 +406,17 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	list_splice_init(&mdev->done_ee, &work_list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_net_ee(mdev, e);
+	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+		drbd_free_net_ee(mdev, peer_req);
 
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
 	 * all ignore the last argument.
 	 */
-	list_for_each_entry_safe(e, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
 		/* list_del not necessary, next/prev members not touched */
-		ok = e->w.cb(mdev, &e->w, !ok) && ok;
-		drbd_free_ee(mdev, e);
+		ok = peer_req->w.cb(mdev, &peer_req->w, !ok) && ok;
+		drbd_free_ee(mdev, peer_req);
 	}
 	wake_up(&mdev->ee_wait);
 
@@ -1085,7 +1085,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
 /**
  * drbd_submit_ee()
  * @mdev:	DRBD device.
- * @e:		peer request
+ * @peer_req:	peer request
  * @rw:	flag field, see bio->bi_rw
  *
  * May spread the pages to multiple bios,
@@ -1099,14 +1099,14 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
  * on certain Xen deployments.
  */
 /* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
 		   const unsigned rw, const int fault_type)
 {
 	struct bio *bios = NULL;
 	struct bio *bio;
-	struct page *page = e->pages;
-	sector_t sector = e->i.sector;
-	unsigned ds = e->i.size;
+	struct page *page = peer_req->pages;
+	sector_t sector = peer_req->i.sector;
+	unsigned ds = peer_req->i.size;
 	unsigned n_bios = 0;
 	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
 	int err = -ENOMEM;
@@ -1121,11 +1121,11 @@ next_bio:
 		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
 		goto fail;
 	}
-	/* > e->i.sector, unless this is the first bio */
+	/* > peer_req->i.sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
-	bio->bi_private = e;
+	bio->bi_private = peer_req;
 	bio->bi_end_io = drbd_endio_sec;
 
 	bio->bi_next = bios;
@@ -1155,7 +1155,7 @@ next_bio:
 	D_ASSERT(page == NULL);
 	D_ASSERT(ds == 0);
 
-	atomic_set(&e->pending_bios, n_bios);
+	atomic_set(&peer_req->pending_bios, n_bios);
 	do {
 		bio = bios;
 		bios = bios->bi_next;
@@ -1175,9 +1175,9 @@ fail:
 }
 
 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
-					     struct drbd_peer_request *e)
+					     struct drbd_peer_request *peer_req)
 {
-	struct drbd_interval *i = &e->i;
+	struct drbd_interval *i = &peer_req->i;
 
 	drbd_remove_interval(&mdev->write_requests, i);
 	drbd_clear_interval(i);
@@ -1266,7 +1266,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	      int data_size) __must_hold(local)
 {
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct page *page;
 	int dgs, ds, rr;
 	void *dig_in = mdev->tconn->int_dig_in;
@@ -1309,12 +1309,12 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place. */
-	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
-	if (!e)
+	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
+	if (!peer_req)
 		return NULL;
 
 	ds = data_size;
-	page = e->pages;
+	page = peer_req->pages;
 	page_chain_for_each(page) {
 		unsigned len = min_t(int, ds, PAGE_SIZE);
 		data = kmap(page);
@@ -1325,7 +1325,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 		}
 		kunmap(page);
 		if (rr != len) {
-			drbd_free_ee(mdev, e);
+			drbd_free_ee(mdev, peer_req);
 			if (!signal_pending(current))
 				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
 				rr, len);
@@ -1335,18 +1335,18 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	}
 
 	if (dgs) {
-		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, e, dig_vv);
+		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
 		if (memcmp(dig_in, dig_vv, dgs)) {
 			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
 				(unsigned long long)sector, data_size);
 			drbd_bcast_ee(mdev, "digest failed",
-					dgs, dig_in, dig_vv, e);
-			drbd_free_ee(mdev, e);
+					dgs, dig_in, dig_vv, peer_req);
+			drbd_free_ee(mdev, peer_req);
 			return NULL;
 		}
 	}
 	mdev->recv_cnt += data_size>>9;
-	return e;
+	return peer_req;
 }
 
 /* drbd_drain_block() just takes a data block
@@ -1445,20 +1445,20 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
  * drbd_process_done_ee() by asender only */
 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
-	sector_t sector = e->i.sector;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
+	sector_t sector = peer_req->i.sector;
 	int ok;
 
-	D_ASSERT(drbd_interval_empty(&e->i));
+	D_ASSERT(drbd_interval_empty(&peer_req->i));
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
-		drbd_set_in_sync(mdev, sector, e->i.size);
-		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+		drbd_set_in_sync(mdev, sector, peer_req->i.size);
+		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
 	} else {
 		/* Record failure to sync */
-		drbd_rs_failed_io(mdev, sector, e->i.size);
+		drbd_rs_failed_io(mdev, sector, peer_req->i.size);
 
-		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+		ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
 	}
 	dec_unacked(mdev);
 
@@ -1467,10 +1467,10 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
 
 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 
-	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
-	if (!e)
+	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+	if (!peer_req)
 		goto fail;
 
 	dec_rs_pending(mdev);
@@ -1479,23 +1479,23 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 	/* corresponding dec_unacked() in e_end_resync_block()
 	 * respective _drbd_clear_done_ee */
 
-	e->w.cb = e_end_resync_block;
+	peer_req->w.cb = e_end_resync_block;
 
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add(&e->w.list, &mdev->sync_ee);
+	list_add(&peer_req->w.list, &mdev->sync_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+	if (drbd_submit_ee(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 fail:
 	put_ldev(mdev);
 	return false;
@@ -1582,21 +1582,21 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
  */
 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
-	sector_t sector = e->i.sector;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
+	sector_t sector = peer_req->i.sector;
 	int ok = 1, pcmd;
 
 	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
-		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
 				mdev->state.conn <= C_PAUSED_SYNC_T &&
-				e->flags & EE_MAY_SET_IN_SYNC) ?
+				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
 				P_RS_WRITE_ACK : P_WRITE_ACK;
-			ok &= drbd_send_ack(mdev, pcmd, e);
+			ok &= drbd_send_ack(mdev, pcmd, peer_req);
 			if (pcmd == P_RS_WRITE_ACK)
-				drbd_set_in_sync(mdev, sector, e->i.size);
+				drbd_set_in_sync(mdev, sector, peer_req->i.size);
 		} else {
-			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+			ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
 			/* we expect it to be marked out of sync anyways...
 			 * maybe assert this? */
 		}
@@ -1606,28 +1606,28 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
 	if (mdev->tconn->net_conf->two_primaries) {
 		spin_lock_irq(&mdev->tconn->req_lock);
-		D_ASSERT(!drbd_interval_empty(&e->i));
-		drbd_remove_epoch_entry_interval(mdev, e);
+		D_ASSERT(!drbd_interval_empty(&peer_req->i));
+		drbd_remove_epoch_entry_interval(mdev, peer_req);
 		spin_unlock_irq(&mdev->tconn->req_lock);
 	} else
-		D_ASSERT(drbd_interval_empty(&e->i));
+		D_ASSERT(drbd_interval_empty(&peer_req->i));
 
-	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
 
 	return ok;
 }
 
 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
 	int ok = 1;
 
 	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
-	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
+	ok = drbd_send_ack(mdev, P_DISCARD_ACK, peer_req);
 
 	spin_lock_irq(&mdev->tconn->req_lock);
-	D_ASSERT(!drbd_interval_empty(&e->i));
-	drbd_remove_epoch_entry_interval(mdev, e);
+	D_ASSERT(!drbd_interval_empty(&peer_req->i));
+	drbd_remove_epoch_entry_interval(mdev, peer_req);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	dec_unacked(mdev);
@@ -1731,7 +1731,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 			unsigned int data_size)
 {
 	sector_t sector;
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct p_data *p = &mdev->tconn->data.rbuf.data;
 	int rw = WRITE;
 	u32 dp_flags;
@@ -1753,24 +1753,24 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	 * the end of this function. */
 
 	sector = be64_to_cpu(p->sector);
-	e = read_in_block(mdev, p->block_id, sector, data_size);
-	if (!e) {
+	peer_req = read_in_block(mdev, p->block_id, sector, data_size);
+	if (!peer_req) {
 		put_ldev(mdev);
 		return false;
 	}
 
-	e->w.cb = e_end_block;
+	peer_req->w.cb = e_end_block;
 
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(mdev, dp_flags);
 
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
-		e->flags |= EE_MAY_SET_IN_SYNC;
+		peer_req->flags |= EE_MAY_SET_IN_SYNC;
 
 	spin_lock(&mdev->epoch_lock);
-	e->epoch = mdev->current_epoch;
-	atomic_inc(&e->epoch->epoch_size);
-	atomic_inc(&e->epoch->active);
+	peer_req->epoch = mdev->current_epoch;
+	atomic_inc(&peer_req->epoch->epoch_size);
+	atomic_inc(&peer_req->epoch->active);
 	spin_unlock(&mdev->epoch_lock);
 
 	/* I'm the receiver, I do hold a net_cnt reference. */
@@ -1779,7 +1779,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	} else {
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
-		const int size = e->i.size;
+		const int size = peer_req->i.size;
 		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
 		DEFINE_WAIT(wait);
 		int first;
@@ -1856,8 +1856,8 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 			dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
 				  (unsigned long long)sector);
 			inc_unacked(mdev);
-			e->w.cb = e_send_discard_ack;
-			list_add_tail(&e->w.list, &mdev->done_ee);
+			peer_req->w.cb = e_send_discard_ack;
+			list_add_tail(&peer_req->w.list, &mdev->done_ee);
 
 			spin_unlock_irq(&mdev->tconn->req_lock);
 
@@ -1894,10 +1894,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 		}
 		finish_wait(&mdev->misc_wait, &wait);
 
-		drbd_insert_interval(&mdev->write_requests, &e->i);
+		drbd_insert_interval(&mdev->write_requests, &peer_req->i);
 	}
 
-	list_add(&e->w.list, &mdev->active_ee);
+	list_add(&peer_req->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	switch (mdev->tconn->net_conf->wire_protocol) {
@@ -1909,7 +1909,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	case DRBD_PROT_B:
 		/* I really don't like it that the receiver thread
 		 * sends on the msock, but anyways */
-		drbd_send_ack(mdev, P_RECV_ACK, e);
+		drbd_send_ack(mdev, P_RECV_ACK, peer_req);
 		break;
 	case DRBD_PROT_A:
 		/* nothing to do */
@@ -1918,28 +1918,28 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 
 	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
-		drbd_set_out_of_sync(mdev, e->i.sector, e->i.size);
-		e->flags |= EE_CALL_AL_COMPLETE_IO;
-		e->flags &= ~EE_MAY_SET_IN_SYNC;
-		drbd_al_begin_io(mdev, e->i.sector);
+		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+		drbd_al_begin_io(mdev, peer_req->i.sector);
 	}
 
-	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+	if (drbd_submit_ee(mdev, peer_req, rw, DRBD_FAULT_DT_WR) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
-	drbd_remove_epoch_entry_interval(mdev, e);
+	list_del(&peer_req->w.list);
+	drbd_remove_epoch_entry_interval(mdev, peer_req);
 	spin_unlock_irq(&mdev->tconn->req_lock);
-	if (e->flags & EE_CALL_AL_COMPLETE_IO)
-		drbd_al_complete_io(mdev, e->i.sector);
+	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+		drbd_al_complete_io(mdev, peer_req->i.sector);
 
 out_interrupted:
-	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
 	put_ldev(mdev);
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	return false;
 }
 
@@ -2015,7 +2015,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 {
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct digest_info *di = NULL;
 	int size, verb;
 	unsigned int fault_type;
@@ -2066,21 +2066,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place. */
-	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
-	if (!e) {
+	peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
+	if (!peer_req) {
 		put_ldev(mdev);
 		return false;
 	}
 
 	switch (cmd) {
 	case P_DATA_REQUEST:
-		e->w.cb = w_e_end_data_req;
+		peer_req->w.cb = w_e_end_data_req;
 		fault_type = DRBD_FAULT_DT_RD;
 		/* application IO, don't drbd_rs_begin_io */
 		goto submit;
 
 	case P_RS_DATA_REQUEST:
-		e->w.cb = w_e_end_rsdata_req;
+		peer_req->w.cb = w_e_end_rsdata_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		/* used in the sector offset progress display */
 		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2096,21 +2096,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 		di->digest_size = digest_size;
 		di->digest = (((char *)di)+sizeof(struct digest_info));
 
-		e->digest = di;
-		e->flags |= EE_HAS_DIGEST;
+		peer_req->digest = di;
+		peer_req->flags |= EE_HAS_DIGEST;
 
 		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
 			goto out_free_e;
 
 		if (cmd == P_CSUM_RS_REQUEST) {
 			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
-			e->w.cb = w_e_end_csum_rs_req;
+			peer_req->w.cb = w_e_end_csum_rs_req;
 			/* used in the sector offset progress display */
 			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
 		} else if (cmd == P_OV_REPLY) {
 			/* track progress, we may need to throttle */
 			atomic_add(size >> 9, &mdev->rs_sect_in);
-			e->w.cb = w_e_end_ov_reply;
+			peer_req->w.cb = w_e_end_ov_reply;
 			dec_rs_pending(mdev);
 			/* drbd_rs_begin_io done when we sent this request,
 			 * but accounting still needs to be done. */
@@ -2134,7 +2134,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 			dev_info(DEV, "Online Verify start sector: %llu\n",
 					(unsigned long long)sector);
 		}
-		e->w.cb = w_e_end_ov_req;
+		peer_req->w.cb = w_e_end_ov_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		break;
 
@@ -2178,22 +2178,22 @@ submit_for_resync:
 submit:
 	inc_unacked(mdev);
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add_tail(&e->w.list, &mdev->read_ee);
+	list_add_tail(&peer_req->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+	if (drbd_submit_ee(mdev, peer_req, READ, fault_type) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
 	put_ldev(mdev);
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	return false;
 }
 
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 06628d1504b8..f13d56c2bf05 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -80,47 +80,47 @@ void drbd_md_io_complete(struct bio *bio, int error)
 /* reads on behalf of the partner,
  * "submitted" by the receiver
  */
-void drbd_endio_read_sec_final(struct drbd_peer_request *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->read_cnt += e->i.size >> 9;
-	list_del(&e->w.list);
+	mdev->read_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
-	drbd_queue_work(&mdev->tconn->data.work, &e->w);
+	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
 	put_ldev(mdev);
 }
 
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 	sector_t e_sector;
 	int do_wake;
 	u64 block_id;
 	int do_al_complete_io;
 
-	/* after we moved e to done_ee,
+	/* after we moved peer_req to done_ee,
 	 * we may no longer access it,
 	 * it may be freed/reused already!
 	 * (as soon as we release the req_lock) */
-	e_sector = e->i.sector;
-	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
-	block_id = e->block_id;
+	e_sector = peer_req->i.sector;
+	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+	block_id = peer_req->block_id;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->writ_cnt += e->i.size >> 9;
-	list_del(&e->w.list); /* has been on active_ee or sync_ee */
-	list_add_tail(&e->w.list, &mdev->done_ee);
+	mdev->writ_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
+	list_add_tail(&peer_req->w.list, &mdev->done_ee);
 
 	/*
 	 * Do not remove from the write_requests tree here: we did not send the
@@ -132,7 +132,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
 
 	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
 
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
@@ -154,20 +154,20 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
  */
 void drbd_endio_sec(struct bio *bio, int error)
 {
-	struct drbd_peer_request *e = bio->bi_private;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_peer_request *peer_req = bio->bi_private;
+	struct drbd_conf *mdev = peer_req->mdev;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 
 	if (error && __ratelimit(&drbd_ratelimit_state))
 		dev_warn(DEV, "%s: error=%d s=%llus\n",
 				is_write ? "write" : "read", error,
-				(unsigned long long)e->i.sector);
+				(unsigned long long)peer_req->i.sector);
 	if (!error && !uptodate) {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
 					is_write ? "write" : "read",
-					(unsigned long long)e->i.sector);
+					(unsigned long long)peer_req->i.sector);
 		/* strange behavior of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
 		 * but do not return any error?! */
@@ -175,14 +175,14 @@ void drbd_endio_sec(struct bio *bio, int error)
 	}
 
 	if (error)
-		set_bit(__EE_WAS_ERROR, &e->flags);
+		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
-	if (atomic_dec_and_test(&e->pending_bios)) {
+	if (atomic_dec_and_test(&peer_req->pending_bios)) {
 		if (is_write)
-			drbd_endio_write_sec_final(e);
+			drbd_endio_write_sec_final(peer_req);
 		else
-			drbd_endio_read_sec_final(e);
+			drbd_endio_read_sec_final(peer_req);
 	}
 }
 
@@ -248,11 +248,11 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 }
 
 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
-		  struct drbd_peer_request *e, void *digest)
+		  struct drbd_peer_request *peer_req, void *digest)
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct page *page = e->pages;
+	struct page *page = peer_req->pages;
 	struct page *tmp;
 	unsigned len;
 
@@ -269,7 +269,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
 		page = tmp;
 	}
 	/* and now the last, possibly only partially used page */
-	len = e->i.size & (PAGE_SIZE - 1);
+	len = peer_req->i.size & (PAGE_SIZE - 1);
 	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
 	crypto_hash_update(&desc, &sg, sg.length);
 	crypto_hash_final(&desc, digest);
@@ -298,7 +298,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 /* TODO merge common code with w_e_end_ov_req */
 int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req =
+		container_of(w, struct drbd_peer_request, w);
 	int digest_size;
 	void *digest;
 	int ok = 1;
@@ -306,22 +307,22 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	if (likely((e->flags & EE_WAS_ERROR) != 0))
+	if (likely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
 	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
-		sector_t sector = e->i.sector;
-		unsigned int size = e->i.size;
-		drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+		sector_t sector = peer_req->i.sector;
+		unsigned int size = peer_req->i.size;
+		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
 		/* Free e and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
 		 * congestion as well, because our receiver blocks in
 		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-		drbd_free_ee(mdev, e);
-		e = NULL;
+		drbd_free_ee(mdev, peer_req);
+		peer_req = NULL;
 		inc_rs_pending(mdev);
 		ok = drbd_send_drequest_csum(mdev, sector, size,
 					     digest, digest_size,
@@ -333,8 +334,8 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	}
 
 out:
-	if (e)
-		drbd_free_ee(mdev, e);
+	if (peer_req)
+		drbd_free_ee(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -345,7 +346,7 @@ out:
 
 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 
 	if (!get_ldev(mdev))
 		return -EIO;
@@ -355,17 +356,17 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 
 	/* GFP_TRY, because if there is no memory available right now, this may
 	 * be rescheduled for later. It is "only" background resync, after all. */
-	e = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
-	if (!e)
+	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
+	if (!peer_req)
 		goto defer;
 
-	e->w.cb = w_e_send_csum;
+	peer_req->w.cb = w_e_send_csum;
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add(&e->w.list, &mdev->read_ee);
+	list_add(&peer_req->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(size >> 9, &mdev->rs_sect_ev);
-	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+	if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
 		return 0;
 
 	/* If it failed because of ENOMEM, retry should help. If it failed
@@ -373,10 +374,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 defer:
 	put_ldev(mdev);
 	return -EAGAIN;
@@ -901,19 +902,19 @@ out:
 }
 
 /* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
 {
-	if (drbd_ee_has_active_page(e)) {
+	if (drbd_ee_has_active_page(peer_req)) {
 		/* This might happen if sendpage() has not finished */
-		int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
+		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &mdev->pp_in_use_by_net);
 		atomic_sub(i, &mdev->pp_in_use);
 		spin_lock_irq(&mdev->tconn->req_lock);
-		list_add_tail(&e->w.list, &mdev->net_ee);
+		list_add_tail(&peer_req->w.list, &mdev->net_ee);
 		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 }
 
 /**
@@ -924,28 +925,28 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
924 */ 925 */
925int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 926int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
926{ 927{
927 struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w); 928 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
928 int ok; 929 int ok;
929 930
930 if (unlikely(cancel)) { 931 if (unlikely(cancel)) {
931 drbd_free_ee(mdev, e); 932 drbd_free_ee(mdev, peer_req);
932 dec_unacked(mdev); 933 dec_unacked(mdev);
933 return 1; 934 return 1;
934 } 935 }
935 936
936 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 937 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
937 ok = drbd_send_block(mdev, P_DATA_REPLY, e); 938 ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
938 } else { 939 } else {
939 if (__ratelimit(&drbd_ratelimit_state)) 940 if (__ratelimit(&drbd_ratelimit_state))
940 dev_err(DEV, "Sending NegDReply. sector=%llus.\n", 941 dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
941 (unsigned long long)e->i.sector); 942 (unsigned long long)peer_req->i.sector);
942 943
943 ok = drbd_send_ack(mdev, P_NEG_DREPLY, e); 944 ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
944 } 945 }
945 946
946 dec_unacked(mdev); 947 dec_unacked(mdev);
947 948
948 move_to_net_ee_or_free(mdev, e); 949 move_to_net_ee_or_free(mdev, peer_req);
949 950
950 if (unlikely(!ok)) 951 if (unlikely(!ok))
951 dev_err(DEV, "drbd_send_block() failed\n"); 952 dev_err(DEV, "drbd_send_block() failed\n");
@@ -960,26 +961,26 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
960 */ 961 */
961int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 962int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
962{ 963{
963 struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w); 964 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
964 int ok; 965 int ok;
965 966
966 if (unlikely(cancel)) { 967 if (unlikely(cancel)) {
967 drbd_free_ee(mdev, e); 968 drbd_free_ee(mdev, peer_req);
968 dec_unacked(mdev); 969 dec_unacked(mdev);
969 return 1; 970 return 1;
970 } 971 }
971 972
972 if (get_ldev_if_state(mdev, D_FAILED)) { 973 if (get_ldev_if_state(mdev, D_FAILED)) {
973 drbd_rs_complete_io(mdev, e->i.sector); 974 drbd_rs_complete_io(mdev, peer_req->i.sector);
974 put_ldev(mdev); 975 put_ldev(mdev);
975 } 976 }
976 977
977 if (mdev->state.conn == C_AHEAD) { 978 if (mdev->state.conn == C_AHEAD) {
978 ok = drbd_send_ack(mdev, P_RS_CANCEL, e); 979 ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
979 } else if (likely((e->flags & EE_WAS_ERROR) == 0)) { 980 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
980 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { 981 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
981 inc_rs_pending(mdev); 982 inc_rs_pending(mdev);
982 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); 983 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
983 } else { 984 } else {
984 if (__ratelimit(&drbd_ratelimit_state)) 985 if (__ratelimit(&drbd_ratelimit_state))
985 dev_err(DEV, "Not sending RSDataReply, " 986 dev_err(DEV, "Not sending RSDataReply, "
@@ -989,17 +990,17 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
989 } else { 990 } else {
990 if (__ratelimit(&drbd_ratelimit_state)) 991 if (__ratelimit(&drbd_ratelimit_state))
991 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n", 992 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
992 (unsigned long long)e->i.sector); 993 (unsigned long long)peer_req->i.sector);
993 994
994 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); 995 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
995 996
996 /* update resync data with failure */ 997 /* update resync data with failure */
997 drbd_rs_failed_io(mdev, e->i.sector, e->i.size); 998 drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
998 } 999 }
999 1000
1000 dec_unacked(mdev); 1001 dec_unacked(mdev);
1001 1002
1002 move_to_net_ee_or_free(mdev, e); 1003 move_to_net_ee_or_free(mdev, peer_req);
1003 1004
1004 if (unlikely(!ok)) 1005 if (unlikely(!ok))
1005 dev_err(DEV, "drbd_send_block() failed\n"); 1006 dev_err(DEV, "drbd_send_block() failed\n");
@@ -1008,26 +1009,26 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1008 1009
1009int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1010int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1010{ 1011{
1011 struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w); 1012 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1012 struct digest_info *di; 1013 struct digest_info *di;
1013 int digest_size; 1014 int digest_size;
1014 void *digest = NULL; 1015 void *digest = NULL;
1015 int ok, eq = 0; 1016 int ok, eq = 0;
1016 1017
1017 if (unlikely(cancel)) { 1018 if (unlikely(cancel)) {
1018 drbd_free_ee(mdev, e); 1019 drbd_free_ee(mdev, peer_req);
1019 dec_unacked(mdev); 1020 dec_unacked(mdev);
1020 return 1; 1021 return 1;
1021 } 1022 }
1022 1023
1023 if (get_ldev(mdev)) { 1024 if (get_ldev(mdev)) {
1024 drbd_rs_complete_io(mdev, e->i.sector); 1025 drbd_rs_complete_io(mdev, peer_req->i.sector);
1025 put_ldev(mdev); 1026 put_ldev(mdev);
1026 } 1027 }
1027 1028
1028 di = e->digest; 1029 di = peer_req->digest;
1029 1030
1030 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1031 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1031 /* quick hack to try to avoid a race against reconfiguration. 1032 /* quick hack to try to avoid a race against reconfiguration.
1032 * a real fix would be much more involved, 1033 * a real fix would be much more involved,
1033 * introducing more locking mechanisms */ 1034 * introducing more locking mechanisms */
@@ -1037,31 +1038,31 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1037 digest = kmalloc(digest_size, GFP_NOIO); 1038 digest = kmalloc(digest_size, GFP_NOIO);
1038 } 1039 }
1039 if (digest) { 1040 if (digest) {
1040 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest); 1041 drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
1041 eq = !memcmp(digest, di->digest, digest_size); 1042 eq = !memcmp(digest, di->digest, digest_size);
1042 kfree(digest); 1043 kfree(digest);
1043 } 1044 }
1044 1045
1045 if (eq) { 1046 if (eq) {
1046 drbd_set_in_sync(mdev, e->i.sector, e->i.size); 1047 drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
1047 /* rs_same_csums unit is BM_BLOCK_SIZE */ 1048 /* rs_same_csums unit is BM_BLOCK_SIZE */
1048 mdev->rs_same_csum += e->i.size >> BM_BLOCK_SHIFT; 1049 mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
1049 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); 1050 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
1050 } else { 1051 } else {
1051 inc_rs_pending(mdev); 1052 inc_rs_pending(mdev);
1052 e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */ 1053 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1053 e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */ 1054 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1054 kfree(di); 1055 kfree(di);
1055 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); 1056 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
1056 } 1057 }
1057 } else { 1058 } else {
1058 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); 1059 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
1059 if (__ratelimit(&drbd_ratelimit_state)) 1060 if (__ratelimit(&drbd_ratelimit_state))
1060 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); 1061 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1061 } 1062 }
1062 1063
1063 dec_unacked(mdev); 1064 dec_unacked(mdev);
1064 move_to_net_ee_or_free(mdev, e); 1065 move_to_net_ee_or_free(mdev, peer_req);
1065 1066
1066 if (unlikely(!ok)) 1067 if (unlikely(!ok))
1067 dev_err(DEV, "drbd_send_block/ack() failed\n"); 1068 dev_err(DEV, "drbd_send_block/ack() failed\n");
@@ -1071,9 +1072,9 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1071/* TODO merge common code with w_e_send_csum */ 1072/* TODO merge common code with w_e_send_csum */
1072int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1073int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1073{ 1074{
1074 struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w); 1075 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1075 sector_t sector = e->i.sector; 1076 sector_t sector = peer_req->i.sector;
1076 unsigned int size = e->i.size; 1077 unsigned int size = peer_req->i.size;
1077 int digest_size; 1078 int digest_size;
1078 void *digest; 1079 void *digest;
1079 int ok = 1; 1080 int ok = 1;
@@ -1088,8 +1089,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1088 goto out; 1089 goto out;
1089 } 1090 }
1090 1091
1091 if (likely(!(e->flags & EE_WAS_ERROR))) 1092 if (likely(!(peer_req->flags & EE_WAS_ERROR)))
1092 drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); 1093 drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
1093 else 1094 else
1094 memset(digest, 0, digest_size); 1095 memset(digest, 0, digest_size);
1095 1096
@@ -1098,8 +1099,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1098 * some distributed deadlock, if the other side blocks on 1099 * some distributed deadlock, if the other side blocks on
1099 * congestion as well, because our receiver blocks in 1100 * congestion as well, because our receiver blocks in
1100 * drbd_pp_alloc due to pp_in_use > max_buffers. */ 1101 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1101 drbd_free_ee(mdev, e); 1102 drbd_free_ee(mdev, peer_req);
1102 e = NULL; 1103 peer_req = NULL;
1103 inc_rs_pending(mdev); 1104 inc_rs_pending(mdev);
1104 ok = drbd_send_drequest_csum(mdev, sector, size, 1105 ok = drbd_send_drequest_csum(mdev, sector, size,
1105 digest, digest_size, 1106 digest, digest_size,
@@ -1109,8 +1110,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1109 kfree(digest); 1110 kfree(digest);
1110 1111
1111out: 1112out:
1112 if (e) 1113 if (peer_req)
1113 drbd_free_ee(mdev, e); 1114 drbd_free_ee(mdev, peer_req);
1114 dec_unacked(mdev); 1115 dec_unacked(mdev);
1115 return ok; 1116 return ok;
1116} 1117}
@@ -1128,16 +1129,16 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1128 1129
1129int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1130int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1130{ 1131{
1131 struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w); 1132 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1132 struct digest_info *di; 1133 struct digest_info *di;
1133 void *digest; 1134 void *digest;
1134 sector_t sector = e->i.sector; 1135 sector_t sector = peer_req->i.sector;
1135 unsigned int size = e->i.size; 1136 unsigned int size = peer_req->i.size;
1136 int digest_size; 1137 int digest_size;
1137 int ok, eq = 0; 1138 int ok, eq = 0;
1138 1139
1139 if (unlikely(cancel)) { 1140 if (unlikely(cancel)) {
1140 drbd_free_ee(mdev, e); 1141 drbd_free_ee(mdev, peer_req);
1141 dec_unacked(mdev); 1142 dec_unacked(mdev);
1142 return 1; 1143 return 1;
1143 } 1144 }
@@ -1145,17 +1146,17 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1145 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all 1146 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1146 * the resync lru has been cleaned up already */ 1147 * the resync lru has been cleaned up already */
1147 if (get_ldev(mdev)) { 1148 if (get_ldev(mdev)) {
1148 drbd_rs_complete_io(mdev, e->i.sector); 1149 drbd_rs_complete_io(mdev, peer_req->i.sector);
1149 put_ldev(mdev); 1150 put_ldev(mdev);
1150 } 1151 }
1151 1152
1152 di = e->digest; 1153 di = peer_req->digest;
1153 1154
1154 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1155 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1155 digest_size = crypto_hash_digestsize(mdev->verify_tfm); 1156 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1156 digest = kmalloc(digest_size, GFP_NOIO); 1157 digest = kmalloc(digest_size, GFP_NOIO);
1157 if (digest) { 1158 if (digest) {
1158 drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); 1159 drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
1159 1160
1160 D_ASSERT(digest_size == di->digest_size); 1161 D_ASSERT(digest_size == di->digest_size);
1161 eq = !memcmp(digest, di->digest, digest_size); 1162 eq = !memcmp(digest, di->digest, digest_size);
@@ -1168,7 +1169,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1168 * some distributed deadlock, if the other side blocks on 1169 * some distributed deadlock, if the other side blocks on
1169 * congestion as well, because our receiver blocks in 1170 * congestion as well, because our receiver blocks in
1170 * drbd_pp_alloc due to pp_in_use > max_buffers. */ 1171 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1171 drbd_free_ee(mdev, e); 1172 drbd_free_ee(mdev, peer_req);
1172 if (!eq) 1173 if (!eq)
1173 drbd_ov_oos_found(mdev, sector, size); 1174 drbd_ov_oos_found(mdev, sector, size);
1174 else 1175 else