author    Andreas Gruenbacher <agruen@linbit.com>  2011-04-06 10:16:56 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2012-11-08 10:45:12 -0500
commit    3967deb192e147328e1a6085a443ea6afef54dbb (patch)
tree      622bea7b53f44f0d86fa1ea4d5a8e18aba85b352 /drivers/block/drbd/drbd_worker.c
parent    0db55363cb1e6cfe2bedecb7e47c05f8992c612e (diff)
drbd: Rename drbd_free_ee() and variants to *_peer_req()
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
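
For reference, the change is a mechanical rename: every call to drbd_free_ee() becomes drbd_free_peer_req() with the same arguments, matching the drbd_peer_request naming used elsewhere in the driver. A minimal before/after sketch, with the signature inferred from the call sites in this diff (the actual declaration lives in DRBD's shared header, which this page does not show):

	/* Sketch only; signature inferred from the callers below. Any
	 * return value is unused at every call site in this file. */
	void drbd_free_ee(struct drbd_conf *mdev,
			  struct drbd_peer_request *peer_req);      /* old name */
	void drbd_free_peer_req(struct drbd_conf *mdev,
				struct drbd_peer_request *peer_req); /* new name */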
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r--	drivers/block/drbd/drbd_worker.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 396f0d019eaf..befbb56443b8 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -319,7 +319,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, peer_req);
+	drbd_free_peer_req(mdev, peer_req);
 	peer_req = NULL;
 	inc_rs_pending(mdev);
 	err = drbd_send_drequest_csum(mdev, sector, size,
@@ -333,7 +333,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 
 out:
 	if (peer_req)
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 
 	if (unlikely(err))
 		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -376,7 +376,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, peer_req);
+	drbd_free_peer_req(mdev, peer_req);
 defer:
 	put_ldev(mdev);
 	return -EAGAIN;
@@ -900,7 +900,7 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
 		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 }
 
 /**
@@ -916,7 +916,7 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 	int err;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 		dec_unacked(mdev);
 		return 0;
 	}
@@ -953,7 +953,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 	int err;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 		dec_unacked(mdev);
 		return 0;
 	}
@@ -1005,7 +1005,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 	int err, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 		dec_unacked(mdev);
 		return 0;
 	}
@@ -1088,7 +1088,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, peer_req);
+	drbd_free_peer_req(mdev, peer_req);
 	peer_req = NULL;
 	inc_rs_pending(mdev);
 	err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
@@ -1098,7 +1098,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 
 out:
 	if (peer_req)
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 	dec_unacked(mdev);
 	return err;
 }
@@ -1126,7 +1126,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	int err, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, peer_req);
+		drbd_free_peer_req(mdev, peer_req);
 		dec_unacked(mdev);
 		return 0;
 	}
@@ -1157,7 +1157,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, peer_req);
+	drbd_free_peer_req(mdev, peer_req);
 	if (!eq)
 		drbd_ov_out_of_sync_found(mdev, sector, size);
 	else
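
A pattern worth noting in three of these hunks: per the in-code comment, drbd_free_peer_req() is deliberately called before drbd_send_drequest_csum(), so that a send blocked on a congested peer does not keep pool pages pinned and starve the receiver inside drbd_pp_alloc once pp_in_use reaches max_buffers. Below is a toy, self-contained sketch of that ordering; the pool type and helper names are hypothetical stand-ins for illustration, not DRBD's API.

	#include <stdio.h>

	/* Toy bounded buffer pool, standing in for DRBD's page pool
	 * (drbd_pp_alloc / pp_in_use > max_buffers). */
	struct pool { int in_use; int max_buffers; };

	static int pool_get(struct pool *p)
	{
		if (p->in_use >= p->max_buffers)
			return -1;	/* the real driver would block here */
		p->in_use++;
		return 0;
	}

	static void pool_put(struct pool *p)
	{
		p->in_use--;
	}

	/* The ordering the recurring comment asks for: return the buffer
	 * to the pool BEFORE the potentially blocking send, so a receiver
	 * on the same pool can still satisfy its pool_get(). */
	static void send_csum_request(struct pool *p)
	{
		if (pool_get(p) != 0)
			return;
		/* ... compute the digest from the buffer ... */
		pool_put(p);	/* free first (drbd_free_peer_req) ... */
		/* ... then send; blocking here no longer holds a buffer */
		printf("sending with %d of %d pool buffers in use\n",
		       p->in_use, p->max_buffers);
	}

	int main(void)
	{
		struct pool p = { 0, 1 };
		send_csum_request(&p);
		return 0;
	}

With max_buffers of 1, freeing before the send leaves the single buffer available to the receive path; freeing after would reproduce the distributed-deadlock window the comment describes.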