author    Andreas Gruenbacher <agruen@kernel.org>  2011-09-13 04:39:41 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2014-02-17 10:50:47 -0500
commit    6780139c0ab96fc9c605bea33db30fc9378016b7 (patch)
tree      2a980dad18e5f9c3c390116dffdd241f76783b54
parent    81f0ffd2a2fdbd233a8ccebd4ae152da0a1b15f8 (diff)
drbd: Use the right peer device

Use the right peer device in the w_e_* (peer request) callbacks and in
the peer request I/O completion handlers.

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
 drivers/block/drbd/drbd_worker.c | 69 ++++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 38 insertions(+), 31 deletions(-)
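Every hunk below applies the same mechanical change: these callbacks and
completion handlers already hold a peer_req, so the peer device is read once
from peer_req->peer_device and reused, instead of being re-derived through
first_peer_device(device), which silently picks the first peer rather than the
one the request belongs to. A reduced sketch of the relationship and of the
access pattern before and after (illustrative only: the structs are trimmed to
the fields used in this file, and the helper name request_connection is
invented for the sketch):

/* Illustrative sketch, not the real DRBD definitions: the structs are
 * trimmed to the fields these callbacks use. */
struct drbd_connection { int unused; };
struct drbd_device { int unused; };

struct drbd_peer_device {
	struct drbd_device *device;
	struct drbd_connection *connection;
};

struct drbd_peer_request {
	struct drbd_peer_device *peer_device;	/* fixed when the request is created */
};

/* Before: first_peer_device(device)->connection
 *         re-derives a peer device from the device and silently assumes
 *         the first peer is the one this request belongs to.
 * After:  read the peer device off the request itself: */
static struct drbd_connection *request_connection(struct drbd_peer_request *peer_req)
{
	struct drbd_peer_device *peer_device = peer_req->peer_device;

	return peer_device->connection;
}
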
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index be9c4b894988..2c4ce42c3657 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -100,7 +100,8 @@ void drbd_md_io_complete(struct bio *bio, int error)
 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 
 	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->read_cnt += peer_req->i.size >> 9;
@@ -111,8 +112,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 	__drbd_chk_io_error(device, DRBD_READ_ERROR);
 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 
-	drbd_queue_work(&first_peer_device(device)->connection->sender_work,
-			&peer_req->w);
+	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
 	put_ldev(device);
 }
 
@@ -121,7 +121,8 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct drbd_interval i;
 	int do_wake;
 	u64 block_id;
@@ -162,7 +163,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 	if (do_al_complete_io)
 		drbd_al_complete_io(device, &i);
 
-	wake_asender(first_peer_device(device)->connection);
+	wake_asender(peer_device->connection);
 	put_ldev(device);
 }
 
@@ -334,7 +335,8 @@ void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
 static int w_e_send_csum(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int digest_size;
 	void *digest;
 	int err = 0;
@@ -345,12 +347,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
+	digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
 		sector_t sector = peer_req->i.sector;
 		unsigned int size = peer_req->i.size;
-		drbd_csum_ee(first_peer_device(device)->connection->csums_tfm, peer_req, digest);
+		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
 		/* Free peer_req and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
@@ -359,7 +361,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 		drbd_free_peer_req(device, peer_req);
 		peer_req = NULL;
 		inc_rs_pending(device);
-		err = drbd_send_drequest_csum(first_peer_device(device), sector, size,
+		err = drbd_send_drequest_csum(peer_device, sector, size,
					      digest, digest_size,
					      P_CSUM_RS_REQUEST);
 		kfree(digest);
@@ -995,7 +997,8 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
 int w_e_end_data_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1005,13 +1008,13 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 	}
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		err = drbd_send_block(first_peer_device(device), P_DATA_REPLY, peer_req);
+		err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "Sending NegDReply. sector=%llus.\n",
				 (unsigned long long)peer_req->i.sector);
 
-		err = drbd_send_ack(first_peer_device(device), P_NEG_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
 	}
 
 	dec_unacked(device);
@@ -1031,7 +1034,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1046,11 +1050,11 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 	}
 
 	if (device->state.conn == C_AHEAD) {
-		err = drbd_send_ack(first_peer_device(device), P_RS_CANCEL, peer_req);
+		err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
 	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(device);
-			err = drbd_send_block(first_peer_device(device), P_RS_DATA_REPLY, peer_req);
+			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
 		} else {
 			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Not sending RSDataReply, "
@@ -1062,7 +1066,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
			drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
				 (unsigned long long)peer_req->i.sector);
 
-		err = drbd_send_ack(first_peer_device(device), P_NEG_RS_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
 
 		/* update resync data with failure */
 		drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
@@ -1080,7 +1084,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct digest_info *di;
 	int digest_size;
 	void *digest = NULL;
@@ -1103,13 +1108,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 		/* quick hack to try to avoid a race against reconfiguration.
 		 * a real fix would be much more involved,
 		 * introducing more locking mechanisms */
-		if (first_peer_device(device)->connection->csums_tfm) {
-			digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
+		if (peer_device->connection->csums_tfm) {
+			digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
 			D_ASSERT(device, digest_size == di->digest_size);
 			digest = kmalloc(digest_size, GFP_NOIO);
 		}
 		if (digest) {
-			drbd_csum_ee(first_peer_device(device)->connection->csums_tfm, peer_req, digest);
+			drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
@@ -1118,16 +1123,16 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
			drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
-			err = drbd_send_ack(first_peer_device(device), P_RS_IS_IN_SYNC, peer_req);
+			err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
 		} else {
			inc_rs_pending(device);
			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
			kfree(di);
-			err = drbd_send_block(first_peer_device(device), P_RS_DATA_REPLY, peer_req);
+			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
 		}
 	} else {
-		err = drbd_send_ack(first_peer_device(device), P_NEG_RS_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
 		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
 	}
@@ -1143,7 +1148,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 int w_e_end_ov_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	sector_t sector = peer_req->i.sector;
 	unsigned int size = peer_req->i.size;
 	int digest_size;
@@ -1153,7 +1159,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
+	digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (!digest) {
 		err = 1;	/* terminate the connection in case the allocation failed */
@@ -1161,7 +1167,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	}
 
 	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
-		drbd_csum_ee(first_peer_device(device)->connection->verify_tfm, peer_req, digest);
+		drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
 	else
 		memset(digest, 0, digest_size);
 
@@ -1173,7 +1179,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	drbd_free_peer_req(device, peer_req);
 	peer_req = NULL;
 	inc_rs_pending(device);
-	err = drbd_send_drequest_csum(first_peer_device(device), sector, size, digest, digest_size, P_OV_REPLY);
+	err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
 	if (err)
 		dec_rs_pending(device);
 	kfree(digest);
@@ -1199,7 +1205,8 @@ void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
 int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct digest_info *di;
 	void *digest;
 	sector_t sector = peer_req->i.sector;
@@ -1224,10 +1231,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	di = peer_req->digest;
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
+		digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum_ee(first_peer_device(device)->connection->verify_tfm, peer_req, digest);
+			drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
 
			D_ASSERT(device, digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
@@ -1246,7 +1253,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	else
 		ov_out_of_sync_print(device);
 
-	err = drbd_send_ack_ex(first_peer_device(device), P_OV_RESULT, sector, size,
+	err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
 
 	dec_unacked(device);