path: root/drivers/block/drbd
author    Lars Ellenberg <lars.ellenberg@linbit.com>    2012-01-24 11:19:42 -0500
committer Philipp Reisner <philipp.reisner@linbit.com>  2012-11-08 10:58:36 -0500
commit    a0d856dfaed16efb9600b2a7d147cb6dbc11ff94 (patch)
tree      48b11d912c045a0bf7ff2eed3cfd1abe62c9e31f /drivers/block/drbd
parent    b406777e6496de346e8ee12fa64e1fe0adc02a78 (diff)

drbd: base completion and destruction of requests on ref counts

Cherry-picked and adapted from the drbd 9 devel branch.

The logic for when to get or put a reference is in mod_rq_state(). To avoid
confusion in the freeze/thaw and resend/restart paths, and when cleaning up
requests waiting for P_BARRIER_ACK, this also introduces two additional state
flags: RQ_COMPLETION_SUSP and RQ_EXP_BARR_ACK.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
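The scheme described above can be summarized in a short, self-contained userspace
C sketch. This is an illustration only, not driver code: all names here (demo_req,
demo_mod_state, demo_put_completion_ref, the reduced flag set) are invented for the
example. In the patch below the corresponding logic lives in mod_rq_state(),
drbd_req_put_completion_ref(), drbd_req_complete() and drbd_req_destroy(). The idea:
completion_ref counts the reasons the master bio may not be completed yet, the kref
counts the reasons the request object must stay allocated, and a single state-change
helper is the only place that takes and drops those references.

/*
 * Illustrative sketch only -- NOT the DRBD driver code.
 * All identifiers are invented for this example.
 */
#include <stdio.h>
#include <stdlib.h>

#define RQ_LOCAL_PENDING  (1u << 0)   /* local disk I/O still outstanding */
#define RQ_NET_PENDING    (1u << 1)   /* peer ack still outstanding */
#define RQ_EXP_BARR_ACK   (1u << 2)   /* a P_BARRIER_ACK is still expected */
#define RQ_NET_DONE       (1u << 3)   /* nothing more expected from the network */

struct demo_req {
	unsigned rq_state;
	int completion_ref;   /* reaches 0 -> complete the master bio */
	int kref;             /* reaches 0 -> destroy the request object */
};

static void demo_destroy(struct demo_req *req)
{
	printf("request destroyed (rq_state 0x%x)\n", req->rq_state);
	free(req);
}

static void demo_put_completion_ref(struct demo_req *req, int put)
{
	req->completion_ref -= put;
	if (req->completion_ref > 0)
		return;
	printf("master bio completed\n");
	if (--req->kref == 0)          /* completion drops its own kref */
		demo_destroy(req);
}

/* Single place that applies state-bit changes and gets/puts references,
 * in the spirit of mod_rq_state(). */
static void demo_mod_state(struct demo_req *req, unsigned clear, unsigned set)
{
	unsigned old = req->rq_state;
	int c_put = 0, k_put = 0;

	req->rq_state = (old & ~clear) | set;
	if (req->rq_state == old)
		return;

	/* intent: every newly set "pending" bit holds up completion */
	if (!(old & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		req->completion_ref++;
	if (!(old & RQ_NET_PENDING) && (set & RQ_NET_PENDING))
		req->completion_ref++;
	/* an expected barrier ack holds up destruction, not completion */
	if (!(old & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		req->kref++;

	/* progress: cleared bits give their references back */
	if ((old & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING))
		c_put++;
	if ((old & RQ_NET_PENDING) && (clear & RQ_NET_PENDING))
		c_put++;
	if ((old & RQ_EXP_BARR_ACK) && !(old & RQ_NET_DONE) && (set & RQ_NET_DONE))
		k_put++;

	if (c_put)
		demo_put_completion_ref(req, c_put);
	while (k_put--)
		if (--req->kref == 0)
			demo_destroy(req);
}

int main(void)
{
	struct demo_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->completion_ref = 1;   /* reference held by the submitter */
	req->kref = 1;             /* one kref as long as completion_ref > 0 */

	demo_mod_state(req, 0, RQ_LOCAL_PENDING | RQ_NET_PENDING | RQ_EXP_BARR_ACK);
	demo_mod_state(req, RQ_LOCAL_PENDING, 0);   /* local I/O finished */
	demo_mod_state(req, RQ_NET_PENDING, 0);     /* peer acked the write */
	demo_put_completion_ref(req, 1);            /* submitter drops its reference */
	demo_mod_state(req, 0, RQ_NET_DONE);        /* barrier ack: last kref, destroy */
	return 0;
}

Running the sketch walks one write through submission, local and network completion,
and the final barrier ack, at which point the last kref is dropped and the object is
freed -- the same life cycle the patch below implements for struct drbd_request.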
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--  drivers/block/drbd/drbd_int.h  |   3
-rw-r--r--  drivers/block/drbd/drbd_main.c |   2
-rw-r--r--  drivers/block/drbd/drbd_req.c  | 518
-rw-r--r--  drivers/block/drbd/drbd_req.h  |   9
4 files changed, 277 insertions(+), 255 deletions(-)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 52ad1bfce85a..8b26a2c954de 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -575,13 +575,14 @@ struct drbd_request {
575 575
576 struct list_head tl_requests; /* ring list in the transfer log */ 576 struct list_head tl_requests; /* ring list in the transfer log */
577 struct bio *master_bio; /* master bio pointer */ 577 struct bio *master_bio; /* master bio pointer */
578 unsigned long rq_state; /* see comments above _req_mod() */
579 unsigned long start_time; 578 unsigned long start_time;
580 579
581 /* once it hits 0, we may complete the master_bio */ 580 /* once it hits 0, we may complete the master_bio */
582 atomic_t completion_ref; 581 atomic_t completion_ref;
583 /* once it hits 0, we may destroy this drbd_request object */ 582 /* once it hits 0, we may destroy this drbd_request object */
584 struct kref kref; 583 struct kref kref;
584
585 unsigned rq_state; /* see comments above _req_mod() */
585}; 586};
586 587
587struct drbd_epoch { 588struct drbd_epoch {
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index bedfeeccd513..d07cb31a36ea 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -210,7 +210,7 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
210 /* find latest not yet barrier-acked write request, 210 /* find latest not yet barrier-acked write request,
211 * count writes in its epoch. */ 211 * count writes in its epoch. */
212 list_for_each_entry(r, &tconn->transfer_log, tl_requests) { 212 list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
213 const unsigned long s = r->rq_state; 213 const unsigned s = r->rq_state;
214 if (!req) { 214 if (!req) {
215 if (!(s & RQ_WRITE)) 215 if (!(s & RQ_WRITE))
216 continue; 216 continue;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index ae894af428c1..329528d9dec7 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -85,7 +85,9 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
85 INIT_LIST_HEAD(&req->tl_requests); 85 INIT_LIST_HEAD(&req->tl_requests);
86 INIT_LIST_HEAD(&req->w.list); 86 INIT_LIST_HEAD(&req->w.list);
87 87
88 /* one reference to be put by __drbd_make_request */
88 atomic_set(&req->completion_ref, 1); 89 atomic_set(&req->completion_ref, 1);
90 /* one kref as long as completion_ref > 0 */
89 kref_init(&req->kref); 91 kref_init(&req->kref);
90 return req; 92 return req;
91} 93}
@@ -94,7 +96,16 @@ static void drbd_req_destroy(struct kref *kref)
94{ 96{
95 struct drbd_request *req = container_of(kref, struct drbd_request, kref); 97 struct drbd_request *req = container_of(kref, struct drbd_request, kref);
96 struct drbd_conf *mdev = req->w.mdev; 98 struct drbd_conf *mdev = req->w.mdev;
97 const unsigned long s = req->rq_state; 99 const unsigned s = req->rq_state;
100
101 if ((req->master_bio && !(s & RQ_POSTPONED)) ||
102 atomic_read(&req->completion_ref) ||
103 (s & RQ_LOCAL_PENDING) ||
104 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
105 dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
106 s, atomic_read(&req->completion_ref));
107 return;
108 }
98 109
99 /* remove it from the transfer log. 110 /* remove it from the transfer log.
100 * well, only if it had been there in the first 111 * well, only if it had been there in the first
@@ -180,44 +191,6 @@ static void drbd_remove_request_interval(struct rb_root *root,
180 wake_up(&mdev->misc_wait); 191 wake_up(&mdev->misc_wait);
181} 192}
182 193
183static void maybe_wakeup_conflicting_requests(struct drbd_request *req)
184{
185 const unsigned long s = req->rq_state;
186 if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
187 return;
188 if (req->i.waiting)
189 /* Retry all conflicting peer requests. */
190 wake_up(&req->w.mdev->misc_wait);
191}
192
193static
194void req_may_be_done(struct drbd_request *req)
195{
196 const unsigned long s = req->rq_state;
197
198 /* req->master_bio still present means: Not yet completed.
199 *
200 * Unless this is RQ_POSTPONED, which will cause drbd_req_destroy() to
201 * queue it on the retry workqueue instead of destroying it.
202 */
203 if (req->master_bio && !(s & RQ_POSTPONED))
204 return;
205
206 /* Local still pending, even though master_bio is already completed?
207 * may happen for RQ_LOCAL_ABORTED requests. */
208 if (s & RQ_LOCAL_PENDING)
209 return;
210
211 if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
212 /* this is disconnected (local only) operation,
213 * or protocol A, B, or C P_BARRIER_ACK,
214 * or killed from the transfer log due to connection loss. */
215 kref_put(&req->kref, drbd_req_destroy);
216 }
217 /* else: network part and not DONE yet. that is
218 * protocol A, B, or C, barrier ack still pending... */
219}
220
221/* Helper for __req_mod(). 194/* Helper for __req_mod().
222 * Set m->bio to the master bio, if it is fit to be completed, 195 * Set m->bio to the master bio, if it is fit to be completed,
223 * or leave it alone (it is initialized to NULL in __req_mod), 196 * or leave it alone (it is initialized to NULL in __req_mod),
@@ -225,10 +198,12 @@ void req_may_be_done(struct drbd_request *req)
225 * If m->bio is set, the error status to be returned is placed in m->error. 198 * If m->bio is set, the error status to be returned is placed in m->error.
226 */ 199 */
227static 200static
228void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m) 201void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
229{ 202{
230 const unsigned long s = req->rq_state; 203 const unsigned s = req->rq_state;
231 struct drbd_conf *mdev = req->w.mdev; 204 struct drbd_conf *mdev = req->w.mdev;
205 int rw;
206 int error, ok;
232 207
233 /* we must not complete the master bio, while it is 208 /* we must not complete the master bio, while it is
234 * still being processed by _drbd_send_zc_bio (drbd_send_dblock) 209 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -239,116 +214,208 @@ void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
239 * the receiver, 214 * the receiver,
240 * the bio_endio completion callbacks. 215 * the bio_endio completion callbacks.
241 */ 216 */
242 if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) 217 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
243 return; 218 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
244 if (s & RQ_NET_QUEUED) 219 (s & RQ_COMPLETION_SUSP)) {
220 dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
245 return; 221 return;
246 if (s & RQ_NET_PENDING) 222 }
223
224 if (!req->master_bio) {
225 dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
247 return; 226 return;
227 }
248 228
249 /* FIXME 229 rw = bio_rw(req->master_bio);
250 * instead of all the RQ_FLAGS, actually use the completion_ref
251 * to decide if this is ready to be completed. */
252 if (req->master_bio) {
253 int complete = atomic_dec_and_test(&req->completion_ref);
254 D_ASSERT(complete != 0);
255 } else
256 D_ASSERT(atomic_read(&req->completion_ref) == 0);
257 230
258 if (req->master_bio) { 231 /*
259 int rw = bio_rw(req->master_bio); 232 * figure out whether to report success or failure.
233 *
234 * report success when at least one of the operations succeeded.
235 * or, to put the other way,
236 * only report failure, when both operations failed.
237 *
238 * what to do about the failures is handled elsewhere.
239 * what we need to do here is just: complete the master_bio.
240 *
241 * local completion error, if any, has been stored as ERR_PTR
242 * in private_bio within drbd_request_endio.
243 */
244 ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
245 error = PTR_ERR(req->private_bio);
260 246
261 /* this is DATA_RECEIVED (remote read) 247 /* remove the request from the conflict detection
262 * or protocol C P_WRITE_ACK 248 * respective block_id verification hash */
263 * or protocol B P_RECV_ACK 249 if (!drbd_interval_empty(&req->i)) {
264 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck) 250 struct rb_root *root;
265 * or canceled or failed,
266 * or killed from the transfer log due to connection loss.
267 */
268 251
269 /* 252 if (rw == WRITE)
270 * figure out whether to report success or failure. 253 root = &mdev->write_requests;
271 * 254 else
272 * report success when at least one of the operations succeeded. 255 root = &mdev->read_requests;
273 * or, to put the other way, 256 drbd_remove_request_interval(root, req);
274 * only report failure, when both operations failed. 257 } else if (!(s & RQ_POSTPONED))
275 * 258 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
276 * what to do about the failures is handled elsewhere. 259
277 * what we need to do here is just: complete the master_bio. 260 /* Before we can signal completion to the upper layers,
278 * 261 * we may need to close the current transfer log epoch.
279 * local completion error, if any, has been stored as ERR_PTR 262 * We are within the request lock, so we can simply compare
280 * in private_bio within drbd_request_endio. 263 * the request epoch number with the current transfer log
281 */ 264 * epoch number. If they match, increase the current_tle_nr,
282 int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK); 265 * and reset the transfer log epoch write_cnt.
283 int error = PTR_ERR(req->private_bio); 266 */
284 267 if (rw == WRITE &&
285 /* remove the request from the conflict detection 268 req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
286 * respective block_id verification hash */ 269 start_new_tl_epoch(mdev->tconn);
287 if (!drbd_interval_empty(&req->i)) {
288 struct rb_root *root;
289
290 if (rw == WRITE)
291 root = &mdev->write_requests;
292 else
293 root = &mdev->read_requests;
294 drbd_remove_request_interval(root, req);
295 } else if (!(s & RQ_POSTPONED))
296 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
297
298 /* Before we can signal completion to the upper layers,
299 * we may need to close the current transfer log epoch.
300 * We are within the request lock, so we can simply compare
301 * the request epoch number with the current transfer log
302 * epoch number. If they match, increase the current_tle_nr,
303 * and reset the transfer log epoch write_cnt.
304 */
305 if (rw == WRITE &&
306 req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
307 start_new_tl_epoch(mdev->tconn);
308 270
309 /* Update disk stats */ 271 /* Update disk stats */
310 _drbd_end_io_acct(mdev, req); 272 _drbd_end_io_acct(mdev, req);
311 273
312 /* If READ failed, 274 /* If READ failed,
313 * have it be pushed back to the retry work queue, 275 * have it be pushed back to the retry work queue,
314 * so it will re-enter __drbd_make_request(), 276 * so it will re-enter __drbd_make_request(),
315 * and be re-assigned to a suitable local or remote path, 277 * and be re-assigned to a suitable local or remote path,
316 * or failed if we do not have access to good data anymore. 278 * or failed if we do not have access to good data anymore.
317 * 279 *
318 * Unless it was failed early by __drbd_make_request(), 280 * Unless it was failed early by __drbd_make_request(),
319 * because no path was available, in which case 281 * because no path was available, in which case
320 * it was not even added to the transfer_log. 282 * it was not even added to the transfer_log.
321 * 283 *
322 * READA may fail, and will not be retried. 284 * READA may fail, and will not be retried.
323 * 285 *
324 * WRITE should have used all available paths already. 286 * WRITE should have used all available paths already.
325 */ 287 */
326 if (!ok && rw == READ && !list_empty(&req->tl_requests)) 288 if (!ok && rw == READ && !list_empty(&req->tl_requests))
327 req->rq_state |= RQ_POSTPONED; 289 req->rq_state |= RQ_POSTPONED;
328 290
329 if (!(req->rq_state & RQ_POSTPONED)) { 291 if (!(req->rq_state & RQ_POSTPONED)) {
330 m->error = ok ? 0 : (error ?: -EIO); 292 m->error = ok ? 0 : (error ?: -EIO);
331 m->bio = req->master_bio; 293 m->bio = req->master_bio;
332 req->master_bio = NULL; 294 req->master_bio = NULL;
333 } else { 295 } else {
334 /* Assert that this will be _req_is_done() 296 /* Assert that this will be drbd_req_destroy()ed
335 * with this very invokation. */ 297 * with this very invokation. */
336 /* FIXME: 298 D_ASSERT(atomic_read(&req->kref.refcount) == 1);
337 * what about (RQ_LOCAL_PENDING | RQ_LOCAL_ABORTED)?
338 */
339 D_ASSERT(!(s & RQ_LOCAL_PENDING));
340 D_ASSERT((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE));
341 }
342 } 299 }
343 req_may_be_done(req);
344} 300}
345 301
346static void req_may_be_completed_not_susp(struct drbd_request *req, struct bio_and_error *m) 302static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
347{ 303{
348 struct drbd_conf *mdev = req->w.mdev; 304 struct drbd_conf *mdev = req->w.mdev;
305 D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
306
307 if (!atomic_sub_and_test(put, &req->completion_ref))
308 return 0;
309
310 if (drbd_suspended(mdev)) {
311 /* We do not allow completion while suspended. Re-get a
312 * reference, so whatever happens when this is resumed
313 * may put and complete. */
314
315 D_ASSERT(!(req->rq_state & RQ_COMPLETION_SUSP));
316 req->rq_state |= RQ_COMPLETION_SUSP;
317 atomic_inc(&req->completion_ref);
318 return 0;
319 }
320
321 /* else */
322 drbd_req_complete(req, m);
323 return 1;
324}
325
326/* I'd like this to be the only place that manipulates
327 * req->completion_ref and req->kref. */
328static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
329 int clear, int set)
330{
331 struct drbd_conf *mdev = req->w.mdev;
332 unsigned s = req->rq_state;
333 int c_put = 0;
334 int k_put = 0;
335
336 /* apply */
337
338 req->rq_state &= ~clear;
339 req->rq_state |= set;
340
341 /* no change? */
342 if (req->rq_state == s)
343 return;
344
345 /* intent: get references */
349 346
350 if (!drbd_suspended(mdev)) 347 if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
351 req_may_be_completed(req, m); 348 atomic_inc(&req->completion_ref);
349
350 if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
351 inc_ap_pending(mdev);
352 atomic_inc(&req->completion_ref);
353 }
354
355 if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
356 atomic_inc(&req->completion_ref);
357
358 if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
359 kref_get(&req->kref); /* wait for the DONE */
360
361 if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
362 atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
363
364 /* progress: put references */
365
366 if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
367 ++c_put;
368
369 if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
370 D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
371 /* local completion may still come in later,
372 * we need to keep the req object around. */
373 kref_get(&req->kref);
374 ++c_put;
375 }
376
377 if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
378 if (req->rq_state & RQ_LOCAL_ABORTED)
379 ++k_put;
380 else
381 ++c_put;
382 }
383
384 if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
385 dec_ap_pending(mdev);
386 ++c_put;
387 }
388
389 if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
390 ++c_put;
391
392 if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
393 if (req->rq_state & RQ_NET_SENT)
394 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
395 ++k_put;
396 }
397
398 /* potentially complete and destroy */
399
400 if (k_put || c_put) {
401 /* Completion does it's own kref_put. If we are going to
402 * kref_sub below, we need req to be still around then. */
403 int at_least = k_put + !!c_put;
404 int refcount = atomic_read(&req->kref.refcount);
405 if (refcount < at_least)
406 dev_err(DEV,
407 "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
408 s, req->rq_state, refcount, at_least);
409 }
410
411 /* If we made progress, retry conflicting peer requests, if any. */
412 if (req->i.waiting)
413 wake_up(&mdev->misc_wait);
414
415 if (c_put)
416 k_put += drbd_req_put_completion_ref(req, m, c_put);
417 if (k_put)
418 kref_sub(&req->kref, k_put, drbd_req_destroy);
352} 419}
353 420
354/* obviously this could be coded as many single functions 421/* obviously this could be coded as many single functions
@@ -388,7 +455,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
388 /* reached via __drbd_make_request 455 /* reached via __drbd_make_request
389 * and from w_read_retry_remote */ 456 * and from w_read_retry_remote */
390 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); 457 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
391 req->rq_state |= RQ_NET_PENDING;
392 rcu_read_lock(); 458 rcu_read_lock();
393 nc = rcu_dereference(mdev->tconn->net_conf); 459 nc = rcu_dereference(mdev->tconn->net_conf);
394 p = nc->wire_protocol; 460 p = nc->wire_protocol;
@@ -396,13 +462,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
396 req->rq_state |= 462 req->rq_state |=
397 p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK : 463 p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
398 p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0; 464 p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
399 inc_ap_pending(mdev); 465 mod_rq_state(req, m, 0, RQ_NET_PENDING);
400 break; 466 break;
401 467
402 case TO_BE_SUBMITTED: /* locally */ 468 case TO_BE_SUBMITTED: /* locally */
403 /* reached via __drbd_make_request */ 469 /* reached via __drbd_make_request */
404 D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK)); 470 D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
405 req->rq_state |= RQ_LOCAL_PENDING; 471 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
406 break; 472 break;
407 473
408 case COMPLETED_OK: 474 case COMPLETED_OK:
@@ -411,44 +477,23 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
411 else 477 else
412 mdev->read_cnt += req->i.size >> 9; 478 mdev->read_cnt += req->i.size >> 9;
413 479
414 req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); 480 mod_rq_state(req, m, RQ_LOCAL_PENDING,
415 req->rq_state &= ~RQ_LOCAL_PENDING; 481 RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
416
417 maybe_wakeup_conflicting_requests(req);
418 req_may_be_completed_not_susp(req, m);
419 break; 482 break;
420 483
421 case ABORT_DISK_IO: 484 case ABORT_DISK_IO:
422 req->rq_state |= RQ_LOCAL_ABORTED; 485 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
423 req_may_be_completed_not_susp(req, m);
424 break;
425
426 case WRITE_COMPLETED_WITH_ERROR:
427 req->rq_state |= RQ_LOCAL_COMPLETED;
428 req->rq_state &= ~RQ_LOCAL_PENDING;
429
430 __drbd_chk_io_error(mdev, false);
431 maybe_wakeup_conflicting_requests(req);
432 req_may_be_completed_not_susp(req, m);
433 break;
434
435 case READ_AHEAD_COMPLETED_WITH_ERROR:
436 /* it is legal to fail READA */
437 req->rq_state |= RQ_LOCAL_COMPLETED;
438 req->rq_state &= ~RQ_LOCAL_PENDING;
439 req_may_be_completed_not_susp(req, m);
440 break; 486 break;
441 487
442 case READ_COMPLETED_WITH_ERROR: 488 case READ_COMPLETED_WITH_ERROR:
443 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); 489 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
444 490 /* fall through. */
445 req->rq_state |= RQ_LOCAL_COMPLETED; 491 case WRITE_COMPLETED_WITH_ERROR:
446 req->rq_state &= ~RQ_LOCAL_PENDING;
447
448 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
449
450 __drbd_chk_io_error(mdev, false); 492 __drbd_chk_io_error(mdev, false);
451 req_may_be_completed_not_susp(req, m); 493 /* fall through. */
494 case READ_AHEAD_COMPLETED_WITH_ERROR:
495 /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
496 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
452 break; 497 break;
453 498
454 case QUEUE_FOR_NET_READ: 499 case QUEUE_FOR_NET_READ:
@@ -461,7 +506,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
461 506
462 /* So we can verify the handle in the answer packet. 507 /* So we can verify the handle in the answer packet.
463 * Corresponding drbd_remove_request_interval is in 508 * Corresponding drbd_remove_request_interval is in
464 * req_may_be_completed() */ 509 * drbd_req_complete() */
465 D_ASSERT(drbd_interval_empty(&req->i)); 510 D_ASSERT(drbd_interval_empty(&req->i));
466 drbd_insert_interval(&mdev->read_requests, &req->i); 511 drbd_insert_interval(&mdev->read_requests, &req->i);
467 512
@@ -469,7 +514,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
469 514
470 D_ASSERT(req->rq_state & RQ_NET_PENDING); 515 D_ASSERT(req->rq_state & RQ_NET_PENDING);
471 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); 516 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
472 req->rq_state |= RQ_NET_QUEUED; 517 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
473 req->w.cb = w_send_read_req; 518 req->w.cb = w_send_read_req;
474 drbd_queue_work(&mdev->tconn->sender_work, &req->w); 519 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
475 break; 520 break;
@@ -479,7 +524,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
479 /* from __drbd_make_request only */ 524 /* from __drbd_make_request only */
480 525
481 /* Corresponding drbd_remove_request_interval is in 526 /* Corresponding drbd_remove_request_interval is in
482 * req_may_be_completed() */ 527 * drbd_req_complete() */
483 D_ASSERT(drbd_interval_empty(&req->i)); 528 D_ASSERT(drbd_interval_empty(&req->i));
484 drbd_insert_interval(&mdev->write_requests, &req->i); 529 drbd_insert_interval(&mdev->write_requests, &req->i);
485 530
@@ -504,7 +549,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
504 549
505 /* queue work item to send data */ 550 /* queue work item to send data */
506 D_ASSERT(req->rq_state & RQ_NET_PENDING); 551 D_ASSERT(req->rq_state & RQ_NET_PENDING);
507 req->rq_state |= RQ_NET_QUEUED; 552 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
508 req->w.cb = w_send_dblock; 553 req->w.cb = w_send_dblock;
509 drbd_queue_work(&mdev->tconn->sender_work, &req->w); 554 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
510 555
@@ -519,7 +564,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
519 break; 564 break;
520 565
521 case QUEUE_FOR_SEND_OOS: 566 case QUEUE_FOR_SEND_OOS:
522 req->rq_state |= RQ_NET_QUEUED; 567 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
523 req->w.cb = w_send_out_of_sync; 568 req->w.cb = w_send_out_of_sync;
524 drbd_queue_work(&mdev->tconn->sender_work, &req->w); 569 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
525 break; 570 break;
@@ -529,64 +574,43 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
529 case SEND_FAILED: 574 case SEND_FAILED:
530 /* real cleanup will be done from tl_clear. just update flags 575 /* real cleanup will be done from tl_clear. just update flags
531 * so it is no longer marked as on the worker queue */ 576 * so it is no longer marked as on the worker queue */
532 req->rq_state &= ~RQ_NET_QUEUED; 577 mod_rq_state(req, m, RQ_NET_QUEUED, 0);
533 /* if we did it right, tl_clear should be scheduled only after
534 * this, so this should not be necessary! */
535 req_may_be_completed_not_susp(req, m);
536 break; 578 break;
537 579
538 case HANDED_OVER_TO_NETWORK: 580 case HANDED_OVER_TO_NETWORK:
539 /* assert something? */ 581 /* assert something? */
540 if (bio_data_dir(req->master_bio) == WRITE)
541 atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
542
543 if (bio_data_dir(req->master_bio) == WRITE && 582 if (bio_data_dir(req->master_bio) == WRITE &&
544 !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) { 583 !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
545 /* this is what is dangerous about protocol A: 584 /* this is what is dangerous about protocol A:
546 * pretend it was successfully written on the peer. */ 585 * pretend it was successfully written on the peer. */
547 if (req->rq_state & RQ_NET_PENDING) { 586 if (req->rq_state & RQ_NET_PENDING)
548 dec_ap_pending(mdev); 587 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
549 req->rq_state &= ~RQ_NET_PENDING; 588 /* else: neg-ack was faster... */
550 req->rq_state |= RQ_NET_OK;
551 } /* else: neg-ack was faster... */
552 /* it is still not yet RQ_NET_DONE until the 589 /* it is still not yet RQ_NET_DONE until the
553 * corresponding epoch barrier got acked as well, 590 * corresponding epoch barrier got acked as well,
554 * so we know what to dirty on connection loss */ 591 * so we know what to dirty on connection loss */
555 } 592 }
556 req->rq_state &= ~RQ_NET_QUEUED; 593 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
557 req->rq_state |= RQ_NET_SENT;
558 req_may_be_completed_not_susp(req, m);
559 break; 594 break;
560 595
561 case OOS_HANDED_TO_NETWORK: 596 case OOS_HANDED_TO_NETWORK:
562 /* Was not set PENDING, no longer QUEUED, so is now DONE 597 /* Was not set PENDING, no longer QUEUED, so is now DONE
563 * as far as this connection is concerned. */ 598 * as far as this connection is concerned. */
564 req->rq_state &= ~RQ_NET_QUEUED; 599 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
565 req->rq_state |= RQ_NET_DONE;
566 req_may_be_completed_not_susp(req, m);
567 break; 600 break;
568 601
569 case CONNECTION_LOST_WHILE_PENDING: 602 case CONNECTION_LOST_WHILE_PENDING:
570 /* transfer log cleanup after connection loss */ 603 /* transfer log cleanup after connection loss */
571 /* assert something? */ 604 mod_rq_state(req, m,
572 if (req->rq_state & RQ_NET_PENDING) 605 RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
573 dec_ap_pending(mdev); 606 RQ_NET_DONE);
574
575 p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;
576
577 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
578 req->rq_state |= RQ_NET_DONE;
579 if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
580 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
581
582 req_may_be_completed(req, m); /* Allowed while state.susp */
583 break; 607 break;
584 608
585 case DISCARD_WRITE: 609 case DISCARD_WRITE:
586 /* for discarded conflicting writes of multiple primaries, 610 /* for discarded conflicting writes of multiple primaries,
587 * there is no need to keep anything in the tl, potential 611 * there is no need to keep anything in the tl, potential
588 * node crashes are covered by the activity log. */ 612 * node crashes are covered by the activity log. */
589 req->rq_state |= RQ_NET_DONE; 613 mod_rq_state(req, NULL, 0, RQ_NET_DONE);
590 /* fall through */ 614 /* fall through */
591 case WRITE_ACKED_BY_PEER_AND_SIS: 615 case WRITE_ACKED_BY_PEER_AND_SIS:
592 case WRITE_ACKED_BY_PEER: 616 case WRITE_ACKED_BY_PEER:
@@ -605,13 +629,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
605 * see also notes above in HANDED_OVER_TO_NETWORK about 629 * see also notes above in HANDED_OVER_TO_NETWORK about
606 * protocol != C */ 630 * protocol != C */
607 ack_common: 631 ack_common:
608 req->rq_state |= RQ_NET_OK;
609 D_ASSERT(req->rq_state & RQ_NET_PENDING); 632 D_ASSERT(req->rq_state & RQ_NET_PENDING);
610 dec_ap_pending(mdev); 633 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
611 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
612 req->rq_state &= ~RQ_NET_PENDING;
613 maybe_wakeup_conflicting_requests(req);
614 req_may_be_completed_not_susp(req, m);
615 break; 634 break;
616 635
617 case POSTPONE_WRITE: 636 case POSTPONE_WRITE:
@@ -622,64 +641,61 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
622 */ 641 */
623 D_ASSERT(req->rq_state & RQ_NET_PENDING); 642 D_ASSERT(req->rq_state & RQ_NET_PENDING);
624 req->rq_state |= RQ_POSTPONED; 643 req->rq_state |= RQ_POSTPONED;
625 maybe_wakeup_conflicting_requests(req); 644 if (req->i.waiting)
626 req_may_be_completed_not_susp(req, m); 645 wake_up(&mdev->misc_wait);
646 /* Do not clear RQ_NET_PENDING. This request will make further
647 * progress via restart_conflicting_writes() or
648 * fail_postponed_requests(). Hopefully. */
627 break; 649 break;
628 650
629 case NEG_ACKED: 651 case NEG_ACKED:
630 /* assert something? */ 652 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, RQ_NET_DONE);
631 if (req->rq_state & RQ_NET_PENDING) {
632 dec_ap_pending(mdev);
633 if (req->rq_state & RQ_WRITE)
634 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
635 }
636 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
637
638 req->rq_state |= RQ_NET_DONE;
639
640 maybe_wakeup_conflicting_requests(req);
641 req_may_be_completed_not_susp(req, m);
642 /* else: done by HANDED_OVER_TO_NETWORK */
643 break; 653 break;
644 654
645 case FAIL_FROZEN_DISK_IO: 655 case FAIL_FROZEN_DISK_IO:
646 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) 656 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
647 break; 657 break;
648 658 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
649 req_may_be_completed(req, m); /* Allowed while state.susp */
650 break; 659 break;
651 660
652 case RESTART_FROZEN_DISK_IO: 661 case RESTART_FROZEN_DISK_IO:
653 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) 662 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
654 break; 663 break;
655 664
656 req->rq_state &= ~RQ_LOCAL_COMPLETED; 665 mod_rq_state(req, m,
666 RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
667 RQ_LOCAL_PENDING);
657 668
658 rv = MR_READ; 669 rv = MR_READ;
659 if (bio_data_dir(req->master_bio) == WRITE) 670 if (bio_data_dir(req->master_bio) == WRITE)
660 rv = MR_WRITE; 671 rv = MR_WRITE;
661 672
662 get_ldev(mdev); 673 get_ldev(mdev); /* always succeeds in this call path */
663 req->w.cb = w_restart_disk_io; 674 req->w.cb = w_restart_disk_io;
664 drbd_queue_work(&mdev->tconn->sender_work, &req->w); 675 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
665 break; 676 break;
666 677
667 case RESEND: 678 case RESEND:
668 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK 679 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
669 before the connection loss (B&C only); only P_BARRIER_ACK was missing. 680 before the connection loss (B&C only); only P_BARRIER_ACK
681 (or the local completion?) was missing when we suspended.
670 Throwing them out of the TL here by pretending we got a BARRIER_ACK. 682 Throwing them out of the TL here by pretending we got a BARRIER_ACK.
671 During connection handshake, we ensure that the peer was not rebooted. */ 683 During connection handshake, we ensure that the peer was not rebooted. */
672 if (!(req->rq_state & RQ_NET_OK)) { 684 if (!(req->rq_state & RQ_NET_OK)) {
685 /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
686 * in that case we must not set RQ_NET_PENDING. */
687
688 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
673 if (req->w.cb) { 689 if (req->w.cb) {
674 /* w.cb expected to be w_send_dblock, or w_send_read_req */
675 drbd_queue_work(&mdev->tconn->sender_work, &req->w); 690 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
676 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; 691 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
677 } 692 } /* else: FIXME can this happen? */
678 break; 693 break;
679 } 694 }
680 /* else, fall through to BARRIER_ACKED */ 695 /* else, fall through to BARRIER_ACKED */
681 696
682 case BARRIER_ACKED: 697 case BARRIER_ACKED:
698 /* barrier ack for READ requests does not make sense */
683 if (!(req->rq_state & RQ_WRITE)) 699 if (!(req->rq_state & RQ_WRITE))
684 break; 700 break;
685 701
@@ -689,20 +705,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
689 * we won't be able to clean them up... */ 705 * we won't be able to clean them up... */
690 dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n"); 706 dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
691 } 707 }
692 if ((req->rq_state & RQ_NET_MASK) != 0) { 708 /* Allowed to complete requests, even while suspended.
693 req->rq_state |= RQ_NET_DONE; 709 * As this is called for all requests within a matching epoch,
694 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) 710 * we need to filter, and only set RQ_NET_DONE for those that
695 atomic_sub(req->i.size>>9, &mdev->ap_in_flight); 711 * have actually been on the wire. */
696 } 712 mod_rq_state(req, m, RQ_COMPLETION_SUSP,
697 req_may_be_done(req); /* Allowed while state.susp */ 713 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
698 break; 714 break;
699 715
700 case DATA_RECEIVED: 716 case DATA_RECEIVED:
701 D_ASSERT(req->rq_state & RQ_NET_PENDING); 717 D_ASSERT(req->rq_state & RQ_NET_PENDING);
702 dec_ap_pending(mdev); 718 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
703 req->rq_state &= ~RQ_NET_PENDING;
704 req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
705 req_may_be_completed_not_susp(req, m);
706 break; 719 break;
707 }; 720 };
708 721
@@ -867,6 +880,9 @@ static bool do_remote_read(struct drbd_request *req)
867 if (mdev->state.pdsk != D_UP_TO_DATE) 880 if (mdev->state.pdsk != D_UP_TO_DATE)
868 return false; 881 return false;
869 882
883 if (req->private_bio == NULL)
884 return true;
885
870 /* TODO: improve read balancing decisions, take into account drbd 886 /* TODO: improve read balancing decisions, take into account drbd
871 * protocol, pending requests etc. */ 887 * protocol, pending requests etc. */
872 888
@@ -877,9 +893,6 @@ static bool do_remote_read(struct drbd_request *req)
877 if (rbm == RB_PREFER_LOCAL && req->private_bio) 893 if (rbm == RB_PREFER_LOCAL && req->private_bio)
878 return false; /* submit locally */ 894 return false; /* submit locally */
879 895
880 if (req->private_bio == NULL)
881 return true;
882
883 if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) { 896 if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
884 if (req->private_bio) { 897 if (req->private_bio) {
885 bio_put(req->private_bio); 898 bio_put(req->private_bio);
@@ -1010,7 +1023,7 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
1010 1023
1011 /* We fail READ/READA early, if we can not serve it. 1024 /* We fail READ/READA early, if we can not serve it.
1012 * We must do this before req is registered on any lists. 1025 * We must do this before req is registered on any lists.
1013 * Otherwise, req_may_be_completed() will queue failed READ for retry. */ 1026 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
1014 if (rw != WRITE) { 1027 if (rw != WRITE) {
1015 if (!do_remote_read(req) && !req->private_bio) 1028 if (!do_remote_read(req) && !req->private_bio)
1016 goto nodata; 1029 goto nodata;
@@ -1042,19 +1055,18 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
1042 /* but we need to give up the spinlock to submit */ 1055 /* but we need to give up the spinlock to submit */
1043 spin_unlock_irq(&mdev->tconn->req_lock); 1056 spin_unlock_irq(&mdev->tconn->req_lock);
1044 drbd_submit_req_private_bio(req); 1057 drbd_submit_req_private_bio(req);
1045 /* once we have submitted, we must no longer look at req, 1058 spin_lock_irq(&mdev->tconn->req_lock);
1046 * it may already be destroyed. */
1047 return;
1048 } else if (no_remote) { 1059 } else if (no_remote) {
1049nodata: 1060nodata:
1050 if (__ratelimit(&drbd_ratelimit_state)) 1061 if (__ratelimit(&drbd_ratelimit_state))
1051 dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); 1062 dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
1052 /* A write may have been queued for send_oos, however. 1063 /* A write may have been queued for send_oos, however.
1053 * So we can not simply free it, we must go through req_may_be_completed() */ 1064 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
1054 } 1065 }
1055 1066
1056out: 1067out:
1057 req_may_be_completed(req, &m); 1068 if (drbd_req_put_completion_ref(req, &m, 1))
1069 kref_put(&req->kref, drbd_req_destroy);
1058 spin_unlock_irq(&mdev->tconn->req_lock); 1070 spin_unlock_irq(&mdev->tconn->req_lock);
1059 1071
1060 if (m.bio) 1072 if (m.bio)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index f80af27fa5ed..90e5a1eea727 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -203,11 +203,18 @@ enum drbd_req_state_bits {
203 /* The peer has sent a retry ACK */ 203 /* The peer has sent a retry ACK */
204 __RQ_POSTPONED, 204 __RQ_POSTPONED,
205 205
206 /* would have been completed,
207 * but was not, because of drbd_suspended() */
208 __RQ_COMPLETION_SUSP,
209
206 /* We expect a receive ACK (wire proto B) */ 210 /* We expect a receive ACK (wire proto B) */
207 __RQ_EXP_RECEIVE_ACK, 211 __RQ_EXP_RECEIVE_ACK,
208 212
209 /* We expect a write ACK (wite proto C) */ 213 /* We expect a write ACK (wite proto C) */
210 __RQ_EXP_WRITE_ACK, 214 __RQ_EXP_WRITE_ACK,
215
216 /* waiting for a barrier ack, did an extra kref_get */
217 __RQ_EXP_BARR_ACK,
211}; 218};
212 219
213#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING) 220#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
@@ -230,8 +237,10 @@ enum drbd_req_state_bits {
230#define RQ_WRITE (1UL << __RQ_WRITE) 237#define RQ_WRITE (1UL << __RQ_WRITE)
231#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG) 238#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
232#define RQ_POSTPONED (1UL << __RQ_POSTPONED) 239#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
240#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
233#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK) 241#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
234#define RQ_EXP_WRITE_ACK (1UL << __RQ_EXP_WRITE_ACK) 242#define RQ_EXP_WRITE_ACK (1UL << __RQ_EXP_WRITE_ACK)
243#define RQ_EXP_BARR_ACK (1UL << __RQ_EXP_BARR_ACK)
235 244
236/* For waking up the frozen transfer log mod_req() has to return if the request 245/* For waking up the frozen transfer log mod_req() has to return if the request
237 should be counted in the epoch object*/ 246 should be counted in the epoch object*/