Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--   drivers/block/drbd/drbd_req.c   65
1 file changed, 19 insertions(+), 46 deletions(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f708aa1809f0..1249672519ca 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -263,7 +263,6 @@ void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
 {
         const unsigned long s = req->rq_state;
         struct drbd_conf *mdev = req->w.mdev;
-        int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

         /* we must not complete the master bio, while it is
          * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -282,6 +281,8 @@ void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
                 return;

         if (req->master_bio) {
+                int rw = bio_rw(req->master_bio);
+
                 /* this is DATA_RECEIVED (remote read)
                  * or protocol C P_WRITE_ACK
                  * or protocol B P_RECV_ACK
@@ -326,7 +327,18 @@ void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
                 /* Update disk stats */
                 _drbd_end_io_acct(mdev, req);

-                if (!(s & RQ_POSTPONED)) {
+                /* if READ failed,
+                 * have it be pushed back to the retry work queue,
+                 * so it will re-enter __drbd_make_request,
+                 * and be re-assigned to a suitable local or remote path,
+                 * or failed if we do not have access to good data anymore.
+                 * READA may fail.
+                 * WRITE should have used all available paths already.
+                 */
+                if (!ok && rw == READ)
+                        req->rq_state |= RQ_POSTPONED;
+
+                if (!(req->rq_state & RQ_POSTPONED)) {
                         m->error = ok ? 0 : (error ?: -EIO);
                         m->bio = req->master_bio;
                         req->master_bio = NULL;
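
The three hunks above are the core of the change: the read/write direction is now taken from the master bio itself (bio_rw()), and a failed READ is no longer completed with -EIO on the spot; it is flagged RQ_POSTPONED so it re-enters __drbd_make_request and can be re-assigned to a good local or remote path. Below is a minimal user-space model of that completion rule; the flag values and struct layout are invented for illustration, not the kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's state bits and bio direction. */
    enum { RQ_WRITE = 1 << 0, RQ_POSTPONED = 1 << 1 };
    enum { READ, WRITE };

    struct fake_req {
            unsigned long rq_state;
            int bio_rw;             /* direction taken from the master bio */
    };

    /* Model of the new rule in req_may_be_completed(): a failed READ is
     * postponed (requeued through the retry path) instead of completing
     * with -EIO; a failed WRITE has already used all available paths and
     * completes immediately. Returns true if the master bio may be
     * completed now. */
    static bool may_complete_now(struct fake_req *req, bool ok)
    {
            int rw = req->bio_rw;   /* was: req->rq_state & RQ_WRITE ? WRITE : READ */

            if (!ok && rw == READ)
                    req->rq_state |= RQ_POSTPONED;

            return !(req->rq_state & RQ_POSTPONED);
    }

    int main(void)
    {
            struct fake_req failed_read  = { .rq_state = 0, .bio_rw = READ };
            struct fake_req failed_write = { .rq_state = RQ_WRITE, .bio_rw = WRITE };

            printf("failed READ completes now?  %d (0: postponed, will be retried)\n",
                   may_complete_now(&failed_read, false));
            printf("failed WRITE completes now? %d\n",
                   may_complete_now(&failed_write, false));
            return 0;
    }
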
@@ -420,10 +432,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,

         case ABORT_DISK_IO:
                 req->rq_state |= RQ_LOCAL_ABORTED;
-                if (req->rq_state & RQ_WRITE)
-                        req_may_be_completed_not_susp(req, m);
-                else
-                        goto goto_queue_for_net_read;
+                req_may_be_completed_not_susp(req, m);
                 break;

         case WRITE_COMPLETED_WITH_ERROR:
@@ -451,20 +460,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 D_ASSERT(!(req->rq_state & RQ_NET_MASK));

                 __drbd_chk_io_error(mdev, false);
-
-        goto_queue_for_net_read:
-
-                /* no point in retrying if there is no good remote data,
-                 * or we have no connection. */
-                if (mdev->state.pdsk != D_UP_TO_DATE) {
-                        req_may_be_completed_not_susp(req, m);
-                        break;
-                }
-
-                /* _req_mod(req,TO_BE_SENT); oops, recursion... */
-                req->rq_state |= RQ_NET_PENDING;
-                inc_ap_pending(mdev);
-                /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
+                break;

         case QUEUE_FOR_NET_READ:
                 /* READ or READA, and
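
These two hunks drop the goto_queue_for_net_read label: a read that failed locally is no longer redirected to the peer from inside the state machine (including the pdsk != D_UP_TO_DATE check and the recursion-avoiding RQ_NET_PENDING/inc_ap_pending sequence). The postponed request is resubmitted instead, and the normal submit path re-selects a source. A sketch of that re-selection follows; the state names mirror DRBD's, but the logic is a deliberate simplification, not kernel code:

    #include <stdio.h>

    enum disk_state { D_INCONSISTENT, D_UP_TO_DATE };

    struct node_state { enum disk_state local, peer; };

    /* Source selection as re-run on resubmit: prefer a healthy local
     * disk, fall back to an up-to-date peer, else fail the request. */
    static const char *pick_read_source(const struct node_state *s)
    {
            if (s->local == D_UP_TO_DATE)
                    return "local disk";
            if (s->peer == D_UP_TO_DATE)
                    return "peer (network read)";
            return "no good data anywhere: fail with -EIO";
    }

    int main(void)
    {
            struct node_state after_local_error = { D_INCONSISTENT, D_UP_TO_DATE };
            struct node_state all_bad           = { D_INCONSISTENT, D_INCONSISTENT };

            printf("retry after local read error -> %s\n",
                   pick_read_source(&after_local_error));
            printf("retry with no good data      -> %s\n",
                   pick_read_source(&all_bad));
            return 0;
    }
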
@@ -483,10 +479,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 set_bit(UNPLUG_REMOTE, &mdev->flags);

                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
+                D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
                 req->rq_state |= RQ_NET_QUEUED;
-                req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
-                        ? w_read_retry_remote
-                        : w_send_read_req;
+                req->w.cb = w_send_read_req;
                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
                 break;

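
With retried reads routed through resubmission, a request reaching QUEUE_FOR_NET_READ can no longer carry local-I/O state; the new D_ASSERT encodes that invariant and makes the w_read_retry_remote callback dead, so the callback choice collapses to w_send_read_req. A compilable model of the invariant, with flag values made up for illustration:

    #include <assert.h>

    enum {
            RQ_LOCAL_PENDING   = 1 << 0,
            RQ_LOCAL_COMPLETED = 1 << 1,
            RQ_NET_PENDING     = 1 << 8,
    };
    #define RQ_LOCAL_MASK (RQ_LOCAL_PENDING | RQ_LOCAL_COMPLETED)

    int main(void)
    {
            /* A retried read is reset before resubmission, so by the time
             * it is queued for the network, no local bits are set. */
            unsigned long rq_state = RQ_NET_PENDING;

            assert((rq_state & RQ_LOCAL_MASK) == 0);
            return 0;
    }
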
@@ -604,13 +599,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
                         atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

-                /* if it is still queued, we may not complete it here.
-                 * it will be canceled soon. */
-                if (!(req->rq_state & RQ_NET_QUEUED)) {
-                        if (p)
-                                goto goto_read_retry_local;
-                        req_may_be_completed(req, m); /* Allowed while state.susp */
-                }
+                req_may_be_completed(req, m); /* Allowed while state.susp */
                 break;

         case DISCARD_WRITE:
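
A side note on the accounting this hunk keeps: subtracting req->i.size >> 9 from ap_in_flight converts the request size from bytes to 512-byte sectors, since 2^9 = 512. A one-line check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned int size = 4096;       /* request size in bytes */

            assert((size >> 9) == 8);       /* 4 KiB == 8 sectors */
            return 0;
    }
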
@@ -668,27 +657,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,

                 req->rq_state |= RQ_NET_DONE;

-                if (!(req->rq_state & RQ_WRITE))
-                        goto goto_read_retry_local;
-
                 maybe_wakeup_conflicting_requests(req);
                 req_may_be_completed_not_susp(req, m);
                 /* else: done by HANDED_OVER_TO_NETWORK */
                 break;

-        goto_read_retry_local:
-                if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
-                        req_may_be_completed_not_susp(req, m);
-                        break;
-                }
-                D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
-                req->rq_state |= RQ_LOCAL_PENDING;
-
-                get_ldev(mdev);
-                req->w.cb = w_restart_disk_io;
-                drbd_queue_work(&mdev->tconn->data.work, &req->w);
-                break;
-
         case FAIL_FROZEN_DISK_IO:
                 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
                         break;
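
The last hunk deletes the goto_read_retry_local label, which used to re-arm local I/O in place (take an ldev reference and queue w_restart_disk_io). With RQ_POSTPONED, all read retries funnel through one resubmit point. A toy model of that funnel follows; the queue, the helper names, and the make_request() stand-in are hypothetical, not the kernel API:

    #include <stdio.h>

    #define MAX_RETRY 8

    struct request { int id; };

    static struct request *retry_q[MAX_RETRY];
    static int retry_n;

    /* Collect a postponed request for later resubmission. */
    static void push_retry(struct request *req)
    {
            if (retry_n < MAX_RETRY)
                    retry_q[retry_n++] = req;
    }

    /* Stand-in for __drbd_make_request(): the single submit path that
     * re-selects a local or remote source for the retried read. */
    static void make_request(struct request *req)
    {
            printf("resubmitting request %d through the normal path\n", req->id);
    }

    static void run_retry_work(void)
    {
            for (int i = 0; i < retry_n; i++)
                    make_request(retry_q[i]);
            retry_n = 0;
    }

    int main(void)
    {
            struct request failed_read = { .id = 42 };

            push_retry(&failed_read);       /* READ failed: RQ_POSTPONED set */
            run_retry_work();               /* later, from worker context */
            return 0;
    }
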