author    Philipp Reisner <philipp.reisner@linbit.com>  2010-06-23 11:18:51 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2010-10-14 09:05:08 -0400
commit    cfa03415a14dd0055f2ff8c3d348d4c1452acba6
tree      760aa04f76edd0333324705e86b7a7a306f457c4
parent    84dfb9f564208a0331131d1ab922382c7d61a553
drbd: Allow tl_restart() to do IO completion while IO is suspended
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers')
 drivers/block/drbd/drbd_req.c | 34 ++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)
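In outline, the patch factors the mdev->state.susp check out of _req_may_be_done() and into a new wrapper, _req_may_be_done_not_susp(). Ordinary completion paths in __req_mod() are switched to the wrapper, so they stay deferred while IO is suspended; the few call sites that tl_restart()/tl_clear() drive during suspension keep calling _req_may_be_done() directly (marked "Allowed while state.susp" in the hunks below). A minimal compilable sketch of the pattern, with simplified stand-in types and a stub completion body since the real drbd structures and completion logic live elsewhere:

/* Sketch only: stand-in types, not drbd's real definitions. */
#include <stdio.h>

struct bio_and_error { int error; };
struct drbd_conf { struct { unsigned susp:1; } state; };
struct drbd_request { struct drbd_conf *mdev; };

/* After the patch, the core completion path no longer checks
 * state.susp itself (stub body for illustration). */
static void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	(void)req;
	printf("completing request (error=%d)\n", m->error);
}

/* New wrapper: ordinary completion is a no-op while IO is suspended. */
static void _req_may_be_done_not_susp(struct drbd_request *req,
				      struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;

	if (!mdev->state.susp)
		_req_may_be_done(req, m);
}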
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 76b668245612..4e1e10d67c4b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -226,8 +226,6 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		return;
 	if (s & RQ_LOCAL_PENDING)
 		return;
-	if (mdev->state.susp)
-		return;
 
 	if (req->master_bio) {
 		/* this is data_received (remote read)
@@ -284,6 +282,14 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 	 * protocol A or B, barrier ack still pending... */
 }
 
+static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+{
+	struct drbd_conf *mdev = req->mdev;
+
+	if (!mdev->state.susp)
+		_req_may_be_done(req, m);
+}
+
 /*
  * checks whether there was an overlapping request
  * or ee already registered.
@@ -425,7 +431,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
@@ -434,7 +440,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
 		__drbd_chk_io_error(mdev, FALSE);
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
@@ -442,7 +448,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* it is legal to fail READA */
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
@@ -460,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* no point in retrying if there is no good remote data,
 		 * or we have no connection. */
 		if (mdev->state.pdsk != D_UP_TO_DATE) {
-			_req_may_be_done(req, m);
+			_req_may_be_done_not_susp(req, m);
 			break;
 		}
 
@@ -546,7 +552,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~RQ_NET_QUEUED;
 		/* if we did it right, tl_clear should be scheduled only after
 		 * this, so this should not be necessary! */
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		break;
 
 	case handed_over_to_network:
@@ -571,7 +577,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * "completed_ok" events came in, once we return from
 		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
 		 * whether it is done already, and end it. */
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		break;
 
 	case read_retry_remote_canceled:
@@ -587,7 +593,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* if it is still queued, we may not complete it here.
 		 * it will be canceled soon. */
 		if (!(req->rq_state & RQ_NET_QUEUED))
-			_req_may_be_done(req, m);
+			_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
 	case write_acked_by_peer_and_sis:
@@ -622,7 +628,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		break;
 
 	case neg_acked:
@@ -632,7 +638,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 
 		req->rq_state |= RQ_NET_DONE;
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		/* else: done by handed_over_to_network */
 		break;
 
@@ -640,7 +646,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 			break;
 
-		_req_may_be_done(req, m);
+		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
 	case restart_frozen_disk_io:
@@ -685,7 +691,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		}
 		D_ASSERT(req->rq_state & RQ_NET_SENT);
 		req->rq_state |= RQ_NET_DONE;
-		_req_may_be_done(req, m);
+		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
 	case data_received:
@@ -693,7 +699,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
 		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
-		_req_may_be_done(req, m);
+		_req_may_be_done_not_susp(req, m);
 		break;
 	};
 
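For illustration, a small hypothetical driver of the sketch above (not drbd code) showing why the split matters for tl_restart(): while state.susp is set, the wrapper is a no-op, but a restart path can still complete the request through the direct call.

int main(void)
{
	struct drbd_conf mdev = { .state = { .susp = 1 } };
	struct drbd_request req = { .mdev = &mdev };
	struct bio_and_error m = { .error = 0 };

	_req_may_be_done_not_susp(&req, &m); /* suspended: prints nothing */
	_req_may_be_done(&req, &m);          /* restart path: completes anyway */

	mdev.state.susp = 0;
	_req_may_be_done_not_susp(&req, &m); /* resumed: completes normally */
	return 0;
}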