Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--	drivers/block/drbd/drbd_req.c	57
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index ceb04a94aace..98251e2a7fb7 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -563,6 +563,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
 			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
 
+		if (!(req->rq_state & RQ_WRITE) &&
+		    mdev->state.disk == D_UP_TO_DATE &&
+		    !IS_ERR_OR_NULL(req->private_bio))
+			goto goto_read_retry_local;
+
 		/* if it is still queued, we may not complete it here.
 		 * it will be canceled soon. */
 		if (!(req->rq_state & RQ_NET_QUEUED))
@@ -625,10 +630,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 
 		req->rq_state |= RQ_NET_DONE;
+
+		if (!(req->rq_state & RQ_WRITE) &&
+		    mdev->state.disk == D_UP_TO_DATE &&
+		    !IS_ERR_OR_NULL(req->private_bio))
+			goto goto_read_retry_local;
+
 		_req_may_be_done_not_susp(req, m);
 		/* else: done by HANDED_OVER_TO_NETWORK */
 		break;
 
+	goto_read_retry_local:
+		req->rq_state |= RQ_LOCAL_PENDING;
+		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
+		generic_make_request(req->private_bio);
+		break;
+
 	case FAIL_FROZEN_DISK_IO:
 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 			break;
@@ -689,6 +706,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
 		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
+		if (!IS_ERR_OR_NULL(req->private_bio)) {
+			bio_put(req->private_bio);
+			req->private_bio = NULL;
+			put_ldev(mdev);
+		}
 		_req_may_be_done_not_susp(req, m);
 		break;
 	};
@@ -723,6 +745,35 @@ static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int
 	return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
 }
 
+static bool remote_due_to_read_balancing(struct drbd_conf *mdev)
+{
+	enum drbd_read_balancing rbm;
+	struct backing_dev_info *bdi;
+
+	if (mdev->state.pdsk < D_UP_TO_DATE)
+		return false;
+
+	rcu_read_lock();
+	rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+	rcu_read_unlock();
+
+	switch (rbm) {
+	case RB_CONGESTED_REMOTE:
+		bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+		return bdi_read_congested(bdi);
+	case RB_LEAST_PENDING:
+		return atomic_read(&mdev->local_cnt) >
+			atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+	case RB_ROUND_ROBIN:
+		return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+	case RB_PREFER_REMOTE:
+		return true;
+	case RB_PREFER_LOCAL:
+	default:
+		return false;
+	}
+}
+
 /*
  * complete_conflicting_writes - wait for any conflicting write requests
  *
@@ -790,6 +841,10 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
 			bio_put(req->private_bio);
 			req->private_bio = NULL;
 			put_ldev(mdev);
+		} else if (remote_due_to_read_balancing(mdev)) {
+			/* Keep the private bio in case we need it
+			   for a local retry */
+			local = 0;
 		}
 	}
 	remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
@@ -1017,7 +1072,7 @@ fail_free_complete:
 	if (req->rq_state & RQ_IN_ACT_LOG)
 		drbd_al_complete_io(mdev, &req->i);
 fail_and_free_req:
-	if (local) {
+	if (!IS_ERR_OR_NULL(req->private_bio)) {
 		bio_put(req->private_bio);
 		req->private_bio = NULL;
 		put_ldev(mdev);
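
Taken together, the hunks above change the read path in two places: __drbd_make_request() may now deliberately send a read to the peer when remote_due_to_read_balancing() says so (keeping the private bio around), and __req_mod() retries such a read against the local backing device via the new goto_read_retry_local label when the network path fails while the local disk is still D_UP_TO_DATE. The following is a minimal stand-alone sketch of that dispatch decision, under simplified assumptions; the struct and function names below are illustrative only and do not exist in the DRBD source.

/* Illustrative sketch only -- condenses the read-dispatch policy added by
 * this patch; the names dev_state, place_read and retry_read are hypothetical. */
#include <stdbool.h>

enum read_target { READ_LOCAL, READ_REMOTE };

struct dev_state {
	bool local_up_to_date;   /* stands in for mdev->state.disk == D_UP_TO_DATE */
	bool peer_up_to_date;    /* stands in for mdev->state.pdsk >= D_UP_TO_DATE */
	bool balance_to_remote;  /* stands in for remote_due_to_read_balancing(mdev) */
};

/* Initial placement of a READ, mirroring the __drbd_make_request() hunk:
 * a locally serviceable read may still be diverted to the peer by the
 * read-balancing policy. */
static enum read_target place_read(const struct dev_state *s)
{
	if (s->local_up_to_date && !s->balance_to_remote)
		return READ_LOCAL;
	/* otherwise go to the peer; if the peer is not D_UP_TO_DATE either,
	 * the real code fails the request instead */
	return READ_REMOTE;
}

/* Fallback when the remote read cannot complete, mirroring the new
 * goto_read_retry_local path in __req_mod(): re-issue the kept private
 * bio against the local backing device if the local disk is up to date. */
static enum read_target retry_read(const struct dev_state *s)
{
	return s->local_up_to_date ? READ_LOCAL : READ_REMOTE;
}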