diff options
author | Philipp Reisner <philipp.reisner@linbit.com> | 2010-05-27 09:07:43 -0400 |
---|---|---|
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2010-10-14 08:25:20 -0400 |
commit | 288f422ec13667de40b278535d2a5fb5c77352c4 (patch) | |
tree | fc8f594c05b05637a5052a41c603bbdad6f8641e | |
parent | 7e602c0aaf3e686c36cc742119f0f53f42e9befe (diff) |
drbd: Track all IO requests on the TL, not writes only
With that, the drbd_fail_pending_reads() function becomes obsolete.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
-rw-r--r-- | drivers/block/drbd/drbd_main.c | 2 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_receiver.c | 37 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 24 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_req.h | 7 |
4 files changed, 23 insertions(+), 47 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a9bc6bc62400..a86e6f1ff7f4 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -401,6 +401,8 @@ void tl_clear(struct drbd_conf *mdev) | |||
401 | /* ensure bit indicating barrier is required is clear */ | 401 | /* ensure bit indicating barrier is required is clear */ |
402 | clear_bit(CREATE_BARRIER, &mdev->flags); | 402 | clear_bit(CREATE_BARRIER, &mdev->flags); |
403 | 403 | ||
404 | memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); | ||
405 | |||
404 | spin_unlock_irq(&mdev->req_lock); | 406 | spin_unlock_irq(&mdev->req_lock); |
405 | } | 407 | } |
406 | 408 | ||
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 081522d3c742..88a5e1f4ec1d 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -3666,41 +3666,6 @@ static void drbdd(struct drbd_conf *mdev) | |||
3666 | } | 3666 | } |
3667 | } | 3667 | } |
3668 | 3668 | ||
3669 | static void drbd_fail_pending_reads(struct drbd_conf *mdev) | ||
3670 | { | ||
3671 | struct hlist_head *slot; | ||
3672 | struct hlist_node *pos; | ||
3673 | struct hlist_node *tmp; | ||
3674 | struct drbd_request *req; | ||
3675 | int i; | ||
3676 | |||
3677 | /* | ||
3678 | * Application READ requests | ||
3679 | */ | ||
3680 | spin_lock_irq(&mdev->req_lock); | ||
3681 | for (i = 0; i < APP_R_HSIZE; i++) { | ||
3682 | slot = mdev->app_reads_hash+i; | ||
3683 | hlist_for_each_entry_safe(req, pos, tmp, slot, colision) { | ||
3684 | /* it may (but should not any longer!) | ||
3685 | * be on the work queue; if that assert triggers, | ||
3686 | * we need to also grab the | ||
3687 | * spin_lock_irq(&mdev->data.work.q_lock); | ||
3688 | * and list_del_init here. */ | ||
3689 | D_ASSERT(list_empty(&req->w.list)); | ||
3690 | /* It would be nice to complete outside of spinlock. | ||
3691 | * But this is easier for now. */ | ||
3692 | _req_mod(req, connection_lost_while_pending); | ||
3693 | } | ||
3694 | } | ||
3695 | for (i = 0; i < APP_R_HSIZE; i++) | ||
3696 | if (!hlist_empty(mdev->app_reads_hash+i)) | ||
3697 | dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: " | ||
3698 | "%p, should be NULL\n", i, mdev->app_reads_hash[i].first); | ||
3699 | |||
3700 | memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); | ||
3701 | spin_unlock_irq(&mdev->req_lock); | ||
3702 | } | ||
3703 | |||
3704 | void drbd_flush_workqueue(struct drbd_conf *mdev) | 3669 | void drbd_flush_workqueue(struct drbd_conf *mdev) |
3705 | { | 3670 | { |
3706 | struct drbd_wq_barrier barr; | 3671 | struct drbd_wq_barrier barr; |
@@ -3770,8 +3735,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3770 | if (!mdev->state.susp) | 3735 | if (!mdev->state.susp) |
3771 | tl_clear(mdev); | 3736 | tl_clear(mdev); |
3772 | 3737 | ||
3773 | drbd_fail_pending_reads(mdev); | ||
3774 | |||
3775 | dev_info(DEV, "Connection closed\n"); | 3738 | dev_info(DEV, "Connection closed\n"); |
3776 | 3739 | ||
3777 | drbd_md_sync(mdev); | 3740 | drbd_md_sync(mdev); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 976d7941f71e..4a30e2cae56d 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -59,17 +59,19 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) | |||
59 | static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw) | 59 | static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw) |
60 | { | 60 | { |
61 | const unsigned long s = req->rq_state; | 61 | const unsigned long s = req->rq_state; |
62 | |||
63 | /* remove it from the transfer log. | ||
64 | * well, only if it had been there in the first | ||
65 | * place... if it had not (local only or conflicting | ||
66 | * and never sent), it should still be "empty" as | ||
67 | * initialized in drbd_req_new(), so we can list_del() it | ||
68 | * here unconditionally */ | ||
69 | list_del(&req->tl_requests); | ||
70 | |||
62 | /* if it was a write, we may have to set the corresponding | 71 | /* if it was a write, we may have to set the corresponding |
63 | * bit(s) out-of-sync first. If it had a local part, we need to | 72 | * bit(s) out-of-sync first. If it had a local part, we need to |
64 | * release the reference to the activity log. */ | 73 | * release the reference to the activity log. */ |
65 | if (rw == WRITE) { | 74 | if (rw == WRITE) { |
66 | /* remove it from the transfer log. | ||
67 | * well, only if it had been there in the first | ||
68 | * place... if it had not (local only or conflicting | ||
69 | * and never sent), it should still be "empty" as | ||
70 | * initialized in drbd_req_new(), so we can list_del() it | ||
71 | * here unconditionally */ | ||
72 | list_del(&req->tl_requests); | ||
73 | /* Set out-of-sync unless both OK flags are set | 75 | /* Set out-of-sync unless both OK flags are set |
74 | * (local only or remote failed). | 76 | * (local only or remote failed). |
75 | * Other places where we set out-of-sync: | 77 | * Other places where we set out-of-sync: |
@@ -517,8 +519,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
517 | D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0); | 519 | D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0); |
518 | 520 | ||
519 | req->epoch = mdev->newest_tle->br_number; | 521 | req->epoch = mdev->newest_tle->br_number; |
520 | list_add_tail(&req->tl_requests, | ||
521 | &mdev->newest_tle->requests); | ||
522 | 522 | ||
523 | /* increment size of current epoch */ | 523 | /* increment size of current epoch */ |
524 | mdev->newest_tle->n_writes++; | 524 | mdev->newest_tle->n_writes++; |
@@ -634,6 +634,9 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
634 | break; | 634 | break; |
635 | 635 | ||
636 | case barrier_acked: | 636 | case barrier_acked: |
637 | if (!(req->rq_state & RQ_WRITE)) | ||
638 | break; | ||
639 | |||
637 | if (req->rq_state & RQ_NET_PENDING) { | 640 | if (req->rq_state & RQ_NET_PENDING) { |
638 | /* barrier came in before all requests have been acked. | 641 | /* barrier came in before all requests have been acked. |
639 | * this is bad, because if the connection is lost now, | 642 | * this is bad, because if the connection is lost now, |
@@ -892,6 +895,9 @@ allocate_barrier: | |||
892 | remote = 0; | 895 | remote = 0; |
893 | } | 896 | } |
894 | 897 | ||
898 | |||
899 | list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); | ||
900 | |||
895 | /* NOTE remote first: to get the concurrent write detection right, | 901 | /* NOTE remote first: to get the concurrent write detection right, |
896 | * we must register the request before start of local IO. */ | 902 | * we must register the request before start of local IO. */ |
897 | if (remote) { | 903 | if (remote) { |
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 02d575d24518..47b931fe0366 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h | |||
@@ -183,6 +183,9 @@ enum drbd_req_state_bits { | |||
183 | 183 | ||
184 | /* keep this last, its for the RQ_NET_MASK */ | 184 | /* keep this last, its for the RQ_NET_MASK */ |
185 | __RQ_NET_MAX, | 185 | __RQ_NET_MAX, |
186 | |||
187 | /* Set when this is a write, clear for a read */ | ||
188 | __RQ_WRITE, | ||
186 | }; | 189 | }; |
187 | 190 | ||
188 | #define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING) | 191 | #define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING) |
@@ -201,6 +204,8 @@ enum drbd_req_state_bits { | |||
201 | /* 0x1f8 */ | 204 | /* 0x1f8 */ |
202 | #define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) | 205 | #define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) |
203 | 206 | ||
207 | #define RQ_WRITE (1UL << __RQ_WRITE) | ||
208 | |||
204 | /* epoch entries */ | 209 | /* epoch entries */ |
205 | static inline | 210 | static inline |
206 | struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector) | 211 | struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector) |
@@ -253,7 +258,7 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, | |||
253 | if (likely(req)) { | 258 | if (likely(req)) { |
254 | bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ | 259 | bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ |
255 | 260 | ||
256 | req->rq_state = 0; | 261 | req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; |
257 | req->mdev = mdev; | 262 | req->mdev = mdev; |
258 | req->master_bio = bio_src; | 263 | req->master_bio = bio_src; |
259 | req->private_bio = bio; | 264 | req->private_bio = bio; |