author		Vivek Goyal <vgoyal@redhat.com>		2019-10-15 13:46:25 -0400
committer	Miklos Szeredi <mszeredi@redhat.com>	2019-10-21 09:57:07 -0400
commit		c17ea009610366146ec409fd6dc277e0f2510b10
tree		cedb3dac2277ac7d19af9646cb93b0cf95aa0e7e
parent		5dbe190f341206a7896f7e40c1e3a36933d812f3
virtiofs: Count pending forgets as in_flight forgets
If the virtqueue is full, we put forget requests on a list and a worker
dispatches these forgets later. As of now we don't count these forgets in
the fsvq->in_flight variable. This means that when the queue is being
drained, we need special logic to first drain these pending requests and
then wait for fsvq->in_flight to go to zero.
By counting pending forgets in fsvq->in_flight, we can get rid of that
special logic and just wait for in_flight to go to zero. The worker thread
will kick and drain all the forgets anyway, bringing in_flight down to zero.
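
A simplified sketch of the idea, not part of this patch (the function name
and the polling interval are illustrative only): draining a queue becomes
"wait for in_flight to hit zero, then flush the workers".

	static void virtio_fs_drain_queue_sketch(struct virtio_fs_vq *fsvq)
	{
		while (1) {
			spin_lock(&fsvq->lock);
			if (!fsvq->in_flight) {
				spin_unlock(&fsvq->lock);
				break;
			}
			spin_unlock(&fsvq->lock);
			/* let the dispatch worker submit any queued forgets */
			usleep_range(1000, 2000);
		}

		/* no more pending work; flush completion and dispatch workers */
		flush_work(&fsvq->done_work);
		flush_delayed_work(&fsvq->dispatch_work);
	}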
I also need similar logic for the normal request queue in the next patch,
where request submission is deferred to the worker context if the queue is
full. This simplifies the code a bit.
Also add two helper functions to increment/decrement in_flight. The
decrement helper will later be used to trigger a completion when in_flight
reaches zero.
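
To illustrate that later use (a hedged sketch, not part of this patch; the
in_flight_zero completion member is an assumption):

	/*
	 * Hypothetical follow-up: if struct virtio_fs_vq later gains a
	 * 'struct completion in_flight_zero' member, the decrement helper
	 * can signal it, so the drain path can sleep in
	 * wait_for_completion() instead of polling. Still called with
	 * fsvq->lock held.
	 */
	static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
	{
		WARN_ON(fsvq->in_flight <= 0);
		fsvq->in_flight--;
		if (!fsvq->in_flight)
			complete(&fsvq->in_flight_zero);
	}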
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
 fs/fuse/virtio_fs.c | 44 ++++++++++++++++++++------------------------
 1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 6d153e70c87b..3ea613d5e34f 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -67,6 +67,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
 	return &vq_to_fsvq(vq)->fud->pq;
 }
 
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+	fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+	WARN_ON(fsvq->in_flight <= 0);
+	fsvq->in_flight--;
+}
+
 static void release_virtio_fs_obj(struct kref *ref)
 {
 	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
@@ -110,22 +123,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
 	flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
-	struct virtio_fs_forget *forget;
-
-	spin_lock(&fsvq->lock);
-	while (1) {
-		forget = list_first_entry_or_null(&fsvq->queued_reqs,
-						  struct virtio_fs_forget, list);
-		if (!forget)
-			break;
-		list_del(&forget->list);
-		kfree(forget);
-	}
-	spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
 	struct virtio_fs_vq *fsvq;
@@ -133,9 +130,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 
 	for (i = 0; i < fs->nvqs; i++) {
 		fsvq = &fs->vqs[i];
-		if (i == VQ_HIPRIO)
-			drain_hiprio_queued_reqs(fsvq);
-
 		virtio_fs_drain_queue(fsvq);
 	}
 }
@@ -254,7 +248,7 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 
 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
 			kfree(req);
-			fsvq->in_flight--;
+			dec_in_flight_req(fsvq);
 		}
 	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
 	spin_unlock(&fsvq->lock);
@@ -306,6 +300,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 
 		list_del(&forget->list);
 		if (!fsvq->connected) {
+			dec_in_flight_req(fsvq);
 			spin_unlock(&fsvq->lock);
 			kfree(forget);
 			continue;
@@ -327,13 +322,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 			} else {
 				pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
 					 ret);
+				dec_in_flight_req(fsvq);
 				kfree(forget);
 			}
 			spin_unlock(&fsvq->lock);
 			return;
 		}
 
-		fsvq->in_flight++;
 		notify = virtqueue_kick_prepare(vq);
 		spin_unlock(&fsvq->lock);
 
@@ -472,7 +467,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 
 		fuse_request_end(fc, req);
 		spin_lock(&fsvq->lock);
-		fsvq->in_flight--;
+		dec_in_flight_req(fsvq);
 		spin_unlock(&fsvq->lock);
 	}
 }
@@ -730,6 +725,7 @@ __releases(fiq->lock)
 			list_add_tail(&forget->list, &fsvq->queued_reqs);
 			schedule_delayed_work(&fsvq->dispatch_work,
 					      msecs_to_jiffies(1));
+			inc_in_flight_req(fsvq);
 		} else {
 			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
 				 ret);
@@ -739,7 +735,7 @@ __releases(fiq->lock)
 		goto out;
 	}
 
-	fsvq->in_flight++;
+	inc_in_flight_req(fsvq);
 	notify = virtqueue_kick_prepare(vq);
 
 	spin_unlock(&fsvq->lock);
@@ -921,7 +917,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 	/* matches barrier in request_wait_answer() */
 	smp_mb__after_atomic();
 
-	fsvq->in_flight++;
+	inc_in_flight_req(fsvq);
 	notify = virtqueue_kick_prepare(vq);
 
 	spin_unlock(&fsvq->lock);