path: root/net/9p/trans_virtio.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2011-03-22 19:26:10 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-03-22 19:26:10 -0400
commit    ab70a1d7c7fc6665d83f41b5ce790ff8376e0b81 (patch)
tree      dee8acafa2dfb87cde4ddba68eff5e1c75557469 /net/9p/trans_virtio.c
parent    0adfc56ce8fdc5c17630434e49f30536ba7b8559 (diff)
parent    68da9ba4eeadae86ad42e52b80822fbd56971267 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs:
  [net/9p]: Introduce basic flow-control for VirtIO transport.
  9p: use the updated offset given by generic_write_checks
  [net/9p] Don't re-pin pages on retrying virtqueue_add_buf().
  [net/9p] Set the condition just before waking up.
  [net/9p] unconditional wake_up to proc waiting for space on VirtIO ring
  fs/9p: Add v9fs_dentry2v9ses
  fs/9p: Attach writeback_fid on first open with WR flag
  fs/9p: Open writeback fid in O_SYNC mode
  fs/9p: Use truncate_setsize instead of vmtruncate
  net/9p: Fix compile warning
  net/9p: Convert the in the 9p rpc call path to GFP_NOFS
  fs/9p: Fix race in initializing writeback fid
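The first item above, the VirtIO flow-control change, is the one that touches this file: zero-copy requests pin user pages before they are handed to the virtqueue, so the patch caps the total number of pinned pages and makes request submission sleep until completions release some. Below is a minimal userspace sketch of that throttling pattern, not the kernel code itself; the names pinned, max_pinned, pin_pages() and unpin_pages() are invented for illustration, while the patch uses the global vp_pinned counter, the vp_wq wait queue, chan->p9_max_pages, wait_event_interruptible() and wake_up().

/*
 * Minimal userspace sketch of the flow-control pattern: a shared
 * counter of "pinned" pages, a cap, and a wait/wake pair.  All names
 * here are illustrative stand-ins for vp_pinned, vp_wq and
 * chan->p9_max_pages in the actual patch.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t space = PTHREAD_COND_INITIALIZER;
static long pinned;			/* pages currently accounted as pinned */
static const long max_pinned = 1024;	/* stand-in for p9_max_pages */

/* Request path: block until the pages fit under the cap, then account them. */
static void pin_pages(long nr_pages)
{
	pthread_mutex_lock(&lock);
	while (pinned >= max_pinned)
		pthread_cond_wait(&space, &lock);
	pinned += nr_pages;
	pthread_mutex_unlock(&lock);
}

/* Completion path: drop the accounting and wake any waiting submitter. */
static void unpin_pages(long nr_pages)
{
	pthread_mutex_lock(&lock);
	pinned -= nr_pages;
	pthread_cond_broadcast(&space);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pin_pages(16);		/* throttle, then pin */
	unpin_pages(16);	/* release and wake */
	printf("pinned after round trip: %ld\n", pinned);
	return 0;
}

The kernel diff below has the same shape: p9_virtio_request() waits on vp_wq whenever vp_pinned reaches chan->p9_max_pages, and req_done() does atomic_sub() on vp_pinned plus wake_up(&vp_wq) once a request's pages are released.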
Diffstat (limited to 'net/9p/trans_virtio.c')
-rw-r--r--  net/9p/trans_virtio.c  72
1 files changed, 46 insertions, 26 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9b550ed9c711..e8f046b07182 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
 #include <net/9p/client.h>
 #include <net/9p/transport.h>
 #include <linux/scatterlist.h>
+#include <linux/swap.h>
 #include <linux/virtio.h>
 #include <linux/virtio_9p.h>
 #include "trans_common.h"
@@ -51,6 +52,8 @@
 
 /* a single mutex to manage channel initialization and attachment */
 static DEFINE_MUTEX(virtio_9p_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
+static atomic_t vp_pinned = ATOMIC_INIT(0);
 
 /**
  * struct virtio_chan - per-instance transport information
@@ -78,7 +81,10 @@ struct virtio_chan {
 	struct virtqueue *vq;
 	int ring_bufs_avail;
 	wait_queue_head_t *vc_wq;
-
+	/* This is global limit. Since we don't have a global structure,
+	 * will be placing it in each channel.
+	 */
+	int p9_max_pages;
 	/* Scatterlist: can be too big for stack. */
 	struct scatterlist sg[VIRTQUEUE_NUM];
 
@@ -141,34 +147,36 @@ static void req_done(struct virtqueue *vq)
 
 	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
 
-	do {
+	while (1) {
 		spin_lock_irqsave(&chan->lock, flags);
 		rc = virtqueue_get_buf(chan->vq, &len);
 
-		if (rc != NULL) {
-			if (!chan->ring_bufs_avail) {
-				chan->ring_bufs_avail = 1;
-				wake_up(chan->vc_wq);
-			}
-			spin_unlock_irqrestore(&chan->lock, flags);
-			P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
-			P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
-					rc->tag);
-			req = p9_tag_lookup(chan->client, rc->tag);
-			req->status = REQ_STATUS_RCVD;
-			if (req->tc->private) {
-				struct trans_rpage_info *rp = req->tc->private;
-				/*Release pages */
-				p9_release_req_pages(rp);
-				if (rp->rp_alloc)
-					kfree(rp);
-				req->tc->private = NULL;
-			}
-			p9_client_cb(chan->client, req);
-		} else {
+		if (rc == NULL) {
 			spin_unlock_irqrestore(&chan->lock, flags);
+			break;
+		}
+
+		chan->ring_bufs_avail = 1;
+		spin_unlock_irqrestore(&chan->lock, flags);
+		/* Wakeup if anyone waiting for VirtIO ring space. */
+		wake_up(chan->vc_wq);
+		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
+		req = p9_tag_lookup(chan->client, rc->tag);
+		if (req->tc->private) {
+			struct trans_rpage_info *rp = req->tc->private;
+			int p = rp->rp_nr_pages;
+			/*Release pages */
+			p9_release_req_pages(rp);
+			atomic_sub(p, &vp_pinned);
+			wake_up(&vp_wq);
+			if (rp->rp_alloc)
+				kfree(rp);
+			req->tc->private = NULL;
 		}
-	} while (rc != NULL);
+		req->status = REQ_STATUS_RCVD;
+		p9_client_cb(chan->client, req);
+	}
 }
 
 /**
@@ -263,7 +271,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
-req_retry:
 	req->status = REQ_STATUS_SENT;
 
 	if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
@@ -271,6 +278,14 @@ req_retry:
 		int rpinfo_size = sizeof(struct trans_rpage_info) +
 			sizeof(struct page *) * nr_pages;
 
+		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+			err = wait_event_interruptible(vp_wq,
+			      atomic_read(&vp_pinned) < chan->p9_max_pages);
+			if (err == -ERESTARTSYS)
+				return err;
+			P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
+		}
+
 		if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
 			/* We can use sdata */
 			req->tc->private = req->tc->sdata + req->tc->size;
@@ -293,9 +308,12 @@ req_retry:
 			if (rpinfo->rp_alloc)
 				kfree(rpinfo);
 			return err;
+		} else {
+			atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
 		}
 	}
 
+req_retry_pinned:
 	spin_lock_irqsave(&chan->lock, flags);
 
 	/* Handle out VirtIO ring buffers */
@@ -356,7 +374,7 @@ req_retry:
 				return err;
 
 			P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
-			goto req_retry;
+			goto req_retry_pinned;
 		} else {
 			spin_unlock_irqrestore(&chan->lock, flags);
 			P9_DPRINTK(P9_DEBUG_TRANS,
@@ -453,6 +471,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 	}
 	init_waitqueue_head(chan->vc_wq);
 	chan->ring_bufs_avail = 1;
+	/* Ceiling limit to avoid denial of service attacks */
+	chan->p9_max_pages = nr_free_buffer_pages()/4;
 
 	mutex_lock(&virtio_9p_lock);
 	list_add_tail(&chan->chan_list, &virtio_chan_list);
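For a sense of scale, the last hunk sets chan->p9_max_pages in p9_virtio_probe() to nr_free_buffer_pages()/4, a ceiling meant to keep a single transport from pinning an unbounded share of memory. The snippet below is only a rough userspace analogue of that calculation: sysinfo() free RAM is an assumption standing in for nr_free_buffer_pages(), which actually counts free pages in the buffer zones, so treat the output as an order-of-magnitude illustration.

/*
 * Rough userspace analogue of the ceiling chosen in p9_virtio_probe():
 * roughly a quarter of currently free memory, expressed in pages.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;
	long page_size = sysconf(_SC_PAGESIZE);

	if (sysinfo(&si) != 0) {
		perror("sysinfo");
		return 1;
	}

	/* Convert free RAM to pages, then take a quarter of it. */
	long free_pages = (long)(si.freeram * si.mem_unit / page_size);
	printf("approx. free pages: %ld, quarter ceiling: %ld\n",
	       free_pages, free_pages / 4);
	return 0;
}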