aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorVenkateswararao Jujjuri (JV) <jvrao@linux.vnet.ibm.com>2011-03-18 18:49:48 -0400
committerEric Van Hensbergen <ericvh@gmail.com>2011-03-22 17:32:50 -0400
commit68da9ba4eeadae86ad42e52b80822fbd56971267 (patch)
treec832a4b1ff3e1bf97601183d11dfe7149a0da41c /net
parentaaf0ef1d2bce05cfd06cf29c96a6973df4d0a6a8 (diff)
[net/9p]: Introduce basic flow-control for VirtIO transport.
Recent zerocopy work in the 9P VirtIO transport maps and pins user buffers into kernel memory for the server to work on them. Since the user process can initiate this kind of pinning with a simple read/write call, thousands of IO threads initiated by the user process can hog the system resources and could result in a denial of service. This patch introduces flow control to avoid that extreme scenario. The ceiling limit used to avoid denial-of-service attacks is set relatively high (nr_free_buffer_pages()/4) so that it won't interfere with regular usage, but can step in during extreme cases to prevent a total system hang. Since we don't have a global structure to accommodate this variable, I chose the virtio_chan as the home for this. Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com> Reviewed-by: Badari Pulavarty <pbadari@us.ibm.com> Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
Diffstat (limited to 'net')
-rw-r--r--net/9p/trans_virtio.c23
1 files changed, 22 insertions, 1 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index c6e1ae2fb926..e8f046b07182 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
43#include <net/9p/client.h> 43#include <net/9p/client.h>
44#include <net/9p/transport.h> 44#include <net/9p/transport.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/swap.h>
46#include <linux/virtio.h> 47#include <linux/virtio.h>
47#include <linux/virtio_9p.h> 48#include <linux/virtio_9p.h>
48#include "trans_common.h" 49#include "trans_common.h"
@@ -51,6 +52,8 @@
51 52
52/* a single mutex to manage channel initialization and attachment */ 53/* a single mutex to manage channel initialization and attachment */
53static DEFINE_MUTEX(virtio_9p_lock); 54static DEFINE_MUTEX(virtio_9p_lock);
55static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
56static atomic_t vp_pinned = ATOMIC_INIT(0);
54 57
55/** 58/**
56 * struct virtio_chan - per-instance transport information 59 * struct virtio_chan - per-instance transport information
@@ -78,7 +81,10 @@ struct virtio_chan {
78 struct virtqueue *vq; 81 struct virtqueue *vq;
79 int ring_bufs_avail; 82 int ring_bufs_avail;
80 wait_queue_head_t *vc_wq; 83 wait_queue_head_t *vc_wq;
81 84 /* This is global limit. Since we don't have a global structure,
85 * will be placing it in each channel.
86 */
87 int p9_max_pages;
82 /* Scatterlist: can be too big for stack. */ 88 /* Scatterlist: can be too big for stack. */
83 struct scatterlist sg[VIRTQUEUE_NUM]; 89 struct scatterlist sg[VIRTQUEUE_NUM];
84 90
@@ -159,8 +165,11 @@ static void req_done(struct virtqueue *vq)
159 req = p9_tag_lookup(chan->client, rc->tag); 165 req = p9_tag_lookup(chan->client, rc->tag);
160 if (req->tc->private) { 166 if (req->tc->private) {
161 struct trans_rpage_info *rp = req->tc->private; 167 struct trans_rpage_info *rp = req->tc->private;
168 int p = rp->rp_nr_pages;
162 /*Release pages */ 169 /*Release pages */
163 p9_release_req_pages(rp); 170 p9_release_req_pages(rp);
171 atomic_sub(p, &vp_pinned);
172 wake_up(&vp_wq);
164 if (rp->rp_alloc) 173 if (rp->rp_alloc)
165 kfree(rp); 174 kfree(rp);
166 req->tc->private = NULL; 175 req->tc->private = NULL;
@@ -269,6 +278,14 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
269 int rpinfo_size = sizeof(struct trans_rpage_info) + 278 int rpinfo_size = sizeof(struct trans_rpage_info) +
270 sizeof(struct page *) * nr_pages; 279 sizeof(struct page *) * nr_pages;
271 280
281 if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
282 err = wait_event_interruptible(vp_wq,
283 atomic_read(&vp_pinned) < chan->p9_max_pages);
284 if (err == -ERESTARTSYS)
285 return err;
286 P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
287 }
288
272 if (rpinfo_size <= (req->tc->capacity - req->tc->size)) { 289 if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
273 /* We can use sdata */ 290 /* We can use sdata */
274 req->tc->private = req->tc->sdata + req->tc->size; 291 req->tc->private = req->tc->sdata + req->tc->size;
@@ -291,6 +308,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
291 if (rpinfo->rp_alloc) 308 if (rpinfo->rp_alloc)
292 kfree(rpinfo); 309 kfree(rpinfo);
293 return err; 310 return err;
311 } else {
312 atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
294 } 313 }
295 } 314 }
296 315
@@ -452,6 +471,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
452 } 471 }
453 init_waitqueue_head(chan->vc_wq); 472 init_waitqueue_head(chan->vc_wq);
454 chan->ring_bufs_avail = 1; 473 chan->ring_bufs_avail = 1;
474 /* Ceiling limit to avoid denial of service attacks */
475 chan->p9_max_pages = nr_free_buffer_pages()/4;
455 476
456 mutex_lock(&virtio_9p_lock); 477 mutex_lock(&virtio_9p_lock);
457 list_add_tail(&chan->chan_list, &virtio_chan_list); 478 list_add_tail(&chan->chan_list, &virtio_chan_list);