author     Venkateswararao Jujjuri (JV) <jvrao@linux.vnet.ibm.com>    2011-01-28 18:22:36 -0500
committer  Eric Van Hensbergen <ericvh@gmail.com>    2011-03-15 10:57:35 -0400
commit     4038866dab4e461e0ef144458bad9d70ce0c98c1 (patch)
tree       b1e90818632d5334a952d6b2d407523d36f22fcd
parent     9bb6c10a4ed48aef49a7243a6f798694722cf380 (diff)
[net/9p] Add gup/zero_copy support to VirtIO transport layer.
Modify p9_virtio_request() and req_done() functions to support additional
payload sent down to the transport layer through tc->pubuf and tc->pkbuf.

Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
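For context, the zero-copy path relies on payload fields that this series adds to the 9P request fcall: pbuf_size plus pubuf (a user-space pointer) or pkbuf (a kernel pointer). The snippet below is purely illustrative and not part of the patch; attach_payload() is a hypothetical helper, and the field types are assumed from the way the transport code below uses them.

    /* Hypothetical illustration (not in this patch): how a caller could hand
     * a zero-copy payload to the transport through the fcall fields used
     * below.  Exactly one of ubuf/kbuf is expected to be set; pbuf_size is
     * the number of payload bytes that live outside tc->sdata. */
    static void attach_payload(struct p9_fcall *tc, char __user *ubuf,
                               char *kbuf, size_t count)
    {
            tc->pubuf     = ubuf;   /* user buffer: pinned with gup at submit */
            tc->pkbuf     = kbuf;   /* kernel buffer: packed by address */
            tc->pbuf_size = count;  /* payload bytes outside tc->sdata */
    }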
-rw-r--r--   net/9p/trans_common.h   |   3
-rw-r--r--   net/9p/trans_virtio.c   | 128
2 files changed, 126 insertions(+), 5 deletions(-)
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 04977e0ad938..76309223bb02 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,6 +12,9 @@
  *
  */
 
+/* TRUE if it is user context */
+#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
+
 /**
  * struct trans_rpage_info - To store mapped page information in PDU.
  * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
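A note on the new macro: segment_eq(get_fs(), KERNEL_DS) is true while the caller's address limit has been raised to KERNEL_DS, so P9_IS_USER_CONTEXT is simply the inverse test. The sketch below is not part of the patch; it only shows the situation the macro guards against, using the get_fs()/set_fs() API of that era.

    #include <linux/uaccess.h>  /* get_fs(), set_fs(), KERNEL_DS, segment_eq() */
    #include "trans_common.h"   /* P9_IS_USER_CONTEXT */

    /* Sketch only: once a caller wraps its I/O in set_fs(KERNEL_DS), as the
     * kernel does when it reads an executable's pages itself, the "payload"
     * pointer is a kernel address and must not be handed to
     * get_user_pages().  P9_IS_USER_CONTEXT is false in exactly that window. */
    static void demo_kernel_context_read(void)
    {
            mm_segment_t old_fs = get_fs();

            set_fs(KERNEL_DS);
            WARN_ON(P9_IS_USER_CONTEXT);    /* false: no longer user context */
            set_fs(old_fs);
    }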
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index c8f3f72ab20e..4b236de132da 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -45,6 +45,7 @@
 #include <linux/scatterlist.h>
 #include <linux/virtio.h>
 #include <linux/virtio_9p.h>
+#include "trans_common.h"
 
 #define VIRTQUEUE_NUM 128
 
@@ -155,6 +156,14 @@ static void req_done(struct virtqueue *vq)
 					rc->tag);
 			req = p9_tag_lookup(chan->client, rc->tag);
 			req->status = REQ_STATUS_RCVD;
+			if (req->tc->private) {
+				struct trans_rpage_info *rp = req->tc->private;
+				/* Release pages */
+				p9_release_req_pages(rp);
+				if (rp->rp_alloc)
+					kfree(rp);
+				req->tc->private = NULL;
+			}
 			p9_client_cb(chan->client, req);
 		} else {
 			spin_unlock_irqrestore(&chan->lock, flags);
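The pages pinned at submission time keep their extra reference until the server's reply arrives, which is why they are released here in the completion path, and why kfree() is gated on rp_alloc: the structure may live in the PDU's spare sdata space rather than in a separate allocation, as the p9_virtio_request() hunk below shows. p9_release_req_pages() itself comes from net/9p/trans_common.c earlier in this series; a helper of roughly the following shape would do the job. This is a sketch, not the actual implementation, and it assumes trans_rpage_info carries a page count (rp_nr_pages) next to rp_data.

    #include <linux/mm.h>   /* put_page() */

    /* Sketch only: drop the get_user_pages() references taken at submit
     * time.  rp_nr_pages is an assumed field name used for illustration. */
    static void release_req_pages_sketch(struct trans_rpage_info *rp)
    {
            int i;

            for (i = 0; i < rp->rp_nr_pages; i++)
                    put_page(rp->rp_data[i]);
    }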
@@ -203,6 +212,38 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 }
 
 /**
+ * pack_sg_list_p - Just like pack_sg_list(), but takes a list of pages
+ * instead of a buffer.
+ * @sg: scatter/gather list to pack into
+ * @start: which segment of the sg_list to start at
+ * @limit: maximum number of sg entries that may be used
+ * @pdata_off: offset into the first page
+ * @pdata: a list of pages to add into sg
+ * @count: amount of data to pack into the scatter/gather list
+ */
+static int
+pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
+		struct page **pdata, int count)
+{
+	int s;
+	int i = 0;
+	int index = start;
+
+	if (pdata_off) {
+		s = min((int)(PAGE_SIZE - pdata_off), count);
+		sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
+		count -= s;
+	}
+
+	while (count) {
+		BUG_ON(index > limit);
+		s = min((int)PAGE_SIZE, count);
+		sg_set_page(&sg[index++], pdata[i++], s, 0);
+		count -= s;
+	}
+	return index - start;
+}
+
+/**
  * p9_virtio_request - issue a request
  * @client: client instance issuing the request
  * @req: request to be issued
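To make the contract of pack_sg_list_p() concrete, here is a worked usage sketch (illustrative values, assuming PAGE_SIZE is 4096): packing an 8192-byte pinned payload whose data starts 512 bytes into its first page yields three sg entries of 3584, 4096 and 512 bytes, and the function returns 3.

    /* Illustrative only: a worked call, as it might appear inside
     * trans_virtio.c (the pages are assumed to be pinned already). */
    static int demo_pack_pinned_pages(struct page **pages /* 3 pinned pages */)
    {
            struct scatterlist sg[VIRTQUEUE_NUM];

            sg_init_table(sg, VIRTQUEUE_NUM);
            /* 8192 payload bytes starting 512 bytes into the first page:
             * entries of 3584, 4096 and 512 bytes are emitted, return is 3. */
            return pack_sg_list_p(sg, 0, VIRTQUEUE_NUM, 512, pages, 8192);
    }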
@@ -212,22 +253,97 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 static int
 p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
-	int in, out;
+	int in, out, inp, outp;
 	struct virtio_chan *chan = client->trans;
 	char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
 	unsigned long flags;
-	int err;
+	size_t pdata_off = 0;
+	struct trans_rpage_info *rpinfo = NULL;
+	int err, pdata_len = 0;
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
 req_retry:
 	req->status = REQ_STATUS_SENT;
 
+	if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
+		int nr_pages = p9_nr_pages(req);
+		int rpinfo_size = sizeof(struct trans_rpage_info) +
+			sizeof(struct page *) * nr_pages;
+
+		if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
+			/* We can use sdata */
+			req->tc->private = req->tc->sdata + req->tc->size;
+			rpinfo = (struct trans_rpage_info *)req->tc->private;
+			rpinfo->rp_alloc = 0;
+		} else {
+			req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
+			if (!req->tc->private) {
+				P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
+					"private kmalloc returned NULL");
+				return -ENOMEM;
+			}
+			rpinfo = (struct trans_rpage_info *)req->tc->private;
+			rpinfo->rp_alloc = 1;
+		}
+
+		err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
+				req->tc->id == P9_TREAD ? 1 : 0);
+		if (err < 0) {
+			if (rpinfo->rp_alloc)
+				kfree(rpinfo);
+			return err;
+		}
+	}
+
 	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Handle out VirtIO ring buffers */
 	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
 			req->tc->size);
-	in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
-			client->msize);
+
+	if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
+		/* We have an additional write payload buffer to take care of */
+		if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+			outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+					pdata_off, rpinfo->rp_data, pdata_len);
+		} else {
+			char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+					req->tc->pkbuf;
+			outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
+					req->tc->pbuf_size);
+		}
+		out += outp;
+	}
+
+	/* Handle in VirtIO ring buffers */
+	if (req->tc->pbuf_size &&
+	    ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
+		/*
+		 * Take care of the additional read payload.
+		 * 11 is the read/write header: PDU header (7) + IO size (4).
+		 * Arrange it so that the server places the header in the
+		 * allocated memory and the payload onto the user buffer.
+		 */
+		inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+		/*
+		 * Running executables in the filesystem may result in
+		 * a read request with a kernel buffer as opposed to a user
+		 * buffer.
+		 */
+		if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+			in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
+					pdata_off, rpinfo->rp_data, pdata_len);
+		} else {
+			char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+					req->tc->pkbuf;
+			in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
+					pbuf, req->tc->pbuf_size);
+		}
+		in += inp;
+	} else {
+		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
+				client->msize);
+	}
 
 	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
 	if (err < 0) {
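The magic number 11 in the read path above is worth spelling out: it is the standard 9P reply framing that must land in kernel memory before the payload can go straight to the caller's pages. With the usual Rread/Rreaddir layout (size[4] type[1] tag[2] header followed by count[4]), the "in" descriptor chain for a zero-copy read ends up as:

    size[4] + type[1] + tag[2]      7 bytes     -> rdata (kernel memory)
    count[4]                        4 bytes     -> rdata (kernel memory)
    data[count]                  pdata_len      -> pinned user pages (pack_sg_list_p)

So the server writes the reply header where the client can parse it and the file data directly into the user buffer, which is exactly the arrangement the comment in the hunk describes.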
@@ -246,6 +362,8 @@ req_retry:
 			P9_DPRINTK(P9_DEBUG_TRANS,
 				"9p debug: "
 				"virtio rpc add_buf returned failure");
+			if (rpinfo && rpinfo->rp_alloc)
+				kfree(rpinfo);
 			return -EIO;
 		}
 	}