Diffstat (limited to 'net/9p/trans_virtio.c')
-rw-r--r--  net/9p/trans_virtio.c  137
1 file changed, 79 insertions(+), 58 deletions(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 36a1a739ad68..e62bcbbabb5e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -217,15 +217,15 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
  * @start: which segment of the sg_list to start at
  * @pdata: a list of pages to add into sg.
  * @nr_pages: number of pages to pack into the scatter/gather list
- * @data: data to pack into scatter/gather list
+ * @offs: amount of data in the beginning of first page _not_ to pack
  * @count: amount of data to pack into the scatter/gather list
  */
 static int
 pack_sg_list_p(struct scatterlist *sg, int start, int limit,
-	       struct page **pdata, int nr_pages, char *data, int count)
+	       struct page **pdata, int nr_pages, size_t offs, int count)
 {
 	int i = 0, s;
-	int data_off;
+	int data_off = offs;
 	int index = start;
 
 	BUG_ON(nr_pages > (limit - start));
@@ -233,16 +233,14 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
 	 * if the first page doesn't start at
 	 * page boundary find the offset
 	 */
-	data_off = offset_in_page(data);
 	while (nr_pages) {
-		s = rest_of_page(data);
+		s = PAGE_SIZE - data_off;
 		if (s > count)
 			s = count;
 		/* Make sure we don't terminate early. */
 		sg_unmark_end(&sg[index]);
 		sg_set_page(&sg[index++], pdata[i++], s, data_off);
 		data_off = 0;
-		data += s;
 		count -= s;
 		nr_pages--;
 	}
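
With this change pack_sg_list_p() no longer needs a kernel virtual address to find the first-page offset: the caller hands in `offs` directly and the loop simply carves `count` bytes into page-sized pieces, with only the first piece allowed to start mid-page. Below is a minimal user-space sketch of that carving arithmetic (plain C; the `struct seg` type, the PAGE_SIZE value and the example numbers are illustrative only, not the kernel scatterlist API):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Illustrative stand-in for one scatter/gather entry:
 * which page, the offset into it, and the segment length. */
struct seg {
	int page;
	int off;
	int len;
};

/* Carve `count` bytes, starting `offs` bytes into the first page,
 * into per-page segments -- the same arithmetic the reworked
 * pack_sg_list_p() uses: s = PAGE_SIZE - data_off, clamped to count,
 * and data_off drops to 0 after the first page. */
static int pack_segments(struct seg *segs, int nr_pages, size_t offs, int count)
{
	int data_off = offs;
	int i = 0;

	while (nr_pages) {
		int s = PAGE_SIZE - data_off;

		if (s > count)
			s = count;
		segs[i].page = i;
		segs[i].off = data_off;
		segs[i].len = s;
		data_off = 0;
		count -= s;
		nr_pages--;
		i++;
	}
	return i;
}

int main(void)
{
	struct seg segs[8];
	size_t offs = 100;
	int count = 5000;
	/* DIV_ROUND_UP(offs + count, PAGE_SIZE) pages are needed */
	int nr_pages = (offs + count + PAGE_SIZE - 1) / PAGE_SIZE;
	int n = pack_segments(segs, nr_pages, offs, count);

	for (int i = 0; i < n; i++)
		printf("seg %d: page %d, off %d, len %d\n",
		       i, segs[i].page, segs[i].off, segs[i].len);
	return 0;
}

With offs = 100 and count = 5000 this produces two segments, the first 3996 bytes long at offset 100, the second 1004 bytes long at offset 0 of the next page.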
@@ -314,11 +312,20 @@ req_retry:
 }
 
 static int p9_get_mapped_pages(struct virtio_chan *chan,
-			       struct page **pages, char *data,
-			       int nr_pages, int write, int kern_buf)
+			       struct page ***pages,
+			       struct iov_iter *data,
+			       int count,
+			       size_t *offs,
+			       int *need_drop)
 {
+	int nr_pages;
 	int err;
-	if (!kern_buf) {
+
+	if (!iov_iter_count(data))
+		return 0;
+
+	if (!(data->type & ITER_KVEC)) {
+		int n;
 		/*
 		 * We allow only p9_max_pages pinned. We wait for the
 		 * Other zc request to finish here
@@ -329,26 +336,49 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 			if (err == -ERESTARTSYS)
 				return err;
 		}
-		err = p9_payload_gup(data, &nr_pages, pages, write);
-		if (err < 0)
-			return err;
+		n = iov_iter_get_pages_alloc(data, pages, count, offs);
+		if (n < 0)
+			return n;
+		*need_drop = 1;
+		nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
 		atomic_add(nr_pages, &vp_pinned);
+		return n;
 	} else {
 		/* kernel buffer, no need to pin pages */
-		int s, index = 0;
-		int count = nr_pages;
-		while (nr_pages) {
-			s = rest_of_page(data);
-			if (is_vmalloc_addr(data))
-				pages[index++] = vmalloc_to_page(data);
+		int index;
+		size_t len;
+		void *p;
+
+		/* we'd already checked that it's non-empty */
+		while (1) {
+			len = iov_iter_single_seg_count(data);
+			if (likely(len)) {
+				p = data->kvec->iov_base + data->iov_offset;
+				break;
+			}
+			iov_iter_advance(data, 0);
+		}
+		if (len > count)
+			len = count;
+
+		nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
+			   (unsigned long)p / PAGE_SIZE;
+
+		*pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+		if (!*pages)
+			return -ENOMEM;
+
+		*need_drop = 0;
+		p -= (*offs = (unsigned long)p % PAGE_SIZE);
+		for (index = 0; index < nr_pages; index++) {
+			if (is_vmalloc_addr(p))
+				(*pages)[index] = vmalloc_to_page(p);
 			else
-				pages[index++] = kmap_to_page(data);
-			data += s;
-			nr_pages--;
+				(*pages)[index] = kmap_to_page(p);
+			p += PAGE_SIZE;
 		}
-		nr_pages = count;
+		return len;
 	}
-	return nr_pages;
 }
 
 /**
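
The rewritten p9_get_mapped_pages() has two paths: user-backed iterators are pinned with iov_iter_get_pages_alloc() and flagged via *need_drop so the pages can be released later, while ITER_KVEC kernel buffers are only translated page by page with vmalloc_to_page()/kmap_to_page(). In the kernel-buffer branch the number of pages comes from the span of virtual addresses, not the byte count alone. A small stand-alone sketch of that page-span calculation (ordinary C; the PAGE_SIZE value and example addresses are assumptions for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Pages touched by a buffer of `len` bytes at virtual address `p`:
 * DIV_ROUND_UP(p + len, PAGE_SIZE) - p / PAGE_SIZE, i.e. the index
 * of the page just past the end minus the index of the first page.
 * This mirrors the expression used in the kvec branch. */
static unsigned long pages_spanned(unsigned long p, unsigned long len)
{
	return (p + len + PAGE_SIZE - 1) / PAGE_SIZE - p / PAGE_SIZE;
}

int main(void)
{
	/* 100 bytes wholly inside one page */
	printf("%lu\n", pages_spanned(0x1000, 100));	/* prints 1 */
	/* 100 bytes straddling a page boundary */
	printf("%lu\n", pages_spanned(0x1fc0, 100));	/* prints 2 */
	/* two pages' worth of data starting mid-page touches three pages */
	printf("%lu\n", pages_spanned(0x1800, 8192));	/* prints 3 */
	return 0;
}

The pinned path does the same rounding as DIV_ROUND_UP(n + *offs, PAGE_SIZE), where *offs is the offset of the data within the first pinned page.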
@@ -364,8 +394,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
  */
 static int
 p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
-		     char *uidata, char *uodata, int inlen,
-		     int outlen, int in_hdr_len, int kern_buf)
+		     struct iov_iter *uidata, struct iov_iter *uodata,
+		     int inlen, int outlen, int in_hdr_len)
 {
 	int in, out, err, out_sgs, in_sgs;
 	unsigned long flags;
@@ -373,41 +403,32 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	struct page **in_pages = NULL, **out_pages = NULL;
 	struct virtio_chan *chan = client->trans;
 	struct scatterlist *sgs[4];
+	size_t offs;
+	int need_drop = 0;
 
 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
 	if (uodata) {
-		out_nr_pages = p9_nr_pages(uodata, outlen);
-		out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
-				    GFP_NOFS);
-		if (!out_pages) {
-			err = -ENOMEM;
-			goto err_out;
-		}
-		out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
-						   out_nr_pages, 0, kern_buf);
-		if (out_nr_pages < 0) {
-			err = out_nr_pages;
-			kfree(out_pages);
-			out_pages = NULL;
-			goto err_out;
+		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+					    outlen, &offs, &need_drop);
+		if (n < 0)
+			return n;
+		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+		if (n != outlen) {
+			__le32 v = cpu_to_le32(n);
+			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+			outlen = n;
 		}
-	}
-	if (uidata) {
-		in_nr_pages = p9_nr_pages(uidata, inlen);
-		in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
-				   GFP_NOFS);
-		if (!in_pages) {
-			err = -ENOMEM;
-			goto err_out;
-		}
-		in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
-						  in_nr_pages, 1, kern_buf);
-		if (in_nr_pages < 0) {
-			err = in_nr_pages;
-			kfree(in_pages);
-			in_pages = NULL;
-			goto err_out;
+	} else if (uidata) {
+		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+					    inlen, &offs, &need_drop);
+		if (n < 0)
+			return n;
+		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+		if (n != inlen) {
+			__le32 v = cpu_to_le32(n);
+			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+			inlen = n;
 		}
 	}
 	req->status = REQ_STATUS_SENT;
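
When the iterator yields fewer bytes than the request originally asked for (n != outlen, or n != inlen), the already-marshalled 9P message is patched in place: the count field occupies the last four bytes of req->tc->sdata and is stored little-endian, so a cpu_to_le32() value is memcpy'd over it. A hedged user-space illustration of that tail fix-up (the 11-byte buffer layout here is a toy; only the "patch a little-endian u32 at the tail" step mirrors the driver):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Store a 32-bit value little-endian regardless of host byte order --
 * the user-space equivalent of cpu_to_le32() + memcpy() used to patch
 * the count at the end of the marshalled request. */
static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	/* Toy "request": 7 bytes of header followed by a 4-byte count. */
	uint8_t sdata[11];
	size_t size = sizeof(sdata);
	uint32_t requested = 8192, actual = 5000;

	memset(sdata, 0, sizeof(sdata));
	put_le32(&sdata[size - 4], requested);

	/* Fewer bytes were actually mapped: rewrite the tail in place. */
	if (actual != requested)
		put_le32(&sdata[size - 4], actual);

	printf("count bytes: %02x %02x %02x %02x\n",
	       sdata[size - 4], sdata[size - 3], sdata[size - 2], sdata[size - 1]);
	return 0;
}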
@@ -426,7 +447,7 @@ req_retry_pinned:
 	if (out_pages) {
 		sgs[out_sgs++] = chan->sg + out;
 		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
-				      out_pages, out_nr_pages, uodata, outlen);
+				      out_pages, out_nr_pages, offs, outlen);
 	}
 
 	/*
@@ -444,7 +465,7 @@ req_retry_pinned:
 	if (in_pages) {
 		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
 		in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
-				     in_pages, in_nr_pages, uidata, inlen);
+				     in_pages, in_nr_pages, offs, inlen);
 	}
 
 	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
@@ -478,7 +499,7 @@ req_retry_pinned:
 	 * Non kernel buffers are pinned, unpin them
 	 */
 err_out:
-	if (!kern_buf) {
+	if (need_drop) {
 		if (in_pages) {
 			p9_release_pages(in_pages, in_nr_pages);
 			atomic_sub(in_nr_pages, &vp_pinned);
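
The cleanup path now keys off need_drop instead of the old kern_buf flag: only pages that were actually pinned from user space get released and subtracted from the vp_pinned counter; for kernel kvec buffers nothing was pinned, so there is nothing to drop. A minimal sketch of that release-only-what-was-pinned pattern (plain C with placeholder helpers; p9_release_pages() and vp_pinned are stood in by toy equivalents, and freeing the pointer array in both cases is an assumption of the sketch, not shown in the excerpt above):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: `struct page` is opaque here, `pinned_count`
 * plays the role of vp_pinned, release_pages() that of
 * p9_release_pages(). */
struct page;

static int pinned_count;

static void release_pages(struct page **pages, int nr)
{
	(void)pages;	/* a real implementation would unpin each page */
	printf("dropping pin on %d pages\n", nr);
}

/* Undo a mapping produced the way p9_get_mapped_pages() does it:
 * release and un-account the pages only if they were pinned
 * (need_drop set); the array of page pointers is freed either way. */
static void cleanup_mapping(struct page **pages, int nr_pages, int need_drop)
{
	if (!pages)
		return;
	if (need_drop) {
		release_pages(pages, nr_pages);
		pinned_count -= nr_pages;
	}
	free(pages);
}

int main(void)
{
	struct page **user_pages = calloc(4, sizeof(*user_pages));
	struct page **kern_pages = calloc(2, sizeof(*kern_pages));

	pinned_count = 4;			/* the user buffer was pinned */
	cleanup_mapping(user_pages, 4, 1);	/* released and un-accounted */
	cleanup_mapping(kern_pages, 2, 0);	/* only the array is freed */
	printf("pinned_count = %d\n", pinned_count);
	return 0;
}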