-rw-r--r--   fs/splice.c   135
1 file changed, 36 insertions(+), 99 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 36e9353c1910..31c52e0269c2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1434,106 +1434,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 	return -EINVAL;
 }
 
-/*
- * Map an iov into an array of pages and offset/length tupples. With the
- * partial_page structure, we can map several non-contiguous ranges into
- * our ones pages[] map instead of splitting that operation into pieces.
- * Could easily be exported as a generic helper for other users, in which
- * case one would probably want to add a 'max_nr_pages' parameter as well.
- */
-static int get_iovec_page_array(const struct iovec __user *iov,
-				unsigned int nr_vecs, struct page **pages,
-				struct partial_page *partial, bool aligned,
+static int get_iovec_page_array(struct iov_iter *from,
+				struct page **pages,
+				struct partial_page *partial,
 				unsigned int pipe_buffers)
 {
-	int buffers = 0, error = 0;
-
-	while (nr_vecs) {
-		unsigned long off, npages;
-		struct iovec entry;
-		void __user *base;
-		size_t len;
-		int i;
-
-		error = -EFAULT;
-		if (copy_from_user(&entry, iov, sizeof(entry)))
-			break;
-
-		base = entry.iov_base;
-		len = entry.iov_len;
-
-		/*
-		 * Sanity check this iovec. 0 read succeeds.
-		 */
-		error = 0;
-		if (unlikely(!len))
-			break;
-		error = -EFAULT;
-		if (!access_ok(VERIFY_READ, base, len))
-			break;
-
-		/*
-		 * Get this base offset and number of pages, then map
-		 * in the user pages.
-		 */
-		off = (unsigned long) base & ~PAGE_MASK;
-
-		/*
-		 * If asked for alignment, the offset must be zero and the
-		 * length a multiple of the PAGE_SIZE.
-		 */
-		error = -EINVAL;
-		if (aligned && (off || len & ~PAGE_MASK))
-			break;
-
-		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		if (npages > pipe_buffers - buffers)
-			npages = pipe_buffers - buffers;
-
-		error = get_user_pages_fast((unsigned long)base, npages,
-					    0, &pages[buffers]);
-
-		if (unlikely(error <= 0))
-			break;
-
-		/*
-		 * Fill this contiguous range into the partial page map.
-		 */
-		for (i = 0; i < error; i++) {
-			const int plen = min_t(size_t, len, PAGE_SIZE - off);
-
-			partial[buffers].offset = off;
-			partial[buffers].len = plen;
-
-			off = 0;
-			len -= plen;
+	int buffers = 0;
+	while (iov_iter_count(from)) {
+		ssize_t copied;
+		size_t start;
+
+		copied = iov_iter_get_pages(from, pages + buffers, ~0UL,
+					    pipe_buffers - buffers, &start);
+		if (copied <= 0)
+			return buffers ? buffers : copied;
+
+		iov_iter_advance(from, copied);
+		while (copied) {
+			int size = min_t(int, copied, PAGE_SIZE - start);
+			partial[buffers].offset = start;
+			partial[buffers].len = size;
+			copied -= size;
+			start = 0;
 			buffers++;
 		}
-
-		/*
-		 * We didn't complete this iov, stop here since it probably
-		 * means we have to move some of this into a pipe to
-		 * be able to continue.
-		 */
-		if (len)
-			break;
-
-		/*
-		 * Don't continue if we mapped fewer pages than we asked for,
-		 * or if we mapped the max number of pages that we have
-		 * room for.
-		 */
-		if (error < npages || buffers == pipe_buffers)
-			break;
-
-		nr_vecs--;
-		iov++;
 	}
-
-	if (buffers)
-		return buffers;
-
-	return error;
+	return buffers;
 }
 
 static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
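
Note (not part of the patch): the new helper leans entirely on the iov_iter primitives. iov_iter_get_pages() pins up to a requested number of user pages starting at the iterator's current position, returns how many bytes those pages cover, and reports the offset into the first page through its last argument; iov_iter_advance() then consumes those bytes so the next pass continues where this one stopped. A minimal sketch of that calling pattern follows, using the signatures as they existed in kernels of this vintage; the wrapper pin_next_chunk() is hypothetical and exists only for illustration.

#include <linux/mm.h>
#include <linux/uio.h>

/*
 * Hypothetical helper, for illustration only: pin the next chunk of an
 * iov_iter into 'pages' (at most 'maxpages' entries).  Returns the number
 * of bytes covered, 0 when the iterator is exhausted, or a negative errno.
 * The caller owns a reference on each returned page and must put_page() it
 * when done (in splice this happens through the spd release callback).
 */
static ssize_t pin_next_chunk(struct iov_iter *from, struct page **pages,
			      unsigned int maxpages, size_t *first_page_off)
{
	ssize_t bytes;

	if (!iov_iter_count(from))
		return 0;

	/* Pin up to 'maxpages' pages covering the next bytes of 'from'. */
	bytes = iov_iter_get_pages(from, pages, ~0UL, maxpages,
				   first_page_off);
	if (bytes <= 0)
		return bytes;

	/* Consume what was mapped so a later call picks up after it. */
	iov_iter_advance(from, bytes);
	return bytes;
}
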
@@ -1587,10 +1513,13 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
  * as splice-from-memory, where the regular splice is splice-from-file (or
  * to file). In both cases the output is a pipe, naturally.
  */
-static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov,
 			     unsigned long nr_segs, unsigned int flags)
 {
 	struct pipe_inode_info *pipe;
+	struct iovec iovstack[UIO_FASTIOV];
+	struct iovec *iov = iovstack;
+	struct iov_iter from;
 	struct page *pages[PIPE_DEF_BUFFERS];
 	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
@@ -1607,11 +1536,18 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 	if (!pipe)
 		return -EBADF;
 
-	if (splice_grow_spd(pipe, &spd))
+	ret = import_iovec(WRITE, uiov, nr_segs,
+			   ARRAY_SIZE(iovstack), &iov, &from);
+	if (ret < 0)
+		return ret;
+
+	if (splice_grow_spd(pipe, &spd)) {
+		kfree(iov);
 		return -ENOMEM;
+	}
 
-	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
-					    spd.partial, false,
+	spd.nr_pages = get_iovec_page_array(&from, spd.pages,
+					    spd.partial,
 					    spd.nr_pages_max);
 	if (spd.nr_pages <= 0)
 		ret = spd.nr_pages;
@@ -1619,6 +1555,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 	ret = splice_to_pipe(pipe, &spd);
 
 	splice_shrink_spd(&spd);
+	kfree(iov);
 	return ret;
 }
 
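
Note (not part of the patch): the unconditional kfree(iov) on every exit path above works because, in kernels of this era, import_iovec() copies and validates the user iovec array into the small on-stack iovstack[] when it fits and allocates only for larger arrays, and on return it leaves *iov either NULL or pointing at the allocation, never at the stack buffer. A minimal, hedged sketch of that lifecycle follows; count_user_bytes() is a hypothetical caller and the iov_iter_count() call merely stands in for real work on the iterator.

#include <linux/slab.h>
#include <linux/uio.h>

/*
 * Hypothetical example, for illustration only: import a user iovec array
 * and report how many bytes it describes.  Mirrors the import_iovec() /
 * kfree() lifecycle that vmsplice_to_pipe() follows after this patch.
 */
static long count_user_bytes(const struct iovec __user *uiov,
			     unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];	/* on-stack fast path */
	struct iovec *iov = iovstack;
	struct iov_iter from;
	long ret;

	/* Copies and validates the user iovecs, initializes 'from'. */
	ret = import_iovec(WRITE, uiov, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &from);
	if (ret < 0)
		return ret;

	ret = iov_iter_count(&from);	/* stand-in for real work */

	/* Safe even when iovstack[] was used: 'iov' is then NULL. */
	kfree(iov);
	return ret;
}
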