Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 81
1 file changed, 81 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9b68a72f8c17..96f1d473c89a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1527,6 +1527,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/*
+ * __vm_map_pages - maps a range of kernel pages into a user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to map a range of kernel pages into a user vma.
+ *
+ * Return: 0 on success, or an error code otherwise.
+ */
+static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num, unsigned long offset)
+{
+	unsigned long count = vma_pages(vma);
+	unsigned long uaddr = vma->vm_start;
+	int ret, i;
+
+	/* Fail if the user requested offset is beyond the end of the object */
+	if (offset > num)
+		return -ENXIO;
+
+	/* Fail if the user requested size exceeds available object size */
+	if (count > num - offset)
+		return -ENXIO;
+
+	for (i = 0; i < count; i++) {
+		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+		if (ret < 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/**
+ * vm_map_pages - maps a range of kernel pages, starting at a non-zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of @num pages, catering for the user's
+ * requested vm_pgoff.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately, leaving any previously inserted pages present. Callers
+ * from the mmap handler may immediately return the error, as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success, or an error code otherwise.
+ */
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num)
+{
+	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+/**
+ * vm_map_pages_zero - maps a range of kernel pages, starting at offset 0
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Similar to vm_map_pages(), except that it explicitly sets the offset
+ * to 0. This function is intended for drivers that do not consider
+ * vm_pgoff.
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success, or an error code otherwise.
+ */
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num)
+{
+	return __vm_map_pages(vma, pages, num, 0);
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
+
 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
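
Below is a minimal usage sketch of the new API (not part of the patch): a hypothetical driver mmap handler that maps a preallocated kernel page array into userspace. struct my_dev, its fields, and my_dev_mmap are illustrative assumptions, not existing kernel code.

#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	struct page **pages;		/* preallocated kernel pages */
	unsigned long num_pages;	/* number of entries in @pages */
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	/*
	 * vm_map_pages() honours the offset userspace passed to mmap()
	 * via vma->vm_pgoff. On failure the handler can return the error
	 * directly: the core will destroy the vma, removing any pages
	 * that were already inserted.
	 */
	return vm_map_pages(vma, dev->pages, dev->num_pages);
}

A driver that deliberately ignores vm_pgoff would call vm_map_pages_zero(vma, dev->pages, dev->num_pages) instead, which maps the whole array from offset 0.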