author	Souptick Joarder <jrdr.linux@gmail.com>	2019-05-13 20:21:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-14 12:47:50 -0400
commit	a667d7456f189e3422725dddcd067537feac49c0 (patch)
tree	e9dbd46e7325f5ecf345f88c5f0e351b5888c32b
parent	62afcd1cb8e355330a699b456f05f781e877cc4f (diff)
mm: introduce new vm_map_pages() and vm_map_pages_zero() API
Patch series "mm: Use vm_map_pages() and vm_map_pages_zero() API", v5. This patch (of 5): Previouly drivers have their own way of mapping range of kernel pages/memory into user vma and this was done by invoking vm_insert_page() within a loop. As this pattern is common across different drivers, it can be generalized by creating new functions and using them across the drivers. vm_map_pages() is the API which can be used to map kernel memory/pages in drivers which have considered vm_pgoff vm_map_pages_zero() is the API which can be used to map a range of kernel memory/pages in drivers which have not considered vm_pgoff. vm_pgoff is passed as default 0 for those drivers. We _could_ then at a later "fix" these drivers which are using vm_map_pages_zero() to behave according to the normal vm_pgoff offsetting simply by removing the _zero suffix on the function name and if that causes regressions, it gives us an easy way to revert. Tested on Rockchip hardware and display is working, including talking to Lima via prime. Link: http://lkml.kernel.org/r/751cb8a0f4c3e67e95c58a3b072937617f338eea.1552921225.git.jrdr.linux@gmail.com Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com> Suggested-by: Russell King <linux@armlinux.org.uk> Suggested-by: Matthew Wilcox <willy@infradead.org> Reviewed-by: Mike Rapoport <rppt@linux.ibm.com> Tested-by: Heiko Stuebner <heiko@sntech.de> Cc: Michal Hocko <mhocko@suse.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Rik van Riel <riel@surriel.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Thierry Reding <treding@nvidia.com> Cc: Kees Cook <keescook@chromium.org> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Stefan Richter <stefanr@s5r6.in-berlin.de> Cc: Sandy Huang <hjc@rock-chips.com> Cc: David Airlie <airlied@linux.ie> Cc: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> Cc: Joerg Roedel <joro@8bytes.org> Cc: Pawel Osciak <pawel@osciak.com> Cc: Kyungmin Park <kyungmin.park@samsung.com> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Juergen Gross <jgross@suse.com> Cc: Mauro Carvalho Chehab <mchehab@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
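To illustrate the conversion this series performs, here is a minimal sketch of a hypothetical driver mmap path before and after. struct my_buf, my_mmap_old() and my_mmap() are invented for illustration and appear nowhere in this commit:

#include <linux/mm.h>

/* Hypothetical driver buffer: an array of pre-allocated kernel pages. */
struct my_buf {
	struct page **pages;		/* pages backing the buffer */
	unsigned long num_pages;	/* length of the pages array */
};

/* Old open-coded pattern: insert each page by hand within a loop. */
static int my_mmap_old(struct my_buf *buf, struct vm_area_struct *vma)
{
	unsigned long count = vma_pages(vma);
	unsigned long uaddr = vma->vm_start;
	unsigned long i;
	int ret;

	/*
	 * Per-driver validation of vm_pgoff and mapping size is elided
	 * here; historically each driver did (or forgot) its own version
	 * of these checks.
	 */
	for (i = 0; i < count; i++) {
		ret = vm_insert_page(vma, uaddr, buf->pages[vma->vm_pgoff + i]);
		if (ret)
			return ret;	/* mmap caller destroys the vma */
		uaddr += PAGE_SIZE;
	}
	return 0;
}

/* New pattern: a single call; offset and size checks are centralized. */
static int my_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}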
-rw-r--r--	include/linux/mm.h	4
-rw-r--r--	mm/memory.c	81
-rw-r--r--	mm/nommu.c	14
3 files changed, 99 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abb7eb7ef0f2..912614fbbef3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2579,6 +2579,10 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 9b68a72f8c17..96f1d473c89a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1527,6 +1527,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/*
+ * __vm_map_pages - maps range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to map range of kernel pages into a user vma.
+ *
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num, unsigned long offset)
+{
+	unsigned long count = vma_pages(vma);
+	unsigned long uaddr = vma->vm_start;
+	int ret, i;
+
+	/* Fail if the user requested offset is beyond the end of the object */
+	if (offset > num)
+		return -ENXIO;
+
+	/* Fail if the user requested size exceeds available object size */
+	if (count > num - offset)
+		return -ENXIO;
+
+	for (i = 0; i < count; i++) {
+		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+		if (ret < 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/**
+ * vm_map_pages - maps range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of @num pages, catering for the user's
+ * requested vm_pgoff
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num)
+{
+	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+/**
+ * vm_map_pages_zero - map range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Similar to vm_map_pages(), except that it explicitly sets the offset
+ * to 0. This function is intended for the drivers that did not consider
+ * vm_pgoff.
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num)
+{
+	return __vm_map_pages(vma, pages, num, 0);
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
+
 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
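As a worked example of the offset handling above (the numbers and the device are illustrative, not taken from this commit): for an object backed by num = 8 pages, a user mapping of four pages at an offset of two pages, e.g.

	/* userspace view; fd is a hypothetical device backed by 8 pages,
	 * page_size comes from sysconf(_SC_PAGESIZE) */
	mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	     fd, 2 * page_size);

arrives in the handler with vma_pages(vma) == 4 and vma->vm_pgoff == 2, so __vm_map_pages() inserts pages[2] through pages[5]. A request with offset > 8, or one where count > 8 - offset, returns -ENXIO before any page is inserted.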
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276beb109..b492fd1fcf9f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -473,6 +473,20 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
+
 /*
  * sys_brk() for the most part doesn't need the global kernel
  * lock, except when an application is doing something nasty