author    Huang Ying <ying.huang@intel.com>    2011-01-29 22:15:47 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>    2011-03-17 12:08:27 -0400
commit    0014bd990e69063b0fb78940b35439d7980ce3ee (patch)
tree      56d4576cc07954eb304abaf602aba44a6aa2a4f1
parent    91c9c3eda4f3066980d13a6907ef84f3a99364bd (diff)
mm: export __get_user_pages
In most cases, get_user_pages and get_user_pages_fast should be used
to pin user pages in memory.  But sometimes special flags other than
FOLL_GET, FOLL_WRITE and FOLL_FORCE are needed; for example, in the
following patch, KVM needs FOLL_HWPOISON.  To support these users,
__get_user_pages is exported directly.

There were some symbol-name conflicts in the infiniband drivers;
those are fixed too.

Signed-off-by: Huang Ying <ying.huang@intel.com>
CC: Andrew Morton <akpm@linux-foundation.org>
CC: Michel Lespinasse <walken@google.com>
CC: Roland Dreier <roland@kernel.org>
CC: Ralph Campbell <infinipath@qlogic.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
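[Editor's note] For context, this is the calling pattern the export enables: a
module pins user pages while holding mmap_sem, passing gup_flags that plain
get_user_pages cannot express.  A minimal sketch follows; FOLL_HWPOISON comes
from the follow-up patch this message mentions, and the helper name and exact
flag combination are illustrative assumptions, not part of this commit.

	#include <linux/mm.h>
	#include <linux/sched.h>

	/*
	 * Illustrative sketch only: pin a single user page, asking to get
	 * -EHWPOISON back for hardware-poisoned pages.  FOLL_HWPOISON is
	 * added by a follow-up patch in this series; the helper name
	 * pin_user_page_hwpoison is hypothetical.
	 */
	static int pin_user_page_hwpoison(unsigned long addr, struct page **page)
	{
		int ret;

		down_read(&current->mm->mmap_sem);
		ret = __get_user_pages(current, current->mm, addr, 1,
				       FOLL_GET | FOLL_HWPOISON, page, NULL, NULL);
		up_read(&current->mm->mmap_sem);

		if (ret < 0)
			return ret;		/* -errno, e.g. -EHWPOISON */
		return ret == 1 ? 0 : -EFAULT;	/* caller put_page()s on success */
	}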
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c       6
-rw-r--r--  include/linux/mm.h                                4
-rw-r--r--  mm/internal.h                                     5
-rw-r--r--  mm/memory.c                                      50
5 files changed, 60 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index bab9f74c0665..cfed5399f074 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -53,8 +53,8 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 }
 
 /* call with current->mm->mmap_sem held */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
-	struct page **p, struct vm_area_struct **vma)
+static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+	struct page **p, struct vm_area_struct **vma)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __get_user_pages(start_page, num_pages, p, NULL);
+	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index d7a26c1d4f37..7689e49c13c9 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
 /*
  * Call with current->mm->mmap_sem held.
  */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
-	struct page **p, struct vm_area_struct **vma)
+static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
+	struct page **p, struct vm_area_struct **vma)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __get_user_pages(start_page, num_pages, p, NULL);
+	ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 679300c050f5..46150c66318e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -965,6 +965,10 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, unsigned int foll_flags,
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking);
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		   unsigned long start, int nr_pages, int write, int force,
 		   struct page **pages, struct vm_area_struct **vmas);
diff --git a/mm/internal.h b/mm/internal.h
index 69488205723d..3438dd43a062 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -245,11 +245,6 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-
 #define ZONE_RECLAIM_NOSCAN	-2
 #define ZONE_RECLAIM_FULL	-1
 #define ZONE_RECLAIM_SOME	0
diff --git a/mm/memory.c b/mm/memory.c
index 5823698c2b71..806a37ec71bd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1410,6 +1410,55 @@ no_page_table:
 	return page;
 }
 
+/**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk:	task_struct of target task
+ * @mm:		mm_struct of target mm
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @gup_flags:	flags modifying pin behaviour
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long. Or NULL, if caller
+ *		only intends to ensure the pages are faulted in.
+ * @vmas:	array of pointers to vmas corresponding to each page.
+ *		Or NULL if the caller does not require them.
+ * @nonblocking: whether waiting for disk IO or mmap_sem contention
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * __get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * __get_user_pages returns, and there may even be a completely different
+ * page there in some cases (eg. if mmapped pagecache has been invalidated
+ * and subsequently re faulted). However it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
+ * the page is written to, set_page_dirty (or set_page_dirty_lock, as
+ * appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
+ * or mmap_sem contention, and if waiting is needed to pin all pages,
+ * *@nonblocking will be set to 0.
+ *
+ * In most cases, get_user_pages or get_user_pages_fast should be used
+ * instead of __get_user_pages. __get_user_pages should be used only if
+ * you need some special @gup_flags.
+ */
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, unsigned int gup_flags,
 		     struct page **pages, struct vm_area_struct **vmas,
@@ -1578,6 +1627,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	} while (nr_pages);
 	return i;
 }
+EXPORT_SYMBOL(__get_user_pages);
 
 /**
  * get_user_pages() - pin user pages in memory
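
[Editor's note] The kerneldoc added above spells out a pin/dirty/release
protocol for callers: take the reference under mmap_sem, call set_page_dirty
or set_page_dirty_lock if the page was written, then put_page.  A minimal
sketch of that protocol follows; the helper name copy_into_user_page is
hypothetical, and it assumes the copy fits within a single page.

	#include <linux/mm.h>
	#include <linux/highmem.h>
	#include <linux/sched.h>
	#include <linux/string.h>

	/*
	 * Illustrative only: write into a user page pinned with FOLL_WRITE,
	 * following the dirty/release rules documented above.
	 */
	static int copy_into_user_page(unsigned long addr, const void *src, size_t len)
	{
		struct page *page;
		void *kaddr;
		int ret;

		down_read(&current->mm->mmap_sem);
		ret = __get_user_pages(current, current->mm, addr, 1,
				       FOLL_GET | FOLL_WRITE, &page, NULL, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;

		kaddr = kmap(page);
		memcpy(kaddr + (addr & ~PAGE_MASK), src, len);
		kunmap(page);

		/* we wrote to the page: dirty it before dropping the reference */
		set_page_dirty_lock(page);
		put_page(page);
		return 0;
	}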