about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorDave Hansen <dave.hansen@linux.intel.com>2016-02-12 16:01:55 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-16 04:11:12 -0500
commitcde70140fed8429acf7a14e2e2cbd3e329036653 (patch)
treeaa684e9cae39005e66ab4b7d2b801beade6fa9ab /mm
parent1e9877902dc7e11d2be038371c6fbf2dfcd469d7 (diff)
mm/gup: Overload get_user_pages() functions
The concept here was a suggestion from Ingo. The implementation horrors are all mine. This allows get_user_pages(), get_user_pages_unlocked(), and get_user_pages_locked() to be called with or without the leading tsk/mm arguments. We will give a compile-time warning about the old style being __deprecated and we will also WARN_ON() if the non-remote version is used for a remote-style access. Doing this, folks will get nice warnings and will not break the build. This should be nice for -next and will hopefully let developers fix up their own code instead of maintainers needing to do it at merge time. The way we do this is hideous. It uses the __VA_ARGS__ macro functionality to call different functions based on the number of arguments passed to the macro. There's an additional hack to ensure that our EXPORT_SYMBOL() of the deprecated symbols doesn't trigger a warning. We should be able to remove this mess as soon as -rc1 hits in the release after this is merged. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Alexander Kuleshov <kuleshovmail@gmail.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Hansen <dave@sr71.net> Cc: Dominik Dingel <dingel@linux.vnet.ibm.com> Cc: Geliang Tang <geliangtang@163.com> Cc: Jan Kara <jack@suse.cz> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kirill A. 
Shutemov <kirill.shutemov@linux.intel.com> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Leon Romanovsky <leon@leon.nu> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Masahiro Yamada <yamada.masahiro@socionext.com> Cc: Mateusz Guzik <mguzik@redhat.com> Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vladimir Davydov <vdavydov@virtuozzo.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Xie XiuQi <xiexiuqi@huawei.com> Cc: linux-kernel@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210155.73222EE1@viggo.jf.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup.c    62
-rw-r--r--  mm/nommu.c  64
-rw-r--r--  mm/util.c    4
3 files changed, 94 insertions, 36 deletions
diff --git a/mm/gup.c b/mm/gup.c
index 36ca850936c9..8a035e042b35 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,3 +1,4 @@
1#define __DISABLE_GUP_DEPRECATED 1
1#include <linux/kernel.h> 2#include <linux/kernel.h>
2#include <linux/errno.h> 3#include <linux/errno.h>
3#include <linux/err.h> 4#include <linux/err.h>
@@ -807,15 +808,15 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
807 * if (locked) 808 * if (locked)
808 * up_read(&mm->mmap_sem); 809 * up_read(&mm->mmap_sem);
809 */ 810 */
810long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 811long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
811 unsigned long start, unsigned long nr_pages,
812 int write, int force, struct page **pages, 812 int write, int force, struct page **pages,
813 int *locked) 813 int *locked)
814{ 814{
815 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 815 return __get_user_pages_locked(current, current->mm, start, nr_pages,
816 pages, NULL, locked, true, FOLL_TOUCH); 816 write, force, pages, NULL, locked, true,
817 FOLL_TOUCH);
817} 818}
818EXPORT_SYMBOL(get_user_pages_locked); 819EXPORT_SYMBOL(get_user_pages_locked6);
819 820
820/* 821/*
821 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to 822 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -860,14 +861,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
860 * or if "force" shall be set to 1 (get_user_pages_fast misses the 861 * or if "force" shall be set to 1 (get_user_pages_fast misses the
861 * "force" parameter). 862 * "force" parameter).
862 */ 863 */
863long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 864long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
864 unsigned long start, unsigned long nr_pages,
865 int write, int force, struct page **pages) 865 int write, int force, struct page **pages)
866{ 866{
867 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, 867 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
868 force, pages, FOLL_TOUCH); 868 write, force, pages, FOLL_TOUCH);
869} 869}
870EXPORT_SYMBOL(get_user_pages_unlocked); 870EXPORT_SYMBOL(get_user_pages_unlocked5);
871 871
872/* 872/*
873 * get_user_pages_remote() - pin user pages in memory 873 * get_user_pages_remote() - pin user pages in memory
@@ -939,16 +939,15 @@ EXPORT_SYMBOL(get_user_pages_remote);
939 * This is the same as get_user_pages_remote() for the time 939 * This is the same as get_user_pages_remote() for the time
940 * being. 940 * being.
941 */ 941 */
942long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 942long get_user_pages6(unsigned long start, unsigned long nr_pages,
943 unsigned long start, unsigned long nr_pages,
944 int write, int force, struct page **pages, 943 int write, int force, struct page **pages,
945 struct vm_area_struct **vmas) 944 struct vm_area_struct **vmas)
946{ 945{
947 return __get_user_pages_locked(tsk, mm, start, nr_pages, 946 return __get_user_pages_locked(current, current->mm, start, nr_pages,
948 write, force, pages, vmas, NULL, false, 947 write, force, pages, vmas, NULL, false,
949 FOLL_TOUCH); 948 FOLL_TOUCH);
950} 949}
951EXPORT_SYMBOL(get_user_pages); 950EXPORT_SYMBOL(get_user_pages6);
952 951
953/** 952/**
954 * populate_vma_page_range() - populate a range of pages in the vma. 953 * populate_vma_page_range() - populate a range of pages in the vma.
@@ -1484,3 +1483,38 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1484} 1483}
1485 1484
1486#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */ 1485#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1486
1487long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
1488 unsigned long start, unsigned long nr_pages,
1489 int write, int force, struct page **pages,
1490 struct vm_area_struct **vmas)
1491{
1492 WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
1493 WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
1494
1495 return get_user_pages6(start, nr_pages, write, force, pages, vmas);
1496}
1497EXPORT_SYMBOL(get_user_pages8);
1498
1499long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
1500 unsigned long start, unsigned long nr_pages,
1501 int write, int force, struct page **pages, int *locked)
1502{
1503 WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
1504 WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
1505
1506 return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
1507}
1508EXPORT_SYMBOL(get_user_pages_locked8);
1509
1510long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
1511 unsigned long start, unsigned long nr_pages,
1512 int write, int force, struct page **pages)
1513{
1514 WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
1515 WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
1516
1517 return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
1518}
1519EXPORT_SYMBOL(get_user_pages_unlocked7);
1520
diff --git a/mm/nommu.c b/mm/nommu.c
index fbf6f0f1d6c9..b64d04d19702 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,6 +15,8 @@
15 15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 17
18#define __DISABLE_GUP_DEPRECATED
19
18#include <linux/export.h> 20#include <linux/export.h>
19#include <linux/mm.h> 21#include <linux/mm.h>
20#include <linux/vmacache.h> 22#include <linux/vmacache.h>
@@ -182,8 +184,7 @@ finish_or_fault:
182 * slab page or a secondary page from a compound page 184 * slab page or a secondary page from a compound page
183 * - don't permit access to VMAs that don't support it, such as I/O mappings 185 * - don't permit access to VMAs that don't support it, such as I/O mappings
184 */ 186 */
185long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 187long get_user_pages6(unsigned long start, unsigned long nr_pages,
186 unsigned long start, unsigned long nr_pages,
187 int write, int force, struct page **pages, 188 int write, int force, struct page **pages,
188 struct vm_area_struct **vmas) 189 struct vm_area_struct **vmas)
189{ 190{
@@ -194,20 +195,18 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
194 if (force) 195 if (force)
195 flags |= FOLL_FORCE; 196 flags |= FOLL_FORCE;
196 197
197 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, 198 return __get_user_pages(current, current->mm, start, nr_pages, flags,
198 NULL); 199 pages, vmas, NULL);
199} 200}
200EXPORT_SYMBOL(get_user_pages); 201EXPORT_SYMBOL(get_user_pages6);
201 202
202long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 203long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
203 unsigned long start, unsigned long nr_pages, 204 int write, int force, struct page **pages,
204 int write, int force, struct page **pages, 205 int *locked)
205 int *locked)
206{ 206{
207 return get_user_pages(tsk, mm, start, nr_pages, write, force, 207 return get_user_pages6(start, nr_pages, write, force, pages, NULL);
208 pages, NULL);
209} 208}
210EXPORT_SYMBOL(get_user_pages_locked); 209EXPORT_SYMBOL(get_user_pages_locked6);
211 210
212long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 211long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
213 unsigned long start, unsigned long nr_pages, 212 unsigned long start, unsigned long nr_pages,
@@ -216,21 +215,20 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
216{ 215{
217 long ret; 216 long ret;
218 down_read(&mm->mmap_sem); 217 down_read(&mm->mmap_sem);
219 ret = get_user_pages(tsk, mm, start, nr_pages, write, force, 218 ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
220 pages, NULL); 219 NULL, NULL);
221 up_read(&mm->mmap_sem); 220 up_read(&mm->mmap_sem);
222 return ret; 221 return ret;
223} 222}
224EXPORT_SYMBOL(__get_user_pages_unlocked); 223EXPORT_SYMBOL(__get_user_pages_unlocked);
225 224
226long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 225long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
227 unsigned long start, unsigned long nr_pages,
228 int write, int force, struct page **pages) 226 int write, int force, struct page **pages)
229{ 227{
230 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, 228 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
231 force, pages, 0); 229 write, force, pages, 0);
232} 230}
233EXPORT_SYMBOL(get_user_pages_unlocked); 231EXPORT_SYMBOL(get_user_pages_unlocked5);
234 232
235/** 233/**
236 * follow_pfn - look up PFN at a user virtual address 234 * follow_pfn - look up PFN at a user virtual address
@@ -2108,3 +2106,31 @@ static int __meminit init_admin_reserve(void)
2108 return 0; 2106 return 0;
2109} 2107}
2110subsys_initcall(init_admin_reserve); 2108subsys_initcall(init_admin_reserve);
2109
2110long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
2111 unsigned long start, unsigned long nr_pages,
2112 int write, int force, struct page **pages,
2113 struct vm_area_struct **vmas)
2114{
2115 return get_user_pages6(start, nr_pages, write, force, pages, vmas);
2116}
2117EXPORT_SYMBOL(get_user_pages8);
2118
2119long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
2120 unsigned long start, unsigned long nr_pages,
2121 int write, int force, struct page **pages,
2122 int *locked)
2123{
2124 return get_user_pages_locked6(start, nr_pages, write,
2125 force, pages, locked);
2126}
2127EXPORT_SYMBOL(get_user_pages_locked8);
2128
2129long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
2130 unsigned long start, unsigned long nr_pages,
2131 int write, int force, struct page **pages)
2132{
2133 return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
2134}
2135EXPORT_SYMBOL(get_user_pages_unlocked7);
2136
diff --git a/mm/util.c b/mm/util.c
index 4fb14ca5a419..1e6011699cab 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -283,9 +283,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
283int __weak get_user_pages_fast(unsigned long start, 283int __weak get_user_pages_fast(unsigned long start,
284 int nr_pages, int write, struct page **pages) 284 int nr_pages, int write, struct page **pages)
285{ 285{
286 struct mm_struct *mm = current->mm; 286 return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
287 return get_user_pages_unlocked(current, mm, start, nr_pages,
288 write, 0, pages);
289} 287}
290EXPORT_SYMBOL_GPL(get_user_pages_fast); 288EXPORT_SYMBOL_GPL(get_user_pages_fast);
291 289