author	Dave Hansen <dave.hansen@linux.intel.com>	2016-02-12 16:02:18 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-02-18 03:32:44 -0500
commit	1874f6895c92d991ccf85edcc55a0d9dd552d71c (patch)
tree	d0abdaa8d71abc9850fdbf8c8fa39696a6501e10
parent	d4925e00d59698a201231cf99dce47d8b922bb34 (diff)
x86/mm/gup: Simplify get_user_pages() PTE bit handling
The current get_user_pages() code is a wee bit more complicated
than it needs to be for pte bit checking.  Currently, it establishes
a mask of required pte _PAGE_* bits and ensures that the pte it
goes after has all those bits.

This consolidates the three identical copies of this code.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210218.3A2D4045@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/mm/gup.c | 38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)
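For context on the pattern being consolidated: each call site built a mask
of required _PAGE_* bits and demanded that every one of them be set.  Below
is a minimal, self-contained userspace sketch of that check, not kernel
code; the _PAGE_PRESENT/_PAGE_RW/_PAGE_USER values mirror the x86 bit
positions from arch/x86/include/asm/pgtable_types.h, and main() is just an
illustrative harness.

#include <assert.h>

#define _PAGE_PRESENT (1UL << 0)	/* entry is valid */
#define _PAGE_RW      (1UL << 1)	/* writable */
#define _PAGE_USER    (1UL << 2)	/* reachable from user mode */

/* The consolidated check, shaped like the helper the patch adds. */
static int pte_allows_gup(unsigned long pteval, int write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	/* Every required bit must be present, not merely some of them. */
	return (pteval & need_pte_bits) == need_pte_bits;
}

int main(void)
{
	unsigned long ro_user = _PAGE_PRESENT | _PAGE_USER;
	unsigned long rw_user = ro_user | _PAGE_RW;

	assert(pte_allows_gup(ro_user, 0));	/* read of a user page: ok */
	assert(!pte_allows_gup(ro_user, 1));	/* write also needs _PAGE_RW */
	assert(pte_allows_gup(rw_user, 1));
	assert(!pte_allows_gup(_PAGE_PRESENT, 0));	/* kernel-only page */
	return 0;
}

Because the helper only inspects the raw value, the same function can serve
pte, pmd and pud entries, which is what lets the patch delete the three
open-coded copies.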
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ce5e4545203b..2f0a32945cda 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -75,6 +75,24 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
 }
 
 /*
+ * 'pteval' can come from a pte, pmd or pud.  We only check
+ * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
+ * same value on all 3 types.
+ */
+static inline int pte_allows_gup(unsigned long pteval, int write)
+{
+	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
+
+	if (write)
+		need_pte_bits |= _PAGE_RW;
+
+	if ((pteval & need_pte_bits) != need_pte_bits)
+		return 0;
+
+	return 1;
+}
+
+/*
  * The performance critical leaf functions are made noinline otherwise gcc
  * inlines everything into a single function which results in too much
  * register pressure.
@@ -83,14 +101,9 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	struct dev_pagemap *pgmap = NULL;
-	unsigned long mask;
 	int nr_start = *nr;
 	pte_t *ptep;
 
-	mask = _PAGE_PRESENT|_PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-
 	ptep = pte_offset_map(&pmd, addr);
 	do {
 		pte_t pte = gup_get_pte(ptep);
@@ -110,7 +123,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 				pte_unmap(ptep);
 				return 0;
 			}
-		} else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
+		} else if (!pte_allows_gup(pte_val(pte), write) ||
+			   pte_special(pte)) {
 			pte_unmap(ptep);
 			return 0;
 		}
@@ -164,14 +178,10 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
-	unsigned long mask;
 	struct page *head, *page;
 	int refs;
 
-	mask = _PAGE_PRESENT|_PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-	if ((pmd_flags(pmd) & mask) != mask)
+	if (!pte_allows_gup(pmd_val(pmd), write))
 		return 0;
 
 	VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
@@ -231,14 +241,10 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
-	unsigned long mask;
 	struct page *head, *page;
 	int refs;
 
-	mask = _PAGE_PRESENT|_PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-	if ((pud_flags(pud) & mask) != mask)
+	if (!pte_allows_gup(pud_val(pud), write))
 		return 0;
 	/* hugepages are never "special" */
 	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
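One subtlety in the pte-path hunk above: the old code folded _PAGE_SPECIAL
into a single comparison (all mask bits set and _PAGE_SPECIAL clear), while
the new code tests the helper and pte_special() separately.  A brute-force
sketch that the two forms reject exactly the same flag combinations; the
first three bit positions match real x86, but the _PAGE_SPECIAL position
here is purely illustrative (the real one lives in pgtable_types.h):

#include <assert.h>

#define _PAGE_PRESENT (1UL << 0)	/* real x86 position */
#define _PAGE_RW      (1UL << 1)	/* real x86 position */
#define _PAGE_USER    (1UL << 2)	/* real x86 position */
#define _PAGE_SPECIAL (1UL << 3)	/* stand-in position for this sketch */

/* Old form: one comparison covers the required bits and _PAGE_SPECIAL. */
static int old_rejects(unsigned long flags, int write)
{
	unsigned long mask = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		mask |= _PAGE_RW;
	return (flags & (mask | _PAGE_SPECIAL)) != mask;
}

static int pte_allows_gup(unsigned long pteval, int write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;
	return (pteval & need_pte_bits) == need_pte_bits;
}

/* New form: the consolidated helper plus an explicit special-page test. */
static int new_rejects(unsigned long flags, int write)
{
	return !pte_allows_gup(flags, write) || (flags & _PAGE_SPECIAL) != 0;
}

int main(void)
{
	unsigned long flags;
	int write;

	/* All 16 combinations of the four bits, for both read and write. */
	for (flags = 0; flags < 16; flags++)
		for (write = 0; write <= 1; write++)
			assert(old_rejects(flags, write) ==
			       new_rejects(flags, write));
	return 0;
}

Exhausting every combination of the four bits for both access kinds shows
the rewrite is behavior-preserving for the bits involved.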