author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2017-07-06 18:38:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 19:24:33 -0400
commit		4dc71451a2078efcad2f66bd6ef130d2296827b1 (patch)
tree		42f0896ed2bc9e7e59a4943b1b50fde56fb73a6a /mm/gup.c
parent		e22992923f741c951b830121655b58342fce202e (diff)
mm/follow_page_mask: add support for hugepage directory entry
Architectures like ppc64 support hugepage sizes that are not mapped to any
of the page table levels. Instead, they add an alternate page table entry
format called hugepage directory (hugepd). A hugepd indicates that the page
table entry maps to a set of hugetlb pages. Add support for this in the
generic follow_page_mask code. We already support this format in the
generic gup code. The default implementation prints a warning and returns
NULL. We will add ppc64 support in later patches.

Link: http://lkml.kernel.org/r/1494926612-23928-7-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
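The follow_huge_pd() default that the commit message refers to lives in
mm/hugetlb.c and is therefore outside this diffstat. Based on the
description above (warn, return NULL, overridable per-architecture), it is
essentially a __weak stub along these lines — a sketch, not the exact
hunk from this series:

	/*
	 * Sketch of the default follow_huge_pd() fallback described in the
	 * commit message. Architectures that actually use hugepage
	 * directories, such as ppc64, override this __weak definition in a
	 * later patch of the series.
	 */
	struct page * __weak
	follow_huge_pd(struct vm_area_struct *vma, unsigned long address,
		       hugepd_t hpd, int flags, int pdshift)
	{
		WARN(1, "hugepd follow unsupported\n");
		return NULL;
	}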
Diffstat (limited to 'mm/gup.c')
-rw-r--r--	mm/gup.c	33
1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index fe95a37a4172..9e472cb835b5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,14 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pmd_val(*pmd)), flags,
+				      PMD_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	if (pmd_devmap(*pmd)) {
 		ptl = pmd_lock(mm, pmd);
 		page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -292,6 +300,14 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pud_val(*pud)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pud_val(*pud)), flags,
+				      PUD_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	if (pud_devmap(*pud)) {
 		ptl = pud_lock(mm, pud);
 		page = follow_devmap_pud(vma, address, pud, flags);
@@ -311,6 +327,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 				    unsigned int flags, unsigned int *page_mask)
 {
 	p4d_t *p4d;
+	struct page *page;
 
 	p4d = p4d_offset(pgdp, address);
 	if (p4d_none(*p4d))
@@ -319,6 +336,14 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 	if (unlikely(p4d_bad(*p4d)))
 		return no_page_table(vma, flags);
 
+	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(p4d_val(*p4d)), flags,
+				      P4D_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	return follow_pud_mask(vma, address, p4d, flags, page_mask);
 }
 
@@ -363,6 +388,14 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+		page = follow_huge_pd(vma, address,
+				      __hugepd(pgd_val(*pgd)), flags,
+				      PGDIR_SHIFT);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 
 	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
 }
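Note that each level follows the same pattern: test the raw entry value
with is_hugepd(), and if it matches, hand it to follow_huge_pd() together
with the shift for that level (PMD_SHIFT, PUD_SHIFT, P4D_SHIFT or
PGDIR_SHIFT). On architectures without hugepage directories these branches
cost nothing, because the generic fallback makes is_hugepd() constant
false, so the compiler drops them as dead code. The parent commit
(e22992923f74, "mm/hugetlb: move default definition of hugepd_t earlier in
the header") provides that fallback; a sketch of what it looks like,
treating the exact header location as an assumption:

	/*
	 * Generic fallback (sketch, per include/linux/hugetlb.h around this
	 * series): architectures without hugepd support get a dummy hugepd_t
	 * and an is_hugepd() that is always false, so the is_hugepd() branches
	 * added above are eliminated at compile time.
	 */
	#ifndef is_hugepd
	typedef struct { unsigned long pd; } hugepd_t;
	#define is_hugepd(hugepd)	(0)
	#define __hugepd(x)		((hugepd_t) { (x) })
	#endif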