author    Michel Lespinasse <walken@google.com>    2013-02-22 19:35:56 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:23 -0500
commit    240aadeedc4a89fc44623f8ce4ca46bda73db07e (patch)
tree      e12cc254f2e78560837e5d454fdbe9b5d9e05c5c /include/linux/mm.h
parent    28a35716d317980ae9bc2ff2f84c33a3cda9e884 (diff)
mm: accelerate mm_populate() treatment of THP pages
This change adds a follow_page_mask function which is equivalent to
follow_page, but with an extra page_mask argument.

follow_page_mask sets *page_mask to HPAGE_PMD_NR - 1 when it encounters
a THP page, and to 0 in other cases.

__get_user_pages() makes use of this in order to accelerate populating
THP ranges - that is, when both the pages and vmas arrays are NULL, we
don't need to iterate HPAGE_PMD_NR times to cover a single THP page
(and we also avoid taking mm->page_table_lock that many times).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
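For illustration only (not part of the commit): a minimal user-space sketch of
the stride computation this change enables. A GUP-style loop can use the
*page_mask value reported by follow_page_mask() to advance over an entire THP
in one step instead of iterating HPAGE_PMD_NR times. The PAGE_SHIFT and
HPAGE_PMD_NR values, the stub follow_page_mask(), and the page_increm formula
below are assumptions made for this sketch, not the kernel code itself.

/*
 * Hedged sketch: stepping over a THP range using a page_mask hint.
 * Build with: cc -o page_mask_sketch page_mask_sketch.c
 */
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define HPAGE_PMD_NR  512     /* 2 MB huge page / 4 KB base pages (assumed) */

/*
 * Stub standing in for follow_page_mask(): pretend every address lands in a
 * THP, which the real helper would report by setting *page_mask to
 * HPAGE_PMD_NR - 1 (and 0 for ordinary pages).
 */
static void stub_follow_page_mask(unsigned long addr, unsigned int *page_mask)
{
	(void)addr;
	*page_mask = HPAGE_PMD_NR - 1;
}

int main(void)
{
	unsigned long start = 0x200000UL + 3 * PAGE_SIZE;  /* 3 pages into a THP */
	unsigned long nr_pages = 1024;
	unsigned long iterations = 0;

	while (nr_pages) {
		unsigned int page_mask;
		unsigned long page_increm;

		stub_follow_page_mask(start, &page_mask);

		/*
		 * Advance to the end of the current huge page: the low bits of
		 * the page index give the position inside the THP, so the
		 * complement masked by page_mask counts the base pages left
		 * after this one. With page_mask == 0 this degrades to
		 * page_increm == 1, i.e. the old one-page-at-a-time behaviour.
		 */
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
		iterations++;
	}

	printf("covered 1024 base pages in %lu iterations\n", iterations);
	return 0;
}

Running the sketch covers 1024 base pages in 3 iterations (partial THP, full
THP, partial THP) rather than 1024, which is the saving the commit message
describes when neither pages nor vmas need to be filled in.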
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 87b0ef253607..6124f1db50fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1629,8 +1629,17 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 
-struct page *follow_page(struct vm_area_struct *, unsigned long address,
-			unsigned int foll_flags);
+struct page *follow_page_mask(struct vm_area_struct *vma,
+			      unsigned long address, unsigned int foll_flags,
+			      unsigned int *page_mask);
+
+static inline struct page *follow_page(struct vm_area_struct *vma,
+		unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */