author		Johannes Weiner <hannes@cmpxchg.org>	2011-01-13 18:47:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:44 -0500
commit		0ca1634d4143c3579273ca53b993df19f5c98e92 (patch)
tree		d566a582357a9e1528d1783566bfda4375c7e566
parent		f2d6bfe9ff0acec30b713614260e78b03d20e909 (diff)
thp: mincore transparent hugepage support
Handle transparent huge page pmd entries natively instead of splitting
them into subpages.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
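For context, the syscall path being optimized: mincore(2) fills one byte per page, with bit 0 set if the page is resident. After this patch a THP-backed range is answered directly from the pmd, and the call no longer forces split_huge_page_pmd() on the backing huge page. Below is a minimal userspace sketch of such a call; it is not part of the patch, and the 2MB huge page size and the MADV_HUGEPAGE hint are assumptions about the test system (actual huge page backing also depends on alignment and THP settings).

	/*
	 * Sketch (not from this patch): query residency of a range that
	 * may be backed by a transparent huge page. Assumes 2MB huge
	 * pages and MADV_HUGEPAGE support on the test system.
	 */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE_SIZE	(2UL << 20)	/* assumed: 2MB huge pages */

	int main(void)
	{
		size_t pages = HPAGE_SIZE / sysconf(_SC_PAGESIZE);
		unsigned char *vec = malloc(pages);
		size_t i, resident = 0;
		void *buf;

		buf = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (!vec || buf == MAP_FAILED)
			return 1;
		/* Hint that a huge page is wanted, then fault the range in. */
		madvise(buf, HPAGE_SIZE, MADV_HUGEPAGE);
		memset(buf, 0, HPAGE_SIZE);

		/*
		 * With this patch, a huge-page-backed pmd satisfies the
		 * whole query at once instead of being split first.
		 */
		if (mincore(buf, HPAGE_SIZE, vec))
			return 1;
		for (i = 0; i < pages; i++)
			resident += vec[i] & 1;
		printf("%zu of %zu pages resident\n", resident, pages);
		return 0;
	}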
-rw-r--r--	include/linux/huge_mm.h	 3
-rw-r--r--	mm/huge_memory.c	25
-rw-r--r--	mm/mincore.c	 8
3 files changed, 35 insertions, 1 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 43a694ef8904..25125fb6acf7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae2bf08b1099..37e89a32a0b1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned char *vec)
+{
+	int ret = 0;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		ret = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		if (unlikely(!ret))
+			wait_split_huge_page(vma->anon_vma, pmd);
+		else {
+			/*
+			 * All logical pages in the range are present
+			 * if backed by a huge page.
+			 */
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
diff --git a/mm/mincore.c b/mm/mincore.c
index 9959bb41570e..a4e6b9d75c76 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+				vec += (next - addr) >> PAGE_SHIFT;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
 		else