author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2016-07-26 18:25:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>         2016-07-26 19:19:19 -0400
commit     1010245964415bb7403463115bab2cd26244b445 (patch)
tree       0bb07c499e1816334dd405ea9f82381223c7479a /mm/memory.c
parent     dd78fedde4b99b322f2dc849d467d365a82e23ca (diff)
mm: introduce do_set_pmd()
With postponed page table allocation we have a chance to set up huge pages.
do_set_pte() calls do_set_pmd() if the following criteria are met:
- the page is compound;
- the pmd entry is pmd_none();
- the vma has suitable size and alignment (see the sketch below).
Link: http://lkml.kernel.org/r/1466021202-61880-12-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
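
To make the "suitable size and alignment" criterion above concrete, here is a minimal user-space sketch of the two checks that transhuge_vma_suitable() performs in the patch below: the start of the VMA and its file offset must be congruent modulo the huge page size, and the whole PMD-sized range around the fault must fit inside the VMA. struct toy_vma, suitable(), the main() driver and the constants (4K pages, 512-page / 2M PMDs, i.e. common x86-64 values) are illustrative stand-ins, not kernel definitions.

/*
 * Simplified user-space model of the "suitable size and alignment" check.
 * The struct and constants below are stand-ins, not kernel definitions;
 * PAGE_SHIFT/HPAGE_PMD_NR mirror common x86-64 values (4K pages, 2M PMDs).
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define HPAGE_PMD_NR            512     /* 2M / 4K */
#define HPAGE_PMD_SIZE          ((unsigned long)HPAGE_PMD_NR << PAGE_SHIFT)
#define HPAGE_PMD_MASK          (~(HPAGE_PMD_SIZE - 1))
#define HPAGE_CACHE_INDEX_MASK  (HPAGE_PMD_NR - 1)

struct toy_vma {                        /* stand-in for vm_area_struct */
        unsigned long vm_start;         /* first mapped address */
        unsigned long vm_end;           /* one past the last mapped address */
        unsigned long vm_pgoff;         /* file offset of vm_start, in pages */
};

static bool suitable(const struct toy_vma *vma, unsigned long addr)
{
        unsigned long haddr = addr & HPAGE_PMD_MASK;    /* round down to 2M */

        /*
         * The virtual address and the file offset must be congruent modulo
         * the huge page size, otherwise a single PMD would map the wrong
         * part of the file.
         */
        if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                        (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                return false;

        /* The whole 2M range around the fault must fit inside the VMA. */
        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;

        return true;
}

int main(void)
{
        /* 4M mapping starting at a 2M-aligned address, file offset 0. */
        struct toy_vma vma = { 0x200000, 0x600000, 0 };

        printf("fault at 0x300000: %s\n",
               suitable(&vma, 0x300000) ? "PMD ok" : "fallback");

        /* Shift the file offset by one page: the alignment check now fails. */
        vma.vm_pgoff = 1;
        printf("fault at 0x300000: %s\n",
               suitable(&vma, 0x300000) ? "PMD ok" : "fallback");
        return 0;
}

Compiled with a stock C compiler, the program reports "PMD ok" for the aligned mapping and "fallback" once the file offset is shifted by a single page, which is exactly the case do_set_pmd() rejects with VM_FAULT_FALLBACK.
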
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   72
1 file changed, 71 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index 30cda24ff205..650622a3a0a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2920,6 +2920,66 @@ map_pte:
 	return 0;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
+			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+		return false;
+	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+		return false;
+	return true;
+}
+
+static int do_set_pmd(struct fault_env *fe, struct page *page)
+{
+	struct vm_area_struct *vma = fe->vma;
+	bool write = fe->flags & FAULT_FLAG_WRITE;
+	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	pmd_t entry;
+	int i, ret;
+
+	if (!transhuge_vma_suitable(vma, haddr))
+		return VM_FAULT_FALLBACK;
+
+	ret = VM_FAULT_FALLBACK;
+	page = compound_head(page);
+
+	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
+	if (unlikely(!pmd_none(*fe->pmd)))
+		goto out;
+
+	for (i = 0; i < HPAGE_PMD_NR; i++)
+		flush_icache_page(vma, page + i);
+
+	entry = mk_huge_pmd(page, vma->vm_page_prot);
+	if (write)
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+
+	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
+	page_add_file_rmap(page, true);
+
+	set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
+
+	update_mmu_cache_pmd(vma, haddr, fe->pmd);
+
+	/* fault is handled */
+	ret = 0;
+out:
+	spin_unlock(fe->ptl);
+	return ret;
+}
+#else
+static int do_set_pmd(struct fault_env *fe, struct page *page)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif
+
 /**
  * alloc_set_pte - setup new PTE entry for given page and add reverse page
  * mapping. If needed, the fucntion allocates page table or use pre-allocated.
@@ -2939,9 +2999,19 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
 	struct vm_area_struct *vma = fe->vma;
 	bool write = fe->flags & FAULT_FLAG_WRITE;
 	pte_t entry;
+	int ret;
+
+	if (pmd_none(*fe->pmd) && PageTransCompound(page)) {
+		/* THP on COW? */
+		VM_BUG_ON_PAGE(memcg, page);
+
+		ret = do_set_pmd(fe, page);
+		if (ret != VM_FAULT_FALLBACK)
+			return ret;
+	}
 
 	if (!fe->pte) {
-		int ret = pte_alloc_one_map(fe);
+		ret = pte_alloc_one_map(fe);
 		if (ret)
 			return ret;
 	}
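
For readers new to this fault path, the toy program below models the control flow the second hunk adds to alloc_set_pte(): the PMD path is attempted only when the PMD slot is still empty and the faulted page belongs to a compound (THP) page, and a VM_FAULT_FALLBACK return simply means "map it with ordinary PTEs instead". struct toy_fault, try_set_pmd() and handle_fault() are made-up names for illustration; only the role of VM_FAULT_FALLBACK mirrors the kernel's.

/*
 * Toy model of the fallback flow added to alloc_set_pte(): try to install
 * a PMD mapping first and only drop to PTEs when that path declines.
 * All names here (toy_fault, try_set_pmd, ...) are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_FAULT_FALLBACK	0x0800	/* same meaning as the kernel flag */

struct toy_fault {
	bool pmd_is_none;	/* no page table hangs off this PMD slot yet */
	bool page_is_compound;	/* faulted page belongs to a huge (THP) page */
};

/* Stands in for do_set_pmd(): succeed only when a huge mapping is possible. */
static int try_set_pmd(const struct toy_fault *f)
{
	if (!f->pmd_is_none || !f->page_is_compound)
		return VM_FAULT_FALLBACK;
	printf("  mapped with a single PMD (huge page)\n");
	return 0;
}

static int handle_fault(const struct toy_fault *f)
{
	int ret;

	if (f->pmd_is_none && f->page_is_compound) {
		ret = try_set_pmd(f);
		if (ret != VM_FAULT_FALLBACK)
			return ret;	/* huge mapping installed (or hard error) */
	}
	/* Fallback: allocate/map a page table and install a normal PTE. */
	printf("  mapped with a PTE (small page)\n");
	return 0;
}

int main(void)
{
	struct toy_fault huge  = { .pmd_is_none = true,  .page_is_compound = true };
	struct toy_fault small = { .pmd_is_none = false, .page_is_compound = true };

	printf("compound page, empty PMD:\n");
	handle_fault(&huge);
	printf("compound page, PMD already populated:\n");
	handle_fault(&small);
	return 0;
}

Running it takes the PMD branch for the first case and the PTE fallback for the second; in the kernel, both the unlocked check in alloc_set_pte() and the re-check under pmd_lock() inside do_set_pmd() funnel into the same PTE fallback path.
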