author	Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
committer	Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
commit	79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree	5e0fa7156acb75ba603022bc807df8f2fedb97a8 /mm/memory.c
parent	721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent	8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	74
1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 388dcf9aa283..9cb27470fee9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,7 @@
 #include <linux/string.h>
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -180,22 +181,22 @@ static void check_sync_rss_stat(struct task_struct *task)
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
-static int tlb_next_batch(struct mmu_gather *tlb)
+static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
 
 	batch = tlb->active;
 	if (batch->next) {
 		tlb->active = batch->next;
-		return 1;
+		return true;
 	}
 
 	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
-		return 0;
+		return false;
 
 	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 	if (!batch)
-		return 0;
+		return false;
 
 	tlb->batch_count++;
 	batch->next = NULL;
@@ -205,7 +206,7 @@ static int tlb_next_batch(struct mmu_gather *tlb)
 	tlb->active->next = batch;
 	tlb->active = batch;
 
-	return 1;
+	return true;
 }
 
 /* tlb_gather_mmu
@@ -2425,8 +2426,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
@@ -2685,6 +2684,12 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
+		/* Deliver the page fault to userland, check inside PT lock */
+		if (userfaultfd_missing(vma)) {
+			pte_unmap_unlock(page_table, ptl);
+			return handle_userfault(vma, address, flags,
+						VM_UFFD_MISSING);
+		}
 		goto setpte;
 	}
 
@@ -2713,6 +2718,15 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_none(*page_table))
 		goto release;
 
+	/* Deliver the page fault to userland, check inside PT lock */
+	if (userfaultfd_missing(vma)) {
+		pte_unmap_unlock(page_table, ptl);
+		mem_cgroup_cancel_charge(page, memcg);
+		page_cache_release(page);
+		return handle_userfault(vma, address, flags,
+					VM_UFFD_MISSING);
+	}
+
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 	mem_cgroup_commit_charge(page, memcg, false);
@@ -2999,9 +3013,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
+			 * i_mmap_lock for write to protect against truncate.
 			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			i_mmap_unlock_write(vma->vm_file->f_mapping);
 		}
 		goto uncharge_out;
 	}
@@ -3015,9 +3029,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else {
 		/*
 		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
+		 * i_mmap_lock for write to protect against truncate.
 		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
 	}
 	return ret;
 uncharge_out:
@@ -3216,6 +3230,27 @@ out:
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (vma_is_anonymous(vma))
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (vma_is_anonymous(vma))
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
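
The create_huge_pmd()/wp_huge_pmd() helpers added above split huge-PMD fault handling by VMA type: anonymous VMAs stay on do_huge_pmd_anonymous_page()/do_huge_pmd_wp_page(), VMAs with vm_ops are forwarded to the new ->pmd_fault method, and everything else gets VM_FAULT_FALLBACK. The following is only a hypothetical sketch of a driver providing that hook, using the signature visible in the calls above; example_pmd_fault and example_vm_ops are made-up names, not part of this commit.

/* Hypothetical ->pmd_fault hook; names here are illustrative only. */
#include <linux/mm.h>

static int example_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmd, unsigned int flags)
{
	/*
	 * A real handler would try to install a PMD-sized mapping here;
	 * VM_FAULT_FALLBACK tells create_huge_pmd()/wp_huge_pmd() to retry
	 * the fault with normal PTEs instead.
	 */
	return VM_FAULT_FALLBACK;
}

static const struct vm_operations_struct example_vm_ops = {
	.pmd_fault = example_pmd_fault,
};

Returning VM_FAULT_FALLBACK is always safe: the callers in __handle_mm_fault() check for it and redo the fault at PTE granularity.
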
@@ -3251,12 +3286,12 @@ static int handle_pte_fault(struct mm_struct *mm,
 	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops)
+			if (vma_is_anonymous(vma))
+				return do_anonymous_page(mm, vma, address,
+							 pte, pmd, flags);
+			else
 				return do_fault(mm, vma, address, pte, pmd,
 						flags, entry);
-
-			return do_anonymous_page(mm, vma, address, pte, pmd,
-					flags);
 		}
 		return do_swap_page(mm, vma, address,
 					pte, pmd, flags, entry);
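
Here handle_pte_fault() stops open-coding the vma->vm_ops test and dispatches pte_none() faults on vma_is_anonymous() instead. Judging from the checks this series replaces (this hunk and the !vma->vm_ops test removed from __handle_mm_fault() below), the helper is presumably little more than the following; this is an inferred sketch, not the actual include/linux/mm.h definition.

/* Sketch of the predicate that replaces the open-coded vm_ops checks. */
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	/* Anonymous memory has no vm_operations; file and special mappings do. */
	return !vma->vm_ops;
}
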
@@ -3318,10 +3353,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3345,8 +3377,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 						     orig_pmd, pmd);
 
 		if (dirty && !pmd_write(orig_pmd)) {
-			ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-						  orig_pmd);
+			ret = wp_huge_pmd(mm, vma, address, pmd,
+					  orig_pmd, flags);
 			if (!(ret & VM_FAULT_FALLBACK))
 				return ret;
 		} else {