author     Jan Kara <jack@suse.cz>                         2016-12-14 18:07:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 19:04:09 -0500
commit     936ca80d3773bd9b6dda8a0dfd54425f9ec1be9d (patch)
tree       29605a13cdf2ca249d928966086155451f576242 /mm/memory.c
parent     667240e0f2e13e792a5af99b3c34dfab12ef125b (diff)
mm: trim __do_fault() arguments
Use the vm_fault structure to pass cow_page, page, and entry in and out of the
function. That reduces the number of __do_fault() arguments from 4 to 1.

Link: http://lkml.kernel.org/r/1479460644-25076-6-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
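In short, the do_cow_fault() call site changes roughly as follows (a condensed
sketch drawn from the diff below; allocation-failure and memcg-charge handling
are elided):

	/* Before: extra state was threaded through __do_fault()'s argument list. */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);

	/* After: the same state lives in struct vm_fault. The caller fills in
	 * vmf->cow_page up front and reads the faulted page back from vmf->page. */
	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
	ret = __do_fault(vmf);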
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 67
1 file changed, 29 insertions(+), 38 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index cbc6d47fda73..78b81e8984df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,26 +2844,22 @@ oom:
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }
 
@@ -3208,7 +3203,6 @@ out:
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3222,54 +3216,52 @@ static int do_read_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
 	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
-	if (!new_page)
+	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
 				&memcg, false)) {
-		put_page(new_page);
+		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
-	__SetPageUptodate(new_page);
+		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, new_page);
+	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
@@ -3277,20 +3269,19 @@ static int do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg, false);
-	put_page(new_page);
+	mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+	put_page(vmf->cow_page);
 	return ret;
 }
 
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3299,26 +3290,26 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
@@ -3326,8 +3317,8 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still