about summary refs log tree commit diff stats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 54cf20ee0a83..3a6c4a658325 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1323,18 +1323,14 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
-	if (retval)
-		goto out;
-
 	retval = -EINVAL;
 	if (PageAnon(page))
-		goto out_uncharge;
+		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
-		goto out_uncharge;
+		goto out;
 	retval = -EBUSY;
 	if (!pte_none(*pte))
 		goto out_unlock;
@@ -1350,8 +1346,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 out_unlock:
 	pte_unmap_unlock(pte, ptl);
-out_uncharge:
-	mem_cgroup_uncharge_page(page);
 out:
 	return retval;
 }
@@ -2463,6 +2457,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	pte_t entry;
 	int anon = 0;
+	int charged = 0;
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
@@ -2503,6 +2498,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
+		if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+			ret = VM_FAULT_OOM;
+			page_cache_release(page);
+			goto out;
+		}
+		charged = 1;
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
@@ -2543,11 +2544,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	}
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
 	/*
@@ -2585,7 +2581,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
 	} else {
-		mem_cgroup_uncharge_page(page);
+		if (charged)
+			mem_cgroup_uncharge_page(page);
 		if (anon)
 			page_cache_release(page);
 		else