Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ca8cac11bd2c..f82b359b2745 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page;
 	pte_t entry;
 	int reuse = 0, ret = 0;
+	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
+
+			page_mkwrite = 1;
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
 		 * do_no_page is protected similarly.
 		 */
 		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2307,13 +2310,14 @@ oom:
  * do not need to flush old virtual caches or the TLB.
  *
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * but allow concurrent faults), and pte neither mapped nor locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pmd_t *pmd,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
+	pte_t *page_table;
 	spinlock_t *ptl;
 	struct page *page;
 	pte_t entry;
@@ -2321,13 +2325,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
+	int page_mkwrite = 0;
 
 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
 	vmf.pgoff = pgoff;
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
 
 	if (likely(vma->vm_ops->fault)) {
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				anon = 1; /* no anon but release vmf.page */
 				goto out;
 			}
+			page_mkwrite = 1;
 		}
 	}
 
@@ -2453,7 +2458,7 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 
@@ -2468,8 +2473,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
 	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	pte_unmap(page_table);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 
@@ -2552,9 +2557,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
-
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 /*
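
Note: this diff is limited to mm/memory.c, so the new two-argument
set_page_dirty_balance() itself is not shown. A minimal sketch of what
the changed call sites imply, assuming the helper lives in
mm/page-writeback.c and that the page_mkwrite flag does nothing more
than force the dirty-throttling path even when set_page_dirty()
reports the page as already dirty:

	/*
	 * Sketch only, not the patched source: if ->page_mkwrite() ran
	 * for this fault, balance dirty pages even when set_page_dirty()
	 * returns 0, so mkwrite'd pages are always accounted against
	 * the dirty limits.
	 */
	void set_page_dirty_balance(struct page *page, int page_mkwrite)
	{
		if (set_page_dirty(page) || page_mkwrite) {
			struct address_space *mapping = page_mapping(page);

			if (mapping)
				balance_dirty_pages_ratelimited(mapping);
		}
	}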
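
The other half of the change moves pte_unmap() out of __do_fault() and
into its callers: __do_fault() is now entered with the pte neither
mapped nor locked, takes only the pmd, and maps/locks the pte itself
(page_table is now a local) once the fault has produced a page. Any
caller must follow the pattern do_linear_fault() now uses; a sketch
with a hypothetical caller name, reusing the pgoff computation from
the diff:

	/* Hypothetical caller illustrating the new __do_fault() contract. */
	static int example_linear_caller(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			pte_t *page_table, pmd_t *pmd, int write_access,
			pte_t orig_pte)
	{
		pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
		unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);

		/* Drop the pte mapping before __do_fault() can sleep. */
		pte_unmap(page_table);
		return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
	}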