diff options

Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 39 +++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index baa999e87cd2..cf6873e91c6a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,6 +1151,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		if ((flags & FOLL_WRITE) &&
 		    !pte_dirty(pte) && !PageDirty(page))
 			set_page_dirty(page);
+		/*
+		 * pte_mkyoung() would be more correct here, but atomic care
+		 * is needed to avoid losing the dirty bit: it is easier to use
+		 * mark_page_accessed().
+		 */
 		mark_page_accessed(page);
 	}
 unlock:
@@ -1665,9 +1670,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end)
+	if (addr == vma->vm_start && end == vma->vm_end) {
 		vma->vm_pgoff = pfn;
-	else if (is_cow_mapping(vma->vm_flags))
+		vma->vm_flags |= VM_PFN_AT_MMAP;
+	} else if (is_cow_mapping(vma->vm_flags))
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1685,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * needed from higher level routine calling unmap_vmas
 	 */
 	vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+	vma->vm_flags &= ~VM_PFN_AT_MMAP;
 	return -EINVAL;
 }
 
@@ -1938,6 +1945,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get_user_pages(.write=1, .force=1).
 	 */
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+		struct vm_fault vmf;
+		int tmp;
+
+		vmf.virtual_address = (void __user *)(address &
+							PAGE_MASK);
+		vmf.pgoff = old_page->index;
+		vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+		vmf.page = old_page;
+
 		/*
 		 * Notify the address space that the page is about to
 		 * become writable so that it can prohibit this or wait
@@ -1949,8 +1965,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_get(old_page);
 		pte_unmap_unlock(page_table, ptl);
 
-		if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+		tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+		if (unlikely(tmp &
+				(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+			ret = tmp;
 			goto unwritable_page;
+		}
 
 		/*
 		 * Since we dropped the lock we need to revalidate
@@ -2099,7 +2119,7 @@ oom:
 
 unwritable_page:
 	page_cache_release(old_page);
-	return VM_FAULT_SIGBUS;
+	return ret;
 }
 
 /*
@@ -2433,8 +2453,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(PGMAJFAULT);
 	}
 
-	mark_page_accessed(page);
-
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
@@ -2643,9 +2661,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
+		int tmp;
+
 		unlock_page(page);
-		if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
-			ret = VM_FAULT_SIGBUS;
+		vmf.flags |= FAULT_FLAG_MKWRITE;
+		tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+		if (unlikely(tmp &
+				(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+			ret = tmp;
 			anon = 1; /* no anon but release vmf.page */
 			goto out_unlocked;
 		}
