Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	26
1 file changed, 10 insertions(+), 16 deletions(-)
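For context on the hunks below: the patch open-codes the VM_PFNMAP test that the old is_pfn_mapping() helper wrapped, and in remap_pfn_range() it moves the track_pfn_remap() call (which now also takes the start address) ahead of the vm_flags update, so the error path no longer has to strip VM_IO | VM_RESERVED | VM_PFNMAP and the now-unneeded VM_PFN_AT_MMAP flag. As a rough sketch only, the helper being replaced was presumably defined along these lines in include/linux/mm.h of this era (reconstructed from the open-coded test in the diff, not taken from this commit):

static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
	/* True for raw PFN mappings set up by remap_pfn_range(),
	 * which have no struct page backing. */
	return !!(vma->vm_flags & VM_PFNMAP);
}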
diff --git a/mm/memory.c b/mm/memory.c
index 6bef278ad303..655e1429388a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1055,7 +1055,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-	if (unlikely(is_pfn_mapping(vma))) {
+	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
@@ -1327,7 +1327,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (vma->vm_file)
 		uprobe_munmap(vma, start, end);
 
-	if (unlikely(is_pfn_mapping(vma)))
+	if (unlikely(vma->vm_flags & VM_PFNMAP))
 		untrack_pfn(vma, 0, 0);
 
 	if (start != end) {
@@ -2299,26 +2299,20 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * There's a horrible special case to handle copy-on-write
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+	 * See vm_normal_page() for details.
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end) {
+	if (is_cow_mapping(vma->vm_flags)) {
+		if (addr != vma->vm_start || end != vma->vm_end)
+			return -EINVAL;
 		vma->vm_pgoff = pfn;
-		vma->vm_flags |= VM_PFN_AT_MMAP;
-	} else if (is_cow_mapping(vma->vm_flags))
+	}
+
+	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+	if (err)
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_remap(vma, &prot, pfn, PAGE_ALIGN(size));
-	if (err) {
-		/*
-		 * To indicate that track_pfn related cleanup is not
-		 * needed from higher level routine calling unmap_vmas
-		 */
-		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-		vma->vm_flags &= ~VM_PFN_AT_MMAP;
-		return -EINVAL;
-	}
-
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
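To illustrate the caller-visible effect of the remap_pfn_range() change, here is a minimal sketch of a character-device ->mmap handler; the names my_dev_mmap and my_buf_phys are hypothetical and not part of this commit. With the reworked function, a COW (private writable) mapping must cover the whole VMA or the call fails with -EINVAL before any vm_flags are set, and a track_pfn_remap() failure likewise comes back as a plain -EINVAL with no VM_PFNMAP or VM_PFN_AT_MMAP cleanup left for the caller or for unmap_vmas() to worry about.

/* Hypothetical driver snippet, for illustration only. */
#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t my_buf_phys;	/* assumed: physical base of a driver-owned buffer */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Remap the whole VMA, so the is_cow_mapping() special case above
	 * can still stash the base pfn in vma->vm_pgoff.  Any PAT tracking
	 * error inside remap_pfn_range() is now just -EINVAL to us. */
	return remap_pfn_range(vma, vma->vm_start,
			       my_buf_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}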