Diffstat (limited to 'mm/fremap.c')
-rw-r--r-- | mm/fremap.c | 28
1 file changed, 9 insertions, 19 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index d862be3bc3e3..f851775e09c2 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 
 	if (pte_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr, pfn);
+		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, pte, addr);
-			goto out;
+		page = vm_normal_page(vma, addr, pte);
+		if (page) {
+			if (pte_dirty(pte))
+				set_page_dirty(page);
+			page_remove_rmap(page);
+			page_cache_release(page);
 		}
-		page = pfn_to_page(pfn);
-		if (pte_dirty(pte))
-			set_page_dirty(page);
-		page_remove_rmap(page);
-		page_cache_release(page);
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(mm, addr, ptep);
 	}
-out:
 	return !!page;
 }
 
@@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_RESERVED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_RESERVED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -204,12 +196,10 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	 * Make sure the vma is shared, that it supports prefaulting,
 	 * and that the remapped range is valid and fully within
 	 * the single existing vma. vm_private_data is used as a
-	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
-	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+	 * swapout cursor in a VM_NONLINEAR vma.
 	 */
 	if (vma && (vma->vm_flags & VM_SHARED) &&
-	    (!vma->vm_private_data ||
-	        (vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
+	    (!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
 	    vma->vm_ops && vma->vm_ops->populate &&
 	    end > start && start >= vma->vm_start &&
 	    end <= vma->vm_end) {
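The substantive change is in the first hunk, in zap_pte(): the old code took the raw pfn, checked it by hand with pfn_valid(), treated failure as a corrupt PTE (print_bad_pte() plus a goto to the out: label), and only then did the dirty/rmap/refcount teardown. The new code instead asks vm_normal_page() whether the PTE maps a normal, refcounted page and simply skips the teardown when it does not. A NULL page becomes an ordinary "nothing to release" case rather than an error path, which is why the out: label disappears and why the VM_RESERVED BUG_ON()s in install_page()/install_file_pte() and the VM_RESERVED clause in sys_remap_file_pages() can go. The following is a minimal userspace sketch of that control-flow change, not kernel code: the types and helpers (fake_page, fake_pte, lookup_normal_page()) are invented stand-ins for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	/* Invented stand-ins for kernel types -- illustration only. */
	struct fake_page {
		int  refcount;
		bool dirty;
	};

	struct fake_pte {
		struct fake_page *page;	/* NULL models a "special" mapping */
		bool present;
		bool dirty;
	};

	/*
	 * Rough model of what the zap_pte() rewrite relies on:
	 * vm_normal_page() hands back a struct page only for normal,
	 * refcounted mappings and returns NULL for special ones, instead
	 * of the caller validating a raw pfn and treating failure as an
	 * error.
	 */
	static struct fake_page *lookup_normal_page(const struct fake_pte *pte)
	{
		return pte->present ? pte->page : NULL;
	}

	static int zap_pte_model(struct fake_pte *pte)
	{
		struct fake_page *page = lookup_normal_page(pte);

		if (page) {
			if (pte->dirty)
				page->dirty = true;	/* models set_page_dirty() */
			page->refcount--;		/* models page_remove_rmap()
							 * + page_cache_release() */
		}
		pte->present = false;			/* models clearing the PTE */
		return page != NULL;			/* mirrors "return !!page;" */
	}

	int main(void)
	{
		struct fake_page pg = { .refcount = 1, .dirty = false };
		struct fake_pte normal  = { .page = &pg,  .present = true, .dirty = true  };
		struct fake_pte special = { .page = NULL, .present = true, .dirty = false };

		/* Normal page: torn down.  Special mapping: silently skipped. */
		printf("zapped normal:  %d\n", zap_pte_model(&normal));
		printf("zapped special: %d\n", zap_pte_model(&special));
		return 0;
	}

The design point modeled here is that callers no longer need per-vma flags (such as VM_RESERVED) to guard the teardown: the page lookup itself encodes whether there is anything to release.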