Diffstat (limited to 'mm/fremap.c')
-rw-r--r--	mm/fremap.c	179
1 file changed, 93 insertions(+), 86 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 4e3f53dd5fd4..c395b1abf082 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -20,13 +20,14 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	struct page *page = NULL;
 
 	if (pte_present(pte)) {
+		struct page *page;
+
 		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
 		page = vm_normal_page(vma, addr, pte);
@@ -35,68 +36,21 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 				set_page_dirty(page);
 			page_remove_rmap(page, vma);
 			page_cache_release(page);
+			update_hiwater_rss(mm);
+			dec_mm_counter(mm, file_rss);
 		}
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear_not_present_full(mm, addr, ptep, 0);
 	}
-	return !!page;
 }
 
 /*
- * Install a file page to a given virtual memory address, release any
- * previously existing mapping.
- */
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long addr, struct page *page, pgprot_t prot)
-{
-	struct inode *inode;
-	pgoff_t size;
-	int err = -ENOMEM;
-	pte_t *pte;
-	pte_t pte_val;
-	spinlock_t *ptl;
-
-	pte = get_locked_pte(mm, addr, &ptl);
-	if (!pte)
-		goto out;
-
-	/*
-	 * This page may have been truncated. Tell the
-	 * caller about it.
-	 */
-	err = -EINVAL;
-	inode = vma->vm_file->f_mapping->host;
-	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (!page->mapping || page->index >= size)
-		goto unlock;
-	err = -ENOMEM;
-	if (page_mapcount(page) > INT_MAX/2)
-		goto unlock;
-
-	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
-		inc_mm_counter(mm, file_rss);
-
-	flush_icache_page(vma, page);
-	pte_val = mk_pte(page, prot);
-	set_pte_at(mm, addr, pte, pte_val);
-	page_add_file_rmap(page);
-	update_mmu_cache(vma, addr, pte_val);
-	lazy_mmu_prot_update(pte_val);
-	err = 0;
-unlock:
-	pte_unmap_unlock(pte, ptl);
-out:
-	return err;
-}
-EXPORT_SYMBOL(install_page);
-
-/*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
  */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
@@ -107,10 +61,8 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
-		update_hiwater_rss(mm);
-		dec_mm_counter(mm, file_rss);
-	}
+	if (!pte_none(*pte))
+		zap_pte(mm, vma, addr, pte);
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	/*
@@ -126,6 +78,25 @@ out:
 	return err;
 }
 
+static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long addr, unsigned long size, pgoff_t pgoff)
+{
+	int err;
+
+	do {
+		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
+		if (err)
+			return err;
+
+		size -= PAGE_SIZE;
+		addr += PAGE_SIZE;
+		pgoff++;
+	} while (size);
+
+	return 0;
+
+}
+
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  * file within an existing vma.
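For context (not part of the patch): populate_range() above is the loop that installs one file PTE per page across the requested window, and it is what sys_remap_file_pages() calls in the next hunk. From userspace the syscall is reached through the glibc wrapper remap_file_pages(2); below is a minimal illustrative sketch of a caller, where the file path and the page offsets are purely hypothetical. Depending on the mapping, the kernel path in the next hunk either installs the file PTEs via populate_range() or, on dirty-accountable mappings, emulates the remap with an ordinary linear vma.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/data", O_RDWR);	/* illustrative file, >= 3 pages */
	char *win;

	if (fd < 0)
		return 1;

	/* Linear MAP_SHARED mapping of the first three file pages. */
	win = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (win == MAP_FAILED)
		return 1;

	/*
	 * Rewire page 0 of the window to file page 2.  prot must be 0
	 * and pgoff is in page units; the vma becomes VM_NONLINEAR.
	 */
	if (remap_file_pages(win, psz, 0, 2, 0))
		perror("remap_file_pages");

	return 0;
}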
@@ -183,41 +154,77 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	 * the single existing vma. vm_private_data is used as a
 	 * swapout cursor in a VM_NONLINEAR vma.
 	 */
-	if (vma && (vma->vm_flags & VM_SHARED) &&
-		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
-		vma->vm_ops && vma->vm_ops->populate &&
-			end > start && start >= vma->vm_start &&
-				end <= vma->vm_end) {
-
-		/* Must set VM_NONLINEAR before any pages are populated. */
-		if (pgoff != linear_page_index(vma, start) &&
-		    !(vma->vm_flags & VM_NONLINEAR)) {
-			if (!has_write_lock) {
-				up_read(&mm->mmap_sem);
-				down_write(&mm->mmap_sem);
-				has_write_lock = 1;
-				goto retry;
-			}
-			mapping = vma->vm_file->f_mapping;
-			spin_lock(&mapping->i_mmap_lock);
-			flush_dcache_mmap_lock(mapping);
-			vma->vm_flags |= VM_NONLINEAR;
-			vma_prio_tree_remove(vma, &mapping->i_mmap);
-			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
-			flush_dcache_mmap_unlock(mapping);
-			spin_unlock(&mapping->i_mmap_lock);
-		}
+	if (!vma || !(vma->vm_flags & VM_SHARED))
+		goto out;
 
-		err = vma->vm_ops->populate(vma, start, size,
-					    vma->vm_page_prot,
-					    pgoff, flags & MAP_NONBLOCK);
+	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
+		goto out;
+
+	if (!vma->vm_flags & VM_CAN_NONLINEAR)
+		goto out;
 
+	if (end <= start || start < vma->vm_start || end > vma->vm_end)
+		goto out;
+
+	/* Must set VM_NONLINEAR before any pages are populated. */
+	if (!(vma->vm_flags & VM_NONLINEAR)) {
+		/* Don't need a nonlinear mapping, exit success */
+		if (pgoff == linear_page_index(vma, start)) {
+			err = 0;
+			goto out;
+		}
+
+		if (!has_write_lock) {
+			up_read(&mm->mmap_sem);
+			down_write(&mm->mmap_sem);
+			has_write_lock = 1;
+			goto retry;
+		}
+		mapping = vma->vm_file->f_mapping;
 		/*
-		 * We can't clear VM_NONLINEAR because we'd have to do
-		 * it after ->populate completes, and that would prevent
-		 * downgrading the lock.  (Locks can't be upgraded).
+		 * page_mkclean doesn't work on nonlinear vmas, so if
+		 * dirty pages need to be accounted, emulate with linear
+		 * vmas.
 		 */
+		if (mapping_cap_account_dirty(mapping)) {
+			unsigned long addr;
+
+			flags &= MAP_NONBLOCK;
+			addr = mmap_region(vma->vm_file, start, size,
+					flags, vma->vm_flags, pgoff, 1);
+			if (IS_ERR_VALUE(addr)) {
+				err = addr;
+			} else {
+				BUG_ON(addr != start);
+				err = 0;
+			}
+			goto out;
+		}
+		spin_lock(&mapping->i_mmap_lock);
+		flush_dcache_mmap_lock(mapping);
+		vma->vm_flags |= VM_NONLINEAR;
+		vma_prio_tree_remove(vma, &mapping->i_mmap);
+		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+		flush_dcache_mmap_unlock(mapping);
+		spin_unlock(&mapping->i_mmap_lock);
+	}
+
+	err = populate_range(mm, vma, start, size, pgoff);
+	if (!err && !(flags & MAP_NONBLOCK)) {
+		if (unlikely(has_write_lock)) {
+			downgrade_write(&mm->mmap_sem);
+			has_write_lock = 0;
+		}
+		make_pages_present(start, start+size);
 	}
+
+	/*
+	 * We can't clear VM_NONLINEAR because we'd have to do
+	 * it after ->populate completes, and that would prevent
+	 * downgrading the lock.  (Locks can't be upgraded).
+	 */
+
+out:
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
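A note on the mapping_cap_account_dirty() branch added above: because page_mkclean() cannot walk a VM_NONLINEAR vma, the patch turns each remap request on a dirty-accountable mapping into an ordinary linear mapping fixed at the same address via mmap_region(). The userspace-visible effect is roughly what a caller would get with MAP_FIXED by hand; the sketch below illustrates that equivalence under that assumption, with the names (win, len, fd, pgoff) purely illustrative.

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Sketch only: instead of rewiring PTEs inside one nonlinear vma, the
 * window [win, win + len) is replaced by a fresh linear MAP_SHARED
 * mapping of the same file starting at file page 'pgoff'.  Error
 * handling elided; not the kernel's actual code path.
 */
static void *emulate_nonlinear(void *win, size_t len, int fd,
			       size_t pgoff, long page_size)
{
	return mmap(win, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_FIXED, fd, (off_t)pgoff * page_size);
}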