Diffstat (limited to 'mm/filemap_xip.c')
 -rw-r--r--   mm/filemap_xip.c   447
 1 file changed, 447 insertions, 0 deletions
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
new file mode 100644
index 000000000000..3b6e384b98a6
--- /dev/null
+++ b/mm/filemap_xip.c
@@ -0,0 +1,447 @@
/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    loff_t *ppos,
                    read_descriptor_t *desc,
                    read_actor_t actor)
{
        struct inode *inode = mapping->host;
        unsigned long index, end_index, offset;
        loff_t isize;

        BUG_ON(!mapping->a_ops->get_xip_page);

        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        for (;;) {
                struct page *page;
                unsigned long nr, ret;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                goto out;
                        }
                }
                nr = nr - offset;

                page = mapping->a_ops->get_xip_page(mapping,
                        index*(PAGE_SIZE/512), 0);
                if (!page)
                        goto no_xip_page;
                if (unlikely(IS_ERR(page))) {
                        if (PTR_ERR(page) == -ENODATA) {
                                /* sparse */
                                page = virt_to_page(empty_zero_page);
                        } else {
                                desc->error = PTR_ERR(page);
                                goto out;
                        }
                } else
                        BUG_ON(!PageUptodate(page));

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                if (ret == nr && desc->count)
                        continue;
                goto out;

no_xip_page:
                /* Did not get the page. Report it */
                desc->error = -EIO;
                goto out;
        }

out:
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        if (filp)
                file_accessed(filp);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        read_descriptor_t desc;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        desc.written = 0;
        desc.arg.buf = buf;
        desc.count = len;
        desc.error = 0;

        do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                            ppos, &desc, file_read_actor);

        if (desc.written)
                return desc.written;
        else
                return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);

ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
                  size_t count, read_actor_t actor, void *target)
{
        read_descriptor_t desc;

        if (!count)
                return 0;

        desc.written = 0;
        desc.count = count;
        desc.arg.data = target;
        desc.error = 0;

        do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
                            ppos, &desc, actor);
        if (desc.written)
                return desc.written;
        return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);

/*
 * __xip_unmap is invoked from xip_file_nopage and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * empty_zero_page when found at pgoff. Should it go in rmap.c?
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                /*
                 * We need the page_table_lock to protect us from page faults,
                 * munmap, fork, etc...
                 */
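                /*
                 * On success, page_check_address() returns with the
                 * page_table_lock held; it is dropped again below.
                 */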
                pte = page_check_address(virt_to_page(empty_zero_page), mm,
                                         address);
                if (!IS_ERR(pte)) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush(vma, address, pte);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap(pte);
                        spin_unlock(&mm->page_table_lock);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but used for execute in place
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
                unsigned long address,
                int *type)
{
        struct file *file = area->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long size, pgoff, endoff;

        pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
                + area->vm_pgoff;
        endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
                + area->vm_pgoff;

        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (pgoff >= size) {
                return NULL;
        }

        page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page)) {
                BUG_ON(!PageUptodate(page));
                return page;
        }
        if (PTR_ERR(page) != -ENODATA)
                return NULL;

        /* sparse block */
        if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                /* maybe shared writable, allocate new block */
                page = mapping->a_ops->get_xip_page(mapping,
                        pgoff*(PAGE_SIZE/512), 1);
                if (IS_ERR(page))
                        return NULL;
                BUG_ON(!PageUptodate(page));
                /* unmap page at pgoff from all other vmas */
                __xip_unmap(mapping, pgoff);
        } else {
                /* not shared and writable, use empty_zero_page */
                page = virt_to_page(empty_zero_page);
        }

        return page;
}

static struct vm_operations_struct xip_file_vm_ops = {
        .nopage = xip_file_nopage,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_page);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
                 size_t count, loff_t pos, loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct address_space_operations *a_ops = mapping->a_ops;
        struct inode *inode = mapping->host;
        long status = 0;
        struct page *page;
        size_t bytes;
        ssize_t written = 0;

        BUG_ON(!mapping->a_ops->get_xip_page);

        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
                fault_in_pages_readable(buf, bytes);

                page = a_ops->get_xip_page(mapping,
                                           index*(PAGE_SIZE/512), 0);
                if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
                        /* we allocate a new page and unmap it */
                        page = a_ops->get_xip_page(mapping,
                                                   index*(PAGE_SIZE/512), 1);
                        if (!IS_ERR(page))
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }

                if (IS_ERR(page)) {
                        status = PTR_ERR(page);
                        break;
                }

                BUG_ON(!PageUptodate(page));

                copied = filemap_copy_from_user(page, offset, buf, bytes);
                flush_dcache_page(page);
                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                        }
                }
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;
                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_sem.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
               loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count;
        loff_t pos;
        ssize_t ret;

        down(&inode->i_sem);

        if (!access_ok(VERIFY_READ, buf, len)) {
                ret = -EFAULT;
                goto out_up;
        }

        pos = *ppos;
        count = len;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out_backing;
        if (count == 0)
                goto out_backing;

        ret = remove_suid(filp->f_dentry);
        if (ret)
                goto out_backing;

        inode_update_time(inode, 1);

        ret = __xip_file_write(filp, buf, count, pos, ppos);

out_backing:
        current->backing_dev_info = NULL;
out_up:
        up(&inode->i_sem);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page, but uses
 * get_xip_page to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
        struct page *page;
        void *kaddr;

        BUG_ON(!mapping->a_ops->get_xip_page);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;

        page = mapping->a_ops->get_xip_page(mapping,
                                            index*(PAGE_SIZE/512), 0);
        if (!page)
                return -ENOMEM;
        if (unlikely(IS_ERR(page))) {
                if (PTR_ERR(page) == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
                        return PTR_ERR(page);
        } else
                BUG_ON(!PageUptodate(page));
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr + offset, 0, length);
        kunmap_atomic(kaddr, KM_USER0);

        flush_dcache_page(page);
        return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);