Diffstat (limited to 'mm/filemap_xip.c')
-rw-r--r--  mm/filemap_xip.c  234
1 file changed, 0 insertions(+), 234 deletions(-)
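
The 234 lines removed below are the execute-in-place (XIP) read and write paths: xip_file_read() and xip_file_write(), together with their helpers do_xip_mapping_read() and __xip_file_write(). For context, these exported symbols were meant to be wired into a filesystem's struct file_operations; the sketch that follows only illustrates that wiring, and everything in it apart from the three xip_* helpers (the example_xip_file_operations name, the llseek entry) is a placeholder assumed for illustration, not taken from this patch.

/*
 * Illustrative sketch (not part of this patch): how a filesystem with
 * XIP support would have hooked the exported helpers into its
 * file_operations. Only the three xip_* entries come from
 * mm/filemap_xip.c; the struct name and the llseek entry are
 * placeholders chosen for the example.
 */
#include <linux/fs.h>	/* struct file_operations, generic_file_llseek */

static const struct file_operations example_xip_file_operations = {
	.llseek	= generic_file_llseek,
	.read	= xip_file_read,	/* removed by this patch */
	.write	= xip_file_write,	/* removed by this patch */
	.mmap	= xip_file_mmap,	/* still present in this file */
};
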
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 59e1c5585748..9c869f402c07 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -43,119 +43,6 @@ static struct page *xip_sparse_page(void)
 }
 
 /*
- * This is a file read routine for execute in place files, and uses
- * the mapping->a_ops->get_xip_mem() function for the actual low-level
- * stuff.
- *
- * Note the struct file* is not used at all. It may be NULL.
- */
-static ssize_t
-do_xip_mapping_read(struct address_space *mapping,
-		    struct file_ra_state *_ra,
-		    struct file *filp,
-		    char __user *buf,
-		    size_t len,
-		    loff_t *ppos)
-{
-	struct inode *inode = mapping->host;
-	pgoff_t index, end_index;
-	unsigned long offset;
-	loff_t isize, pos;
-	size_t copied = 0, error = 0;
-
-	BUG_ON(!mapping->a_ops->get_xip_mem);
-
-	pos = *ppos;
-	index = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
-
-	isize = i_size_read(inode);
-	if (!isize)
-		goto out;
-
-	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-	do {
-		unsigned long nr, left;
-		void *xip_mem;
-		unsigned long xip_pfn;
-		int zero = 0;
-
-		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
-		if (index >= end_index) {
-			if (index > end_index)
-				goto out;
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
-			if (nr <= offset) {
-				goto out;
-			}
-		}
-		nr = nr - offset;
-		if (nr > len - copied)
-			nr = len - copied;
-
-		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
-							&xip_mem, &xip_pfn);
-		if (unlikely(error)) {
-			if (error == -ENODATA) {
-				/* sparse */
-				zero = 1;
-			} else
-				goto out;
-		}
-
-		/* If users can be writing to this page using arbitrary
-		 * virtual addresses, take care about potential aliasing
-		 * before reading the page on the kernel side.
-		 */
-		if (mapping_writably_mapped(mapping))
-			/* address based flush */ ;
-
-		/*
-		 * Ok, we have the mem, so now we can copy it to user space...
-		 *
-		 * The actor routine returns how many bytes were actually used..
-		 * NOTE! This may not be the same as how much of a user buffer
-		 * we filled up (we may be padding etc), so we can only update
-		 * "pos" here (the actor routine has to update the user buffer
-		 * pointers and the remaining count).
-		 */
-		if (!zero)
-			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
-		else
-			left = __clear_user(buf + copied, nr);
-
-		if (left) {
-			error = -EFAULT;
-			goto out;
-		}
-
-		copied += (nr - left);
-		offset += (nr - left);
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
-	} while (copied < len);
-
-out:
-	*ppos = pos + copied;
-	if (filp)
-		file_accessed(filp);
-
-	return (copied ? copied : error);
-}
-
-ssize_t
-xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
-{
-	if (!access_ok(VERIFY_WRITE, buf, len))
-		return -EFAULT;
-
-	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-			    buf, len, ppos);
-}
-EXPORT_SYMBOL_GPL(xip_file_read);
-
-/*
  * __xip_unmap is invoked from xip_unmap and xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
@@ -341,127 +228,6 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
 
-static ssize_t
-__xip_file_write(struct file *filp, const char __user *buf,
-		  size_t count, loff_t pos, loff_t *ppos)
-{
-	struct address_space * mapping = filp->f_mapping;
-	const struct address_space_operations *a_ops = mapping->a_ops;
-	struct inode *inode = mapping->host;
-	long status = 0;
-	size_t bytes;
-	ssize_t written = 0;
-
-	BUG_ON(!mapping->a_ops->get_xip_mem);
-
-	do {
-		unsigned long index;
-		unsigned long offset;
-		size_t copied;
-		void *xip_mem;
-		unsigned long xip_pfn;
-
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		index = pos >> PAGE_CACHE_SHIFT;
-		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
-
-		status = a_ops->get_xip_mem(mapping, index, 0,
-						&xip_mem, &xip_pfn);
-		if (status == -ENODATA) {
-			/* we allocate a new page unmap it */
-			mutex_lock(&xip_sparse_mutex);
-			status = a_ops->get_xip_mem(mapping, index, 1,
-							&xip_mem, &xip_pfn);
-			mutex_unlock(&xip_sparse_mutex);
-			if (!status)
-				/* unmap page at pgoff from all other vmas */
-				__xip_unmap(mapping, index);
-		}
-
-		if (status)
-			break;
-
-		copied = bytes -
-			__copy_from_user_nocache(xip_mem + offset, buf, bytes);
-
-		if (likely(copied > 0)) {
-			status = copied;
-
-			if (status >= 0) {
-				written += status;
-				count -= status;
-				pos += status;
-				buf += status;
-			}
-		}
-		if (unlikely(copied != bytes))
-			if (status >= 0)
-				status = -EFAULT;
-		if (status < 0)
-			break;
-	} while (count);
-	*ppos = pos;
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_mutex.
-	 */
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-
-	return written ? written : status;
-}
-
-ssize_t
-xip_file_write(struct file *filp, const char __user *buf, size_t len,
-	       loff_t *ppos)
-{
-	struct address_space *mapping = filp->f_mapping;
-	struct inode *inode = mapping->host;
-	size_t count;
-	loff_t pos;
-	ssize_t ret;
-
-	mutex_lock(&inode->i_mutex);
-
-	if (!access_ok(VERIFY_READ, buf, len)) {
-		ret=-EFAULT;
-		goto out_up;
-	}
-
-	pos = *ppos;
-	count = len;
-
-	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = inode_to_bdi(inode);
-
-	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
-	if (ret)
-		goto out_backing;
-	if (count == 0)
-		goto out_backing;
-
-	ret = file_remove_suid(filp);
-	if (ret)
-		goto out_backing;
-
-	ret = file_update_time(filp);
-	if (ret)
-		goto out_backing;
-
-	ret = __xip_file_write (filp, buf, count, pos, ppos);
-
- out_backing:
-	current->backing_dev_info = NULL;
- out_up:
-	mutex_unlock(&inode->i_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_write);
-
 /*
  * truncate a page used for execute in place
  * functionality is analog to block_truncate_page but does use get_xip_mem
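
Both removed loops begin each iteration by splitting the file position into a page index and an in-page offset (index = pos >> PAGE_CACHE_SHIFT; offset = pos & ~PAGE_CACHE_MASK, or equivalently pos & (PAGE_CACHE_SIZE - 1) on the write side). The standalone sketch below only demonstrates that arithmetic; the 4 KiB page size and the userspace main() wrapper are assumptions for illustration, since PAGE_CACHE_SHIFT/SIZE/MASK were kernel-internal aliases of the PAGE_* constants.

/*
 * Userspace demonstration of the pos -> (page index, in-page offset)
 * split used by the removed read and write loops. The 4 KiB page size
 * is assumed purely for the example.
 */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
	unsigned long long pos = 10000;	/* example file position (loff_t in the kernel) */
	unsigned long index  = pos >> PAGE_CACHE_SHIFT;	/* which page */
	unsigned long offset = pos & ~PAGE_CACHE_MASK;	/* byte within that page */

	/* prints: pos=10000 -> index=2 offset=1808 */
	printf("pos=%llu -> index=%lu offset=%lu\n", pos, index, offset);
	return 0;
}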