Diffstat (limited to 'mm')
-rw-r--r--  mm/fadvise.c      |   2
-rw-r--r--  mm/filemap_xip.c  | 200
-rw-r--r--  mm/madvise.c      |   2
3 files changed, 102 insertions, 102 deletions
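
This patch moves the XIP (execute-in-place) hook in address_space_operations from the page-based get_xip_page() to get_xip_mem(), which returns a kernel virtual address plus a page frame number, so that XIP files can be backed by memory with no struct page behind it. Reconstructed from the call sites below (the authoritative declaration lives in include/linux/fs.h; parameter names here are a sketch), the shape of the change is roughly:

    /* old hook: sector-addressed, returns a struct page * or an ERR_PTR() */
    struct page *(*get_xip_page)(struct address_space *mapping,
                                 sector_t sector, int create);

    /* new hook: pgoff-addressed, returns 0 or -errno, and on success fills
     * both a kernel virtual address and a pfn for the backing memory */
    int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
                       int create, void **kmem, unsigned long *pfn);

The pfn half of that pair is what lets the fault handler further down install translations with vm_insert_mixed() instead of returning a struct page to the VM.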
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3c0f1e99f5e4..343cfdfebd9e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -49,7 +49,7 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 		goto out;
 	}
 
-	if (mapping->a_ops->get_xip_page) {
+	if (mapping->a_ops->get_xip_mem) {
 		switch (advice) {
 		case POSIX_FADV_NORMAL:
 		case POSIX_FADV_RANDOM:
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 5e598c42afd7..3e744abcce9d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,6 +15,7 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -42,37 +43,41 @@ static struct page *xip_sparse_page(void)
 
 /*
  * This is a file read routine for execute in place files, and uses
- * the mapping->a_ops->get_xip_page() function for the actual low-level
+ * the mapping->a_ops->get_xip_mem() function for the actual low-level
  * stuff.
  *
  * Note the struct file* is not used at all. It may be NULL.
 */
-static void
+static ssize_t
 do_xip_mapping_read(struct address_space *mapping,
 		    struct file_ra_state *_ra,
 		    struct file *filp,
-		    loff_t *ppos,
-		    read_descriptor_t *desc,
-		    read_actor_t actor)
+		    char __user *buf,
+		    size_t len,
+		    loff_t *ppos)
 {
 	struct inode *inode = mapping->host;
 	pgoff_t index, end_index;
 	unsigned long offset;
-	loff_t isize;
+	loff_t isize, pos;
+	size_t copied = 0, error = 0;
 
-	BUG_ON(!mapping->a_ops->get_xip_page);
+	BUG_ON(!mapping->a_ops->get_xip_mem);
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	pos = *ppos;
+	index = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_CACHE_MASK;
 
 	isize = i_size_read(inode);
 	if (!isize)
 		goto out;
 
 	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-	for (;;) {
-		struct page *page;
-		unsigned long nr, ret;
+	do {
+		unsigned long nr, left;
+		void *xip_mem;
+		unsigned long xip_pfn;
+		int zero = 0;
 
 		/* nr is the maximum number of bytes to copy from this page */
 		nr = PAGE_CACHE_SIZE;
@@ -85,19 +90,17 @@ do_xip_mapping_read(struct address_space *mapping,
 			}
 		}
 		nr = nr - offset;
+		if (nr > len)
+			nr = len;
 
-		page = mapping->a_ops->get_xip_page(mapping,
-			index*(PAGE_SIZE/512), 0);
-		if (!page)
-			goto no_xip_page;
-		if (unlikely(IS_ERR(page))) {
-			if (PTR_ERR(page) == -ENODATA) {
+		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
+							&xip_mem, &xip_pfn);
+		if (unlikely(error)) {
+			if (error == -ENODATA) {
 				/* sparse */
-				page = ZERO_PAGE(0);
-			} else {
-				desc->error = PTR_ERR(page);
+				zero = 1;
+			} else
 				goto out;
-			}
 		}
 
 		/* If users can be writing to this page using arbitrary
@@ -105,10 +108,10 @@ do_xip_mapping_read(struct address_space *mapping,
 		 * before reading the page on the kernel side.
 		 */
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			/* address based flush */ ;
 
 		/*
-		 * Ok, we have the page, so now we can copy it to user space...
+		 * Ok, we have the mem, so now we can copy it to user space...
 		 *
 		 * The actor routine returns how many bytes were actually used..
 		 * NOTE! This may not be the same as how much of a user buffer
@@ -116,47 +119,38 @@ do_xip_mapping_read(struct address_space *mapping,
 		 * "pos" here (the actor routine has to update the user buffer
 		 * pointers and the remaining count).
 		 */
-		ret = actor(desc, page, offset, nr);
-		offset += ret;
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
+		if (!zero)
+			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
+		else
+			left = __clear_user(buf + copied, nr);
 
-		if (ret == nr && desc->count)
-			continue;
-		goto out;
+		if (left) {
+			error = -EFAULT;
+			goto out;
+		}
 
-no_xip_page:
-		/* Did not get the page. Report it */
-		desc->error = -EIO;
-		goto out;
-	}
+		copied += (nr - left);
+		offset += (nr - left);
+		index += offset >> PAGE_CACHE_SHIFT;
+		offset &= ~PAGE_CACHE_MASK;
+	} while (copied < len);
 
 out:
-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = pos + copied;
 	if (filp)
 		file_accessed(filp);
+
+	return (copied ? copied : error);
 }
 
 ssize_t
 xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	read_descriptor_t desc;
-
 	if (!access_ok(VERIFY_WRITE, buf, len))
 		return -EFAULT;
 
-	desc.written = 0;
-	desc.arg.buf = buf;
-	desc.count = len;
-	desc.error = 0;
-
-	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-			    ppos, &desc, file_read_actor);
-
-	if (desc.written)
-		return desc.written;
-	else
-		return desc.error;
+	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+			    buf, len, ppos);
 }
 EXPORT_SYMBOL_GPL(xip_file_read);
 
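The rewritten read path above drops the read_descriptor_t/actor machinery: with a kernel virtual address in hand it copies straight to userspace, and a hole (-ENODATA from get_xip_mem()) is satisfied with __clear_user() rather than a copy from ZERO_PAGE(0). The unchecked __copy_to_user()/__clear_user() variants are safe because xip_file_read() ran access_ok() over the whole buffer first. As an illustrative restatement (this helper is not part of the patch), the copy step amounts to:

    /*
     * Copy nr bytes at offset from the XIP kernel mapping, or zero-fill
     * when the block is a hole. Returns 0 or -EFAULT. Illustration only;
     * the caller is assumed to have validated buf with access_ok().
     */
    static int xip_copy_step(char __user *buf, size_t copied, void *xip_mem,
                             unsigned long offset, unsigned long nr, int zero)
    {
        unsigned long left;

        if (!zero)
            left = __copy_to_user(buf + copied, xip_mem + offset, nr);
        else
            left = __clear_user(buf + copied, nr);
        return left ? -EFAULT : 0;
    }

Note also the return convention: a short read returns the bytes copied and only reports the error when nothing was copied, matching regular file read semantics.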
@@ -211,13 +205,16 @@ __xip_unmap (struct address_space * mapping,
  *
  * This function is derived from filemap_fault, but used for execute in place
  */
-static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct file *file = area->vm_file;
+	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	struct page *page;
 	pgoff_t size;
+	void *xip_mem;
+	unsigned long xip_pfn;
+	struct page *page;
+	int error;
 
 	/* XXX: are VM_FAULT_ codes OK? */
 
@@ -225,35 +222,44 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	if (vmf->pgoff >= size)
 		return VM_FAULT_SIGBUS;
 
-	page = mapping->a_ops->get_xip_page(mapping,
-					vmf->pgoff*(PAGE_SIZE/512), 0);
-	if (!IS_ERR(page))
-		goto out;
-	if (PTR_ERR(page) != -ENODATA)
+	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
+						&xip_mem, &xip_pfn);
+	if (likely(!error))
+		goto found;
+	if (error != -ENODATA)
 		return VM_FAULT_OOM;
 
 	/* sparse block */
-	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
-	    (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
+	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
+	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
 	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
+		int err;
+
 		/* maybe shared writable, allocate new block */
-		page = mapping->a_ops->get_xip_page(mapping,
-					vmf->pgoff*(PAGE_SIZE/512), 1);
-		if (IS_ERR(page))
+		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
+							&xip_mem, &xip_pfn);
+		if (error)
 			return VM_FAULT_SIGBUS;
-		/* unmap page at pgoff from all other vmas */
+		/* unmap sparse mappings at pgoff from all other vmas */
 		__xip_unmap(mapping, vmf->pgoff);
+
+found:
+		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+							xip_pfn);
+		if (err == -ENOMEM)
+			return VM_FAULT_OOM;
+		BUG_ON(err);
+		return VM_FAULT_NOPAGE;
 	} else {
 		/* not shared and writable, use xip_sparse_page() */
 		page = xip_sparse_page();
 		if (!page)
 			return VM_FAULT_OOM;
-	}
 
-out:
-	page_cache_get(page);
-	vmf->page = page;
-	return 0;
+		page_cache_get(page);
+		vmf->page = page;
+		return 0;
+	}
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
@@ -262,11 +268,11 @@ static struct vm_operations_struct xip_file_vm_ops = {
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-	BUG_ON(!file->f_mapping->a_ops->get_xip_page);
+	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);
 
 	file_accessed(file);
 	vma->vm_ops = &xip_file_vm_ops;
-	vma->vm_flags |= VM_CAN_NONLINEAR;
+	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
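
The fault handler above is the core of the conversion: given only a pfn, it installs the pte itself and returns VM_FAULT_NOPAGE instead of handing the VM a struct page. VM_MIXEDMAP, newly set in xip_file_mmap(), declares that the vma may mix pfn-only and struct-page mappings (the read-fault-on-hole case still returns the shared xip_sparse_page() via vmf->page, which is why both return styles remain). A sketch of the pfn-insert idiom as a standalone helper (the helper name is invented; vm_insert_mixed() is the real mm/memory.c interface of this series):

    static int xip_insert_pfn(struct vm_area_struct *vma, struct vm_fault *vmf,
                              unsigned long xip_pfn)
    {
        int err;

        /* write the pte for the faulting address directly from the pfn */
        err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                              xip_pfn);
        if (err == -ENOMEM)
            return VM_FAULT_OOM;
        BUG_ON(err);            /* any other error indicates a kernel bug */
        return VM_FAULT_NOPAGE; /* pte installed; vmf->page is not used */
    }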
@@ -279,17 +285,17 @@ __xip_file_write(struct file *filp, const char __user *buf,
 	const struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode *inode = mapping->host;
 	long status = 0;
-	struct page *page;
 	size_t bytes;
 	ssize_t written = 0;
 
-	BUG_ON(!mapping->a_ops->get_xip_page);
+	BUG_ON(!mapping->a_ops->get_xip_mem);
 
 	do {
 		unsigned long index;
 		unsigned long offset;
 		size_t copied;
-		char *kaddr;
+		void *xip_mem;
+		unsigned long xip_pfn;
 
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
@@ -297,28 +303,22 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		if (bytes > count)
 			bytes = count;
 
-		page = a_ops->get_xip_page(mapping,
-					   index*(PAGE_SIZE/512), 0);
-		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
+		status = a_ops->get_xip_mem(mapping, index, 0,
+						&xip_mem, &xip_pfn);
+		if (status == -ENODATA) {
 			/* we allocate a new page unmap it */
-			page = a_ops->get_xip_page(mapping,
-						   index*(PAGE_SIZE/512), 1);
-			if (!IS_ERR(page))
+			status = a_ops->get_xip_mem(mapping, index, 1,
+							&xip_mem, &xip_pfn);
+			if (!status)
 				/* unmap page at pgoff from all other vmas */
 				__xip_unmap(mapping, index);
 		}
 
-		if (IS_ERR(page)) {
-			status = PTR_ERR(page);
+		if (status)
 			break;
-		}
 
-		fault_in_pages_readable(buf, bytes);
-		kaddr = kmap_atomic(page, KM_USER0);
 		copied = bytes -
-			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(page);
+			__copy_from_user_nocache(xip_mem + offset, buf, bytes);
 
 		if (likely(copied > 0)) {
 			status = copied;
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
- * functionality is analog to block_truncate_page but does use get_xip_page
+ * functionality is analog to block_truncate_page but does use get_xip_mem
  * to get the page instead of page cache
 */
 int
@@ -408,9 +408,11 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
 	unsigned blocksize;
 	unsigned length;
-	struct page *page;
+	void *xip_mem;
+	unsigned long xip_pfn;
+	int err;
 
-	BUG_ON(!mapping->a_ops->get_xip_page);
+	BUG_ON(!mapping->a_ops->get_xip_mem);
 
 	blocksize = 1 << mapping->host->i_blkbits;
 	length = offset & (blocksize - 1);
@@ -421,18 +423,16 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 
 	length = blocksize - length;
 
-	page = mapping->a_ops->get_xip_page(mapping,
-					    index*(PAGE_SIZE/512), 0);
-	if (!page)
-		return -ENOMEM;
-	if (unlikely(IS_ERR(page))) {
-		if (PTR_ERR(page) == -ENODATA)
+	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
+						&xip_mem, &xip_pfn);
+	if (unlikely(err)) {
+		if (err == -ENODATA)
 			/* Hole? No need to truncate */
 			return 0;
 		else
-			return PTR_ERR(page);
+			return err;
 	}
-	zero_user(page, offset, length);
+	memset(xip_mem + offset, 0, length);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
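
A filesystem now opts into XIP by providing get_xip_mem (ext2 is converted in the companion patches of this series). As a purely hypothetical minimal implementation, for a filesystem whose backing store is a single contiguous kernel-mapped region (every example_* name below is invented for illustration):

    struct example_sb_info {
        void *base;             /* kernel virtual address of the region */
        unsigned long base_pfn; /* pfn of the region's first page */
        pgoff_t nr_pages;       /* region size in pages */
    };

    static int example_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
                                   int create, void **kmem, unsigned long *pfn)
    {
        struct example_sb_info *sbi = mapping->host->i_sb->s_fs_info;

        /* beyond the region there is nothing to map and nothing to allocate;
         * -ENODATA is the "hole" convention the callers above rely on */
        if (pgoff >= sbi->nr_pages)
            return -ENODATA;

        *kmem = sbi->base + ((unsigned long)pgoff << PAGE_SHIFT);
        *pfn = sbi->base_pfn + pgoff;
        return 0;
    }

The contract visible from the callers: return 0 with *kmem and *pfn filled, return -ENODATA for a hole when create is 0, and allocate backing when create is 1 (this flat-region sketch has nothing to allocate, so it ignores create).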
diff --git a/mm/madvise.c b/mm/madvise.c
index 93ee375b38e7..23a0ec3e0ea0 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,7 +112,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
 	if (!file)
 		return -EBADF;
 
-	if (file->f_mapping->a_ops->get_xip_page) {
+	if (file->f_mapping->a_ops->get_xip_mem) {
 		/* no bad return value, but ignore advice */
 		return 0;
 	}