Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.h     |   2
-rw-r--r--  mm/filemap_xip.c | 246
2 files changed, 57 insertions(+), 191 deletions(-)
diff --git a/mm/filemap.h b/mm/filemap.h
index c2d0546a57eb..13793ba0ce17 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -15,7 +15,7 @@
 #include <linux/config.h>
 #include <asm/uaccess.h>
 
-extern size_t
+size_t
 __filemap_copy_from_user_iovec(char *vaddr,
                                const struct iovec *iov,
                                size_t base,
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 7d63acd48817..3b6e384b98a6 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -114,83 +114,28 @@ out:
 	file_accessed(filp);
 }
 
-/*
- * This is the "read()" routine for all filesystems
- * that uses the get_xip_page address space operation.
- */
-static ssize_t
-__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		    unsigned long nr_segs, loff_t *ppos)
-{
-	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
-	unsigned long seg;
-	size_t count;
-
-	count = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
-
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		count += iv->iov_len;
-		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		count -= iv->iov_len;	/* This segment is no good */
-		break;
-	}
-
-	retval = 0;
-	if (count) {
-		for (seg = 0; seg < nr_segs; seg++) {
-			read_descriptor_t desc;
-
-			desc.written = 0;
-			desc.arg.buf = iov[seg].iov_base;
-			desc.count = iov[seg].iov_len;
-			if (desc.count == 0)
-				continue;
-			desc.error = 0;
-			do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-					    ppos, &desc, file_read_actor);
-			retval += desc.written;
-			if (!retval) {
-				retval = desc.error;
-				break;
-			}
-		}
-	}
-	return retval;
-}
-
 ssize_t
-xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
-		  loff_t pos)
+xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+	read_descriptor_t desc;
 
-	BUG_ON(iocb->ki_pos != pos);
-	return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
-}
-EXPORT_SYMBOL_GPL(xip_file_aio_read);
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
 
-ssize_t
-xip_file_readv(struct file *filp, const struct iovec *iov,
-	       unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
+	desc.written = 0;
+	desc.arg.buf = buf;
+	desc.count = len;
+	desc.error = 0;
 
-	init_sync_kiocb(&kiocb, filp);
-	return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
+	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+			    ppos, &desc, file_read_actor);
+
+	if (desc.written)
+		return desc.written;
+	else
+		return desc.error;
 }
-EXPORT_SYMBOL_GPL(xip_file_readv);
+EXPORT_SYMBOL_GPL(xip_file_read);
 
 ssize_t
 xip_file_sendfile(struct file *in_file, loff_t *ppos,
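
[Note] The new xip_file_read() is a plain synchronous read: it validates the user
buffer, wraps it in a read_descriptor_t, and lets do_xip_mapping_read() push data
through file_read_actor, returning desc.written or desc.error. A minimal sketch of
how a filesystem might wire these entry points into its file_operations; the struct
below is illustrative (a real filesystem's xip file_operations may carry more
members), and only the xip_file_* symbols come from mm/filemap_xip.c:

	static struct file_operations example_xip_file_operations = {
		.llseek		= generic_file_llseek,
		.read		= xip_file_read,	/* was .aio_read/.readv */
		.write		= xip_file_write,	/* was .aio_write/.writev */
		.mmap		= xip_file_mmap,
		.sendfile	= xip_file_sendfile,
	};
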
@@ -326,25 +271,19 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 EXPORT_SYMBOL_GPL(xip_file_mmap);
 
 static ssize_t
-do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
-	unsigned long nr_segs, loff_t pos, loff_t *ppos,
-	size_t count)
+__xip_file_write(struct file *filp, const char __user *buf,
+	  size_t count, loff_t pos, loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
+	struct address_space * mapping = filp->f_mapping;
 	struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode *inode = mapping->host;
 	long status = 0;
 	struct page *page;
 	size_t bytes;
-	const struct iovec *cur_iov = iov;	/* current iovec */
-	size_t iov_base = 0;			/* offset in the current iovec */
-	char __user *buf;
 	ssize_t written = 0;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
-	buf = iov->iov_base;
 	do {
 		unsigned long index;
 		unsigned long offset;
@@ -365,15 +304,14 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 		fault_in_pages_readable(buf, bytes);
 
 		page = a_ops->get_xip_page(mapping,
 					   index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
 			/* we allocate a new page unmap it */
 			page = a_ops->get_xip_page(mapping,
 						   index*(PAGE_SIZE/512), 1);
 			if (!IS_ERR(page))
 				/* unmap page at pgoff from all other vmas */
 				__xip_unmap(mapping, index);
-
 		}
 
 		if (IS_ERR(page)) {
@@ -383,12 +321,7 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 
 		BUG_ON(!PageUptodate(page));
 
-		if (likely(nr_segs == 1))
-			copied = filemap_copy_from_user(page, offset,
-							buf, bytes);
-		else
-			copied = filemap_copy_from_user_iovec(page, offset,
-						cur_iov, iov_base, bytes);
+		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
 			status = copied;
@@ -398,9 +331,6 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 			count -= status;
 			pos += status;
 			buf += status;
-			if (unlikely(nr_segs > 1))
-				filemap_set_next_iovec(&cur_iov,
-						&iov_base, status);
 		}
 	}
 	if (unlikely(copied != bytes))
@@ -422,110 +352,52 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 	return written ? written : status;
 }
 
-static ssize_t
-xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t *ppos)
+ssize_t
+xip_file_write(struct file *filp, const char __user *buf, size_t len,
+	       loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
-	size_t ocount;		/* original count */
-	size_t count;		/* after file limit checks */
-	struct inode *inode = mapping->host;
-	unsigned long seg;
-	loff_t pos;
-	ssize_t written;
-	ssize_t err;
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	size_t count;
+	loff_t pos;
+	ssize_t ret;
 
-	ocount = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
+	down(&inode->i_sem);
 
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		ocount += iv->iov_len;
-		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		ocount -= iv->iov_len;	/* This segment is no good */
-		break;
+	if (!access_ok(VERIFY_READ, buf, len)) {
+		ret=-EFAULT;
+		goto out_up;
 	}
 
-	count = ocount;
 	pos = *ppos;
+	count = len;
 
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 
-	written = 0;
-
-	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-	if (err)
-		goto out;
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
 
+	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
+	if (ret)
+		goto out_backing;
 	if (count == 0)
-		goto out;
+		goto out_backing;
 
-	err = remove_suid(file->f_dentry);
-	if (err)
-		goto out;
+	ret = remove_suid(filp->f_dentry);
+	if (ret)
+		goto out_backing;
 
 	inode_update_time(inode, 1);
 
-	/* use execute in place to copy directly to disk */
-	written = do_xip_file_write (iocb, iov,
-				     nr_segs, pos, ppos, count);
- out:
-	return written ? written : err;
-}
-
-static ssize_t
-__xip_file_write_nolock(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, file);
-	return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
-}
-
-ssize_t
-xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-		   size_t count, loff_t pos)
-{
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-	struct iovec local_iov = { .iov_base = (void __user *)buf,
-				   .iov_len = count };
+	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
-	BUG_ON(iocb->ki_pos != pos);
-
-	down(&inode->i_sem);
-	ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+ out_backing:
+	current->backing_dev_info = NULL;
+ out_up:
 	up(&inode->i_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xip_file_aio_write);
-
-ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-
-	down(&inode->i_sem);
-	ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
-	up(&inode->i_sem);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_writev);
+EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
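
[Note] The write path above calls a_ops->get_xip_page(mapping, index*(PAGE_SIZE/512),
create) twice: first with create=0, and, when that reports a hole via -ENODATA, again
with create=1 followed by __xip_unmap() to evict the page from all other mappings. A
hedged sketch of the contract a filesystem's get_xip_page would satisfy; every
example_* name is hypothetical, and only the calling convention (offset in 512-byte
sector units, a create flag, ERR_PTR(-ENODATA) for holes, pages always uptodate) is
taken from the callers in this file:

	static struct page *
	example_get_xip_page(struct address_space *mapping, sector_t sector,
			     int create)
	{
		struct inode *inode = mapping->host;
		/* callers pass 512-byte sector units; convert to a block */
		unsigned long block = sector >> (PAGE_SHIFT - 9);

		if (!example_block_mapped(inode, block)) {	/* hypothetical */
			if (!create)
				return ERR_PTR(-ENODATA);	/* hole: caller decides */
			if (example_alloc_block(inode, block))	/* hypothetical */
				return ERR_PTR(-ENOSPC);
		}
		/* hypothetical: return the uptodate page backing the block */
		return example_block_to_page(inode, block);
	}
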
@@ -541,7 +413,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned length;
 	struct page *page;
 	void *kaddr;
-	int err;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
@@ -556,17 +427,14 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 
 	page = mapping->a_ops->get_xip_page(mapping,
 					    index*(PAGE_SIZE/512), 0);
-	err = -ENOMEM;
 	if (!page)
-		goto out;
+		return -ENOMEM;
 	if (unlikely(IS_ERR(page))) {
-		if (PTR_ERR(page) == -ENODATA) {
+		if (PTR_ERR(page) == -ENODATA)
 			/* Hole? No need to truncate */
 			return 0;
-		} else {
-			err = PTR_ERR(page);
-			goto out;
-		}
+		else
+			return PTR_ERR(page);
 	} else
 		BUG_ON(!PageUptodate(page));
 	kaddr = kmap_atomic(page, KM_USER0);
@@ -574,8 +442,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	kunmap_atomic(kaddr, KM_USER0);
 
 	flush_dcache_page(page);
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
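
[Note] With the out: label gone, xip_truncate_page() now returns its result directly;
a filesystem's truncate path would use it in place of block_truncate_page() to zero
the partial last page of an xip file. A minimal sketch of such a caller, assuming the
filesystem tests mapping_is_xip() before choosing this path; example_truncate() and
example_truncate_blocks() are hypothetical:

	static void example_truncate(struct inode *inode)
	{
		struct address_space *mapping = inode->i_mapping;

		/* zero the tail of the last page straight in the xip backing */
		if (mapping_is_xip(mapping))
			xip_truncate_page(mapping, inode->i_size);
		/* hypothetical: release the truncated blocks themselves */
		example_truncate_blocks(inode, inode->i_size);
	}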