author		Hugh Dickins <hugh@veritas.com>	2008-02-05 01:28:44 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:15 -0500
commit		5402b976ae0be96b3a32f3508ab7308c380d6477 (patch)
tree		d2c0900086033ea1fdeb3e1540e733d962cd177c /mm
parent		d3602444e1e3485890eea5f61366e19a287c00c4 (diff)
shmem_file_write is redundant
With the old aops, writing to a tmpfs file had to use its own special method: the generic method would pass in a fresh page to prepare_write when the right page was there in swapcache - which was inefficient to handle, even once we'd concocted the code to handle it.

With the new aops, the generic method uses shmem_write_end, which lets shmem_getpage find the right page: so now abandon shmem_file_write in favour of the generic method. Yes, that does do several things that tmpfs hasn't really needed (notably balance_dirty_pages_ratelimited, which ramfs also calls); but more use of common code is preferable.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
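[Editorial note: for readers unfamiliar with the generic path being adopted here, generic_file_aio_write loops over the write one page at a time, asking the filesystem's write_begin aop for a page, copying the user data in, and completing through write_end. The sketch below is an illustrative paraphrase of that loop as it stood in the 2.6.24 era, not the actual fs/filemap.c code: sketch_perform_write is an invented name, and the copy step is simplified to a plain kmap + copy_from_user where the real loop uses an atomic kmap and prefaults the user buffer to avoid deadlocking on a locked page.]

/*
 * Illustrative paraphrase of the per-page loop the generic write
 * path drives through the new write_begin/write_end aops (~2.6.24).
 * Not the real fs/filemap.c code; simplified for exposition.
 */
static ssize_t sketch_perform_write(struct file *file,
		const char __user *buf, size_t count, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;
	int status = 0;

	while (count) {
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned long bytes = min_t(unsigned long,
				PAGE_CACHE_SIZE - offset, count);
		unsigned long left;
		struct page *page;
		void *fsdata;
		char *kaddr;

		/*
		 * For tmpfs this is shmem_write_begin, which uses
		 * shmem_getpage(SGP_WRITE) to find the right page
		 * even when it is sitting in swapcache.
		 */
		status = a_ops->write_begin(file, mapping, pos, bytes, 0,
					    &page, &fsdata);
		if (status)
			break;

		/*
		 * Simplified copy: the real loop uses an atomic kmap
		 * and prefaults the user buffer before taking it.
		 */
		kaddr = kmap(page);
		left = copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
		flush_dcache_page(page);

		/* tmpfs: shmem_write_end dirties the page, grows i_size */
		status = a_ops->write_end(file, mapping, pos, bytes,
					  bytes - left, page, fsdata);
		if (status < 0)
			break;

		pos += status;
		buf += status;
		written += status;
		count -= status;
		if (left) {
			status = -EFAULT;
			break;
		}

		/* one of the extras tmpfs never needed but now gets */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return written ? written : status;
}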
Diffstat (limited to 'mm')
-rw-r--r--	mm/shmem.c	109
1 file changed, 3 insertions(+), 106 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 43d071922b81..5dfe79048f6d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1106,7 +1106,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
 	 * in under swappage, which is then assigned to filepage.
-	 * But shmem_readpage and shmem_write_begin pass in a locked
+	 * But shmem_readpage (required for splice) passes in a locked
 	 * filepage, which may be found not uptodate by other callers
 	 * too, and may need to be copied from the swappage read in.
 	 */
@@ -1476,110 +1476,6 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 	return copied;
 }
 
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	struct inode *inode = file->f_path.dentry->d_inode;
-	loff_t pos;
-	unsigned long written;
-	ssize_t err;
-
-	if ((ssize_t) count < 0)
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_READ, buf, count))
-		return -EFAULT;
-
-	mutex_lock(&inode->i_mutex);
-
-	pos = *ppos;
-	written = 0;
-
-	err = generic_write_checks(file, &pos, &count, 0);
-	if (err || !count)
-		goto out;
-
-	err = remove_suid(file->f_path.dentry);
-	if (err)
-		goto out;
-
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
-	do {
-		struct page *page = NULL;
-		unsigned long bytes, index, offset;
-		char *kaddr;
-		int left;
-
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		index = pos >> PAGE_CACHE_SHIFT;
-		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
-
-		/*
-		 * We don't hold page lock across copy from user -
-		 * what would it guard against? - so no deadlock here.
-		 * But it still may be a good idea to prefault below.
-		 */
-
-		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
-		if (err)
-			break;
-
-		unlock_page(page);
-		left = bytes;
-		if (PageHighMem(page)) {
-			volatile unsigned char dummy;
-			__get_user(dummy, buf);
-			__get_user(dummy, buf + bytes - 1);
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			left = __copy_from_user_inatomic(kaddr + offset,
-							buf, bytes);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if (left) {
-			kaddr = kmap(page);
-			left = __copy_from_user(kaddr + offset, buf, bytes);
-			kunmap(page);
-		}
-
-		written += bytes;
-		count -= bytes;
-		pos += bytes;
-		buf += bytes;
-		if (pos > inode->i_size)
-			i_size_write(inode, pos);
-
-		flush_dcache_page(page);
-		set_page_dirty(page);
-		mark_page_accessed(page);
-		page_cache_release(page);
-
-		if (left) {
-			pos -= left;
-			written -= left;
-			err = -EFAULT;
-			break;
-		}
-
-		/*
-		 * Our dirty pages are not counted in nr_dirty,
-		 * and we do not attempt to balance dirty pages.
-		 */
-
-		cond_resched();
-	} while (count);
-
-	*ppos = pos;
-	if (written)
-		err = written;
-out:
-	mutex_unlock(&inode->i_mutex);
-	return err;
-}
-
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
@@ -2354,7 +2250,8 @@ static const struct file_operations shmem_file_operations = {
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
 	.read		= shmem_file_read,
-	.write		= shmem_file_write,
+	.write		= do_sync_write,
+	.aio_write	= generic_file_aio_write,
 	.fsync		= simple_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
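[Editorial note: after this change the write path into tmpfs is entirely the generic one; what shmem still supplies are the two aops that the generic loop calls. The pair below is a paraphrase from memory of the mm/shmem.c of this era, not a verbatim quote; note that the parent commit d3602444 made shmem_getpage return the page locked, so write_end is where it gets unlocked.]

/* Paraphrase of the tmpfs aops the generic write path now drives. */
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned flags,
		  struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = NULL;
	/* finds the right page, even if it must come back from swap */
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);	/* page came back locked since d3602444 */
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}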