author     Christoph Hellwig <hch@lst.de>                    2008-07-24 00:27:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 13:47:14 -0400
commit     a969e903a944f69309ee5cc9e7c7b08310d1151e (patch)
tree       fa8a26a8e9b870b3c4f9a876070de03e4901c579 /mm
parent     75353bed36cfbbfb55bbde0896bbf5a02d9ba355 (diff)
kill generic_file_direct_IO()
generic_file_direct_IO() is a common helper around the invocation of
->direct_IO. But there is almost nothing shared between the read and
write sides, so we're better off without this helper.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
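For context, the paths this patch touches are what service O_DIRECT I/O from
userspace: on a filesystem using the generic file operations, an O_DIRECT read
enters generic_file_aio_read() and takes the branch modified below. A minimal,
self-contained userspace probe of that path (a sketch; the 4096-byte buffer
alignment is an assumption, since O_DIRECT alignment requirements vary by
filesystem and block device):

/* odirect_read.c - read one block through the kernel's direct-I/O path.
 * Build: cc -D_GNU_SOURCE -o odirect_read odirect_read.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        void *buf;
        ssize_t n;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }

        /* O_DIRECT bypasses the page cache; the kernel routes the
         * request to the filesystem's ->direct_IO method. */
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* O_DIRECT needs an aligned buffer; 4096 is an assumed size
         * that covers common sector/block sizes. */
        if (posix_memalign(&buf, 4096, 4096)) {
                fprintf(stderr, "posix_memalign failed\n");
                return 1;
        }

        n = read(fd, buf, 4096);
        if (n < 0)
                perror("read");
        else
                printf("read %zd bytes via the direct-I/O path\n", n);

        free(buf);
        close(fd);
        return 0;
}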
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  117
1 file changed, 51 insertions(+), 66 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 65d9d9e2b755..6343f3c841b7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -42,9 +42,6 @@
 
 #include <asm/mman.h>
 
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-        loff_t offset, unsigned long nr_segs);
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -1205,8 +1202,11 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                         goto out; /* skip atime */
                 size = i_size_read(inode);
                 if (pos < size) {
-                        retval = generic_file_direct_IO(READ, iocb,
-                                                iov, pos, nr_segs);
+                        retval = filemap_write_and_wait(mapping);
+                        if (!retval) {
+                                retval = mapping->a_ops->direct_IO(READ, iocb,
+                                                        iov, pos, nr_segs);
+                        }
                         if (retval > 0)
                                 *ppos = pos + retval;
                 }
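The read side now flushes dirty pagecache with filemap_write_and_wait() and
then calls the filesystem's ->direct_IO method itself. For reference, that
address_space operation had roughly this shape in the 2.6.26-era
include/linux/fs.h (paraphrased, not part of this diff; parameter names are
illustrative):

        /* 2.6.26-era ->direct_IO signature (paraphrased; names illustrative) */
        ssize_t (*direct_IO)(int rw, struct kiocb *iocb,
                        const struct iovec *iov, loff_t offset,
                        unsigned long nr_segs);

Since this flush-then-call pair is all the read side ever needed from
generic_file_direct_IO(), inlining it costs only the few lines above.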
@@ -2004,11 +2004,55 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         struct address_space *mapping = file->f_mapping;
         struct inode *inode = mapping->host;
         ssize_t written;
+        size_t write_len;
+        pgoff_t end;
 
         if (count != ocount)
                 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-        written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+        /*
+         * Unmap all mmappings of the file up-front.
+         *
+         * This will cause any pte dirty bits to be propagated into the
+         * pageframes for the subsequent filemap_write_and_wait().
+         */
+        write_len = iov_length(iov, *nr_segs);
+        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+        if (mapping_mapped(mapping))
+                unmap_mapping_range(mapping, pos, write_len, 0);
+
+        written = filemap_write_and_wait(mapping);
+        if (written)
+                goto out;
+
+        /*
+         * After a write we want buffered reads to be sure to go to disk to get
+         * the new data.  We invalidate clean cached page from the region we're
+         * about to write.  We do this *before* the write so that we can return
+         * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+         */
+        if (mapping->nrpages) {
+                written = invalidate_inode_pages2_range(mapping,
+                                        pos >> PAGE_CACHE_SHIFT, end);
+                if (written)
+                        goto out;
+        }
+
+        written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+
+        /*
+         * Finally, try again to invalidate clean pages which might have been
+         * cached by non-direct readahead, or faulted in by get_user_pages()
+         * if the source of the write was an mmap'ed region of the file
+         * we're writing.  Either one is a pretty crazy thing to do,
+         * so we don't support it 100%.  If this invalidation
+         * fails, tough, the write still worked...
+         */
+        if (mapping->nrpages) {
+                invalidate_inode_pages2_range(mapping,
+                                        pos >> PAGE_CACHE_SHIFT, end);
+        }
+
         if (written > 0) {
                 loff_t end = pos + written;
                 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
@@ -2024,6 +2068,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
          * i_mutex is held, which protects generic_osync_inode() from
          * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
          */
+out:
         if ((written >= 0 || written == -EIOCBQUEUED) &&
                         ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
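Taken together, the inlined write side now runs: unmap, flush, invalidate,
->direct_IO, best-effort invalidate, then the shared i_size/O_SYNC epilogue
behind the new out: label. A condensed sketch of that sequence, reconstructed
from the hunks above (declarations and the epilogue elided; a reading aid,
not a drop-in function):

        /* Reconstructed from the patch hunks above; abbreviated. */
        write_len = iov_length(iov, *nr_segs);
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

        /* 1. Propagate pte dirty bits into the pageframes. */
        if (mapping_mapped(mapping))
                unmap_mapping_range(mapping, pos, write_len, 0);

        /* 2. Flush dirty pagecache over the whole mapping. */
        written = filemap_write_and_wait(mapping);
        if (written)
                goto out;

        /* 3. Shoot down clean pages before the write, so an -EIO here
         *    cannot clobber -EIOCBQUEUED from ->direct_IO(). */
        if (mapping->nrpages) {
                written = invalidate_inode_pages2_range(mapping,
                                        pos >> PAGE_CACHE_SHIFT, end);
                if (written)
                        goto out;
        }

        written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

        /* 4. Best-effort re-invalidation; a failure here is ignored. */
        if (mapping->nrpages)
                invalidate_inode_pages2_range(mapping,
                                        pos >> PAGE_CACHE_SHIFT, end);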
@@ -2511,66 +2556,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_aio_write);
 
-/*
- * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
- * went wrong during pagecache shootdown.
- */
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-        loff_t offset, unsigned long nr_segs)
-{
-        struct file *file = iocb->ki_filp;
-        struct address_space *mapping = file->f_mapping;
-        ssize_t retval;
-        size_t write_len;
-        pgoff_t end = 0; /* silence gcc */
-
-        /*
-         * If it's a write, unmap all mmappings of the file up-front.  This
-         * will cause any pte dirty bits to be propagated into the pageframes
-         * for the subsequent filemap_write_and_wait().
-         */
-        if (rw == WRITE) {
-                write_len = iov_length(iov, nr_segs);
-                end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
-                if (mapping_mapped(mapping))
-                        unmap_mapping_range(mapping, offset, write_len, 0);
-        }
-
-        retval = filemap_write_and_wait(mapping);
-        if (retval)
-                goto out;
-
-        /*
-         * After a write we want buffered reads to be sure to go to disk to get
-         * the new data.  We invalidate clean cached page from the region we're
-         * about to write.  We do this *before* the write so that we can return
-         * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
-         */
-        if (rw == WRITE && mapping->nrpages) {
-                retval = invalidate_inode_pages2_range(mapping,
-                                        offset >> PAGE_CACHE_SHIFT, end);
-                if (retval)
-                        goto out;
-        }
-
-        retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-
-        /*
-         * Finally, try again to invalidate clean pages which might have been
-         * cached by non-direct readahead, or faulted in by get_user_pages()
-         * if the source of the write was an mmap'ed region of the file
-         * we're writing.  Either one is a pretty crazy thing to do,
-         * so we don't support it 100%.  If this invalidation
-         * fails, tough, the write still worked...
-         */
-        if (rw == WRITE && mapping->nrpages) {
-                invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
-        }
-out:
-        return retval;
-}
-
 /**
  * try_to_release_page() - release old fs-specific metadata on a page
  *