author     Nick Piggin <npiggin@suse.de>                            2007-10-16 04:24:58 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:42:54 -0400
commit     4a9e5ef1f4f15205e477817a5cefc34bd3f65f55 (patch)
tree       51f52086ecbccc3f41955d8d12293ef7a566a05b /mm
parent     eb2be189317d031895b5ca534fbf735eb546158b (diff)
mm: write iovec cleanup
Hide some of the open-coded nr_segs tests into the iovec helpers.  This is
all to simplify generic_file_buffered_write, because that gets more complex
in the next patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
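[Editor's note] The shape of the cleanup is easiest to see in the caller. Below is a condensed before/after of the adjust-after-partial-DIO step, lifted from the filemap.c hunks that follow (cur_iov, iov_offset and written are locals of generic_file_buffered_write):

	/* before: each caller open-codes the single-segment fast path */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_offset, written);
		buf = cur_iov->iov_base + iov_offset;
	}

	/* after: pass nr_segs and let the helper hide the test */
	filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, written);
	buf = cur_iov->iov_base + iov_offset;

Note that the buf recomputation also moves inside the copy loop (second hunk), so the helpers only need to track the (cur_iov, iov_offset) pair.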
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c       36
-rw-r--r--  mm/filemap.h      100
-rw-r--r--  mm/filemap_xip.c   17
3 files changed, 67 insertions, 86 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index fb4c1c0792e7..c59d5b3cd99a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,12 +1823,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 	/*
 	 * handle partial DIO write.  Adjust cur_iov if needed.
 	 */
-	if (likely(nr_segs == 1))
-		buf = iov->iov_base + written;
-	else {
-		filemap_set_next_iovec(&cur_iov, &iov_offset, written);
-		buf = cur_iov->iov_base + iov_offset;
-	}
+	filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, written);
 
 	do {
 		struct page *page;
@@ -1838,6 +1833,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		size_t bytes;		/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
 
+		buf = cur_iov->iov_base + iov_offset;
 		offset = (pos & (PAGE_CACHE_SIZE - 1));
 		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = PAGE_CACHE_SIZE - offset;
@@ -1869,13 +1865,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		if (unlikely(status))
 			goto fs_write_aop_error;
 
-		if (likely(nr_segs == 1))
-			copied = filemap_copy_from_user(page, offset,
-							buf, bytes);
-		else
-			copied = filemap_copy_from_user_iovec(page, offset,
-						cur_iov, iov_offset, bytes);
+		copied = filemap_copy_from_user(page, offset,
+					cur_iov, nr_segs, iov_offset, bytes);
 		flush_dcache_page(page);
+
 		status = a_ops->commit_write(file, page, offset, offset+bytes);
 		if (unlikely(status < 0 || status == AOP_TRUNCATED_PAGE))
 			goto fs_write_aop_error;
@@ -1886,20 +1879,11 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		if (unlikely(status > 0)) /* filesystem did partial write */
 			copied = status;
 
-		if (likely(copied > 0)) {
-			written += copied;
-			count -= copied;
-			pos += copied;
-			buf += copied;
-			if (unlikely(nr_segs > 1)) {
-				filemap_set_next_iovec(&cur_iov,
-						&iov_offset, copied);
-				if (count)
-					buf = cur_iov->iov_base + iov_offset;
-			} else {
-				iov_offset += copied;
-			}
-		}
+		written += copied;
+		count -= copied;
+		pos += copied;
+		filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, copied);
+
 		unlock_page(page);
 		mark_page_accessed(page);
 		page_cache_release(page);
diff --git a/mm/filemap.h b/mm/filemap.h
index a1e10a232e92..b500d936cec5 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -22,82 +22,82 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
 
 /*
  * Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied.  If a fault is encountered then clear the page
- * out to (offset+bytes) and return the number of bytes which were copied.
- *
- * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
- * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
- * and if the following non-atomic copy succeeds, then there is a small window
- * where the target page contains neither the data before the write, nor the
- * data after the write (it contains zero).  A read at this time will see
- * data that is inconsistent with any ordering of the read and the write.
- * (This has been detected in practice).
+ * were sucessfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
  */
 static inline size_t
-filemap_copy_from_user(struct page *page, unsigned long offset,
-			const char __user *buf, unsigned bytes)
+filemap_copy_from_user_atomic(struct page *page, unsigned long offset,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t base, size_t bytes)
 {
 	char *kaddr;
-	int left;
+	size_t copied;
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+	if (likely(nr_segs == 1)) {
+		int left;
+		char __user *buf = iov->iov_base + base;
+		left = __copy_from_user_inatomic_nocache(kaddr + offset,
+							buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+							iov, base, bytes);
+	}
 	kunmap_atomic(kaddr, KM_USER0);
 
-	if (left != 0) {
-		/* Do it the slow way */
-		kaddr = kmap(page);
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
-		kunmap(page);
-	}
-	return bytes - left;
+	return copied;
 }
 
 /*
- * This has the same sideeffects and return value as filemap_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
- * single-segment behaviour.
+ * This has the same sideeffects and return value as
+ * filemap_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
  */
 static inline size_t
-filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
-			const struct iovec *iov, size_t base, size_t bytes)
+filemap_copy_from_user(struct page *page, unsigned long offset,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t base, size_t bytes)
 {
 	char *kaddr;
 	size_t copied;
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-							 base, bytes);
-	kunmap_atomic(kaddr, KM_USER0);
-	if (copied != bytes) {
-		kaddr = kmap(page);
-		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-								 base, bytes);
-		if (bytes - copied)
-			memset(kaddr + offset + copied, 0, bytes - copied);
-		kunmap(page);
+	kaddr = kmap(page);
+	if (likely(nr_segs == 1)) {
+		int left;
+		char __user *buf = iov->iov_base + base;
+		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+							iov, base, bytes);
 	}
+	kunmap(page);
 	return copied;
 }
 
 static inline void
-filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
+filemap_set_next_iovec(const struct iovec **iovp, unsigned long nr_segs,
+			 size_t *basep, size_t bytes)
 {
-	const struct iovec *iov = *iovp;
-	size_t base = *basep;
+	if (likely(nr_segs == 1)) {
+		*basep += bytes;
+	} else {
+		const struct iovec *iov = *iovp;
+		size_t base = *basep;
 
-	while (bytes) {
-		int copy = min(bytes, iov->iov_len - base);
+		while (bytes) {
+			int copy = min(bytes, iov->iov_len - base);
 
-		bytes -= copy;
-		base += copy;
-		if (iov->iov_len == base) {
-			iov++;
-			base = 0;
+			bytes -= copy;
+			base += copy;
+			if (iov->iov_len == base) {
+				iov++;
+				base = 0;
+			}
 		}
+		*iovp = iov;
+		*basep = base;
 	}
-	*iovp = iov;
-	*basep = base;
 }
 #endif
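[Editor's note] To see what filemap_set_next_iovec() does in the multi-segment case, here is a self-contained userspace sketch of the same walk (an illustration only, not kernel code: likely() is dropped, min() is open-coded, and struct iovec comes from sys/uio.h):

	#include <stdio.h>
	#include <sys/uio.h>	/* struct iovec: iov_base, iov_len */

	static void set_next_iovec(const struct iovec **iovp,
				   unsigned long nr_segs,
				   size_t *basep, size_t bytes)
	{
		if (nr_segs == 1) {		/* plain write(2): one segment */
			*basep += bytes;	/* just bump the offset */
			return;
		}
		const struct iovec *iov = *iovp;
		size_t base = *basep;

		while (bytes) {
			size_t copy = iov->iov_len - base;
			if (copy > bytes)
				copy = bytes;
			bytes -= copy;
			base += copy;
			if (base == iov->iov_len) {	/* segment exhausted */
				iov++;
				base = 0;
			}
		}
		*iovp = iov;
		*basep = base;
	}

	int main(void)
	{
		char a[8], b[8];
		struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
		const struct iovec *cur = iov;
		size_t base = 0;

		set_next_iovec(&cur, 2, &base, 10); /* consume 10 of 16 bytes */
		printf("segment %td, offset %zu\n", cur - iov, base);
		/* prints: segment 1, offset 2 */
		return 0;
	}

With one segment the helper degenerates to a single addition, which is exactly the fast path the callers used to open-code.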
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 53ee6a299635..32132f3cd641 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,7 +15,6 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
-#include "filemap.h"
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -288,6 +287,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		unsigned long index;
 		unsigned long offset;
 		size_t copied;
+		char *kaddr;
 
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
@@ -295,14 +295,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		if (bytes > count)
 			bytes = count;
 
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 */
-		fault_in_pages_readable(buf, bytes);
-
 		page = a_ops->get_xip_page(mapping,
 					   index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
@@ -319,8 +311,13 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 		}
 
-		copied = filemap_copy_from_user(page, offset, buf, bytes);
+		fault_in_pages_readable(buf, bytes);
+		kaddr = kmap_atomic(page, KM_USER0);
+		copied = bytes -
+			__copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(page);
+
 		if (likely(copied > 0)) {
 			status = copied;
 
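[Editor's note] One ordering detail in this last hunk deserves a mention: fault_in_pages_readable() now runs immediately before the copy instead of earlier in the loop, and the copy itself happens under kmap_atomic(), where taking a page fault is not allowed. The fragment below restates the new sequence with editorial comments; the rationale is reconstructed from the comment block the patch deletes in the previous hunk (the deadlock warning about copying from the same page being written), and the comments are not part of the patch:

	fault_in_pages_readable(buf, bytes);	/* pre-fault the source page:
						   faulting it in later, under
						   the atomic kmap, would not
						   be allowed */
	kaddr = kmap_atomic(page, KM_USER0);	/* faults disabled from here */
	copied = bytes -
		__copy_from_user_inatomic_nocache(kaddr, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);		/* faults allowed again */
	flush_dcache_page(page);		/* keep d-cache coherent with
						   the new page contents */

If the inatomic copy still faults (the source can be unmapped between the pre-fault and the copy), copied simply comes back short, and the existing "copied > 0" bookkeeping below the hunk decides how much of the write counted.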