author	Nick Piggin <npiggin@suse.de>	2007-10-16 04:24:58 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:54 -0400
commit	4a9e5ef1f4f15205e477817a5cefc34bd3f65f55 (patch)
tree	51f52086ecbccc3f41955d8d12293ef7a566a05b /mm/filemap.h
parent	eb2be189317d031895b5ca534fbf735eb546158b (diff)
mm: write iovec cleanup
Hide some of the open-coded nr_segs tests into the iovec helpers. This is
all to simplify generic_file_buffered_write, because that gets more complex
in the next patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.h')
-rw-r--r--	mm/filemap.h	100
1 file changed, 50 insertions(+), 50 deletions(-)
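
As context for the commit message above: the "open-coded nr_segs tests" being hidden are branches of roughly the shape sketched below in callers such as generic_file_buffered_write. This is an illustrative sketch only, not code from the commit; the caller name and the locals (buf, cur_iov, iov_offset, copied) are hypothetical, while the two old helper signatures are taken from the lines this patch removes.

/*
 * Illustrative sketch only (not part of this commit): an open-coded
 * nr_segs test around the old single-segment/iovec helper pair.
 * The function name and the caller-side locals are hypothetical.
 */
static size_t copy_into_page_old_style(struct page *page, unsigned long offset,
			const char __user *buf, const struct iovec *cur_iov,
			unsigned long nr_segs, size_t iov_offset, size_t bytes)
{
	size_t copied;

	if (likely(nr_segs == 1))
		copied = filemap_copy_from_user(page, offset, buf, bytes);
	else
		copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_offset, bytes);
	return copied;
}

After the patch, the nr_segs test lives inside the helpers themselves (see the reworked functions in the diff below), so call sites pass the iovec and segment count straight through.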
diff --git a/mm/filemap.h b/mm/filemap.h
index a1e10a232e92..b500d936cec5 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -22,82 +22,82 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
 
 /*
  * Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied. If a fault is encountered then clear the page
- * out to (offset+bytes) and return the number of bytes which were copied.
- *
- * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
- * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
- * and if the following non-atomic copy succeeds, then there is a small window
- * where the target page contains neither the data before the write, nor the
- * data after the write (it contains zero). A read at this time will see
- * data that is inconsistent with any ordering of the read and the write.
- * (This has been detected in practice).
+ * were sucessfully copied. If a fault is encountered then return the number of
+ * bytes which were copied.
  */
 static inline size_t
-filemap_copy_from_user(struct page *page, unsigned long offset,
-			const char __user *buf, unsigned bytes)
+filemap_copy_from_user_atomic(struct page *page, unsigned long offset,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t base, size_t bytes)
 {
 	char *kaddr;
-	int left;
+	size_t copied;
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+	if (likely(nr_segs == 1)) {
+		int left;
+		char __user *buf = iov->iov_base + base;
+		left = __copy_from_user_inatomic_nocache(kaddr + offset,
+							buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+							iov, base, bytes);
+	}
 	kunmap_atomic(kaddr, KM_USER0);
 
-	if (left != 0) {
-		/* Do it the slow way */
-		kaddr = kmap(page);
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
-		kunmap(page);
-	}
-	return bytes - left;
+	return copied;
 }
 
 /*
- * This has the same sideeffects and return value as filemap_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
- * single-segment behaviour.
+ * This has the same sideeffects and return value as
+ * filemap_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
  */
 static inline size_t
-filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
-			const struct iovec *iov, size_t base, size_t bytes)
+filemap_copy_from_user(struct page *page, unsigned long offset,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t base, size_t bytes)
 {
 	char *kaddr;
 	size_t copied;
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-							base, bytes);
-	kunmap_atomic(kaddr, KM_USER0);
-	if (copied != bytes) {
-		kaddr = kmap(page);
-		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-							base, bytes);
-		if (bytes - copied)
-			memset(kaddr + offset + copied, 0, bytes - copied);
-		kunmap(page);
+	kaddr = kmap(page);
+	if (likely(nr_segs == 1)) {
+		int left;
+		char __user *buf = iov->iov_base + base;
+		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+							iov, base, bytes);
 	}
+	kunmap(page);
 	return copied;
 }
 
 static inline void
-filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
+filemap_set_next_iovec(const struct iovec **iovp, unsigned long nr_segs,
+			size_t *basep, size_t bytes)
 {
-	const struct iovec *iov = *iovp;
-	size_t base = *basep;
+	if (likely(nr_segs == 1)) {
+		*basep += bytes;
+	} else {
+		const struct iovec *iov = *iovp;
+		size_t base = *basep;
 
-	while (bytes) {
-		int copy = min(bytes, iov->iov_len - base);
+		while (bytes) {
+			int copy = min(bytes, iov->iov_len - base);
 
-		bytes -= copy;
-		base += copy;
-		if (iov->iov_len == base) {
-			iov++;
-			base = 0;
+			bytes -= copy;
+			base += copy;
+			if (iov->iov_len == base) {
+				iov++;
+				base = 0;
+			}
 		}
+		*iovp = iov;
+		*basep = base;
 	}
-	*iovp = iov;
-	*basep = base;
 }
 #endif
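
For illustration, a sketch (not from this commit, and ignoring the locking subtleties the surrounding write path has to handle) of how a caller might use the reworked helpers above: attempt the atomic copy first, and if it comes up short because a fault was encountered, retry with filemap_copy_from_user(), which may sleep to resolve the fault; filemap_set_next_iovec() then advances the iovec cursor by the amount copied. All names other than the three helpers are hypothetical.

/*
 * Illustrative sketch only: atomic copy with a non-atomic fallback using
 * the helpers above, then advancing the iovec position.  The function
 * name and the locals are hypothetical.
 */
static size_t copy_into_page_new_style(struct page *page, unsigned long offset,
			const struct iovec **cur_iov, unsigned long nr_segs,
			size_t *iov_offset, size_t bytes)
{
	size_t copied;

	copied = filemap_copy_from_user_atomic(page, offset, *cur_iov,
						nr_segs, *iov_offset, bytes);
	if (unlikely(copied != bytes))
		copied = filemap_copy_from_user(page, offset, *cur_iov,
						nr_segs, *iov_offset, bytes);

	/* Advance (iov, base) past the bytes that made it into the page. */
	filemap_set_next_iovec(cur_iov, nr_segs, iov_offset, copied);
	return copied;
}

Note that the nr_segs == 1 fast path is now entirely the helpers' concern, which is what lets generic_file_buffered_write drop its own per-call-site branching in the follow-up patch.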