author		Al Viro <viro@zeniv.linux.org.uk>	2014-02-05 19:11:33 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-04-01 23:19:30 -0400
commit		4f18cd317a118c28482f97303600a2fe2ada6c79 (patch)
tree		aecc6dc15451afccd3176321b2e3008962bd3bc4
parent		4bafbec7bf60ed56ccbb36a96091bdbd162f075d (diff)
take iov_iter stuff to mm/iov_iter.c
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	mm/Makefile	2
-rw-r--r--	mm/filemap.c	221
-rw-r--r--	mm/iov_iter.c	224
3 files changed, 225 insertions, 222 deletions
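For context while reading the diffs below: every helper being moved walks the same iterator object. The following is a minimal sketch of that iterator, reconstructed from the fields the moved code dereferences (i->iov, i->nr_segs, i->iov_offset, i->count) and assumed to match the contemporaneous struct iov_iter definition in the kernel headers; it is a reading aid only, not part of this commit.

/*
 * Assumed layout of the iterator the helpers below manipulate. Field
 * meanings are inferred from the code in this diff; the definition is
 * assumed, not quoted from the headers.
 */
struct iov_iter {
        const struct iovec *iov;        /* current user iovec segment */
        unsigned long nr_segs;          /* segments left, including *iov */
        size_t iov_offset;              /* bytes already consumed in *iov */
        size_t count;                   /* total bytes remaining */
};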
diff --git a/mm/Makefile b/mm/Makefile
index 310c90a09264..178a43406b0c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o \
-			   interval_tree.o list_lru.o $(mmu-y)
+			   interval_tree.o list_lru.o iov_iter.o $(mmu-y)
 
 obj-y += init-mm.o
 
diff --git a/mm/filemap.c b/mm/filemap.c
index a16eb2c4f316..c4730efa5d9e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1085,84 +1085,6 @@ static void shrink_readahead_size_eio(struct file *filp,
 	ra->ra_pages /= 4;
 }
 
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *from;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (!fault_in_pages_writeable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		from = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = __copy_to_user_inatomic(buf, from, copy);
-		copy -= left;
-		skip += copy;
-		from += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = __copy_to_user_inatomic(buf, from, copy);
-			copy -= left;
-			skip = copy;
-			from += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = from - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-	kaddr = kmap(page);
-	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-done:
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-EXPORT_SYMBOL(copy_page_to_iter);
-
 /**
  * do_generic_file_read - generic file read routine
  * @filp: the file to read
@@ -1957,149 +1879,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-			const struct iovec *iov, size_t base, size_t bytes)
-{
-	size_t copied = 0, left = 0;
-
-	while (bytes) {
-		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
-
-		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
-		copied += copy;
-		bytes -= copy;
-		vaddr += copy;
-		iov++;
-
-		if (unlikely(left))
-			break;
-	}
-	return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied. If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap_atomic(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap_atomic(kaddr);
-
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-	BUG_ON(i->count < bytes);
-
-	if (likely(i->nr_segs == 1)) {
-		i->iov_offset += bytes;
-		i->count -= bytes;
-	} else {
-		const struct iovec *iov = i->iov;
-		size_t base = i->iov_offset;
-		unsigned long nr_segs = i->nr_segs;
-
-		/*
-		 * The !iov->iov_len check ensures we skip over unlikely
-		 * zero-length segments (without overruning the iovec).
-		 */
-		while (bytes || unlikely(i->count && !iov->iov_len)) {
-			int copy;
-
-			copy = min(bytes, iov->iov_len - base);
-			BUG_ON(!i->count || i->count < copy);
-			i->count -= copy;
-			bytes -= copy;
-			base += copy;
-			if (iov->iov_len == base) {
-				iov++;
-				nr_segs--;
-				base = 0;
-			}
-		}
-		i->iov = iov;
-		i->iov_offset = base;
-		i->nr_segs = nr_segs;
-	}
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-	char __user *buf = i->iov->iov_base + i->iov_offset;
-	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-	return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	if (i->nr_segs == 1)
-		return i->count;
-	else
-		return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
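With these deletions, mm/filemap.c becomes purely a consumer of the moved helpers. For context, here is a hedged sketch of the buffered-write loop they were designed around, modeled on generic_perform_write() in mm/filemap.c; it is heavily simplified (the ->write_begin/->write_end plumbing, dirty accounting, and most error handling are elided), and write_sketch is a hypothetical name, not an in-tree function.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

/* Hypothetical, simplified stand-in for generic_perform_write(). */
static ssize_t write_sketch(struct address_space *mapping, struct iov_iter *i,
			    loff_t pos)
{
	ssize_t written = 0;

	while (iov_iter_count(i)) {
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(i));
		size_t copied;
		struct page *page;
again:
		/*
		 * Prefault the user buffer now: once the destination page
		 * is mapped with kmap_atomic() a fault cannot be serviced,
		 * so the atomic copy below may only fail by copying short.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes)))
			return written ? written : -EFAULT;

		page = grab_cache_page_write_begin(mapping,
						   pos >> PAGE_CACHE_SHIFT, 0);
		if (unlikely(!page))
			return written ? written : -ENOMEM;

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);
		unlock_page(page);
		page_cache_release(page);

		/* Consume exactly what landed in the page. */
		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * The prefaulted pages were reclaimed before the
			 * atomic copy ran; retry with a single-segment
			 * length so the next prefault covers exactly what
			 * will be copied.
			 */
			bytes = min_t(size_t, bytes,
				      iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
	}
	return written;
}

The ordering is the point of the API: prefault outside any atomic section, copy atomically, advance by what actually landed, and fall back to a single-segment retry when nothing did.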
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
new file mode 100644
index 000000000000..10e46cd721de
--- /dev/null
+++ b/mm/iov_iter.c
@@ -0,0 +1,224 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *from;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_writeable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		from = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_to_user_inatomic(buf, from, copy);
+		copy -= left;
+		skip += copy;
+		from += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_to_user_inatomic(buf, from, copy);
+			copy -= left;
+			skip = copy;
+			from += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = from - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	from = kaddr + offset;
+	left = __copy_to_user(buf, from, copy);
+	copy -= left;
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_to_user(buf, from, copy);
+		copy -= left;
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+			const struct iovec *iov, size_t base, size_t bytes)
+{
+	size_t copied = 0, left = 0;
+
+	while (bytes) {
+		char __user *buf = iov->iov_base + base;
+		int copy = min(bytes, iov->iov_len - base);
+
+		base = 0;
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		copied += copy;
+		bytes -= copy;
+		vaddr += copy;
+		iov++;
+
+		if (unlikely(left))
+			break;
+	}
+	return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied. If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap_atomic(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap_atomic(kaddr);
+
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same sideeffects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap(page);
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+	BUG_ON(i->count < bytes);
+
+	if (likely(i->nr_segs == 1)) {
+		i->iov_offset += bytes;
+		i->count -= bytes;
+	} else {
+		const struct iovec *iov = i->iov;
+		size_t base = i->iov_offset;
+		unsigned long nr_segs = i->nr_segs;
+
+		/*
+		 * The !iov->iov_len check ensures we skip over unlikely
+		 * zero-length segments (without overruning the iovec).
+		 */
+		while (bytes || unlikely(i->count && !iov->iov_len)) {
+			int copy;
+
+			copy = min(bytes, iov->iov_len - base);
+			BUG_ON(!i->count || i->count < copy);
+			i->count -= copy;
+			bytes -= copy;
+			base += copy;
+			if (iov->iov_len == base) {
+				iov++;
+				nr_segs--;
+				base = 0;
+			}
+		}
+		i->iov = iov;
+		i->iov_offset = base;
+		i->nr_segs = nr_segs;
+	}
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+	char __user *buf = i->iov->iov_base + i->iov_offset;
+	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+	return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+	const struct iovec *iov = i->iov;
+	if (i->nr_segs == 1)
+		return i->count;
+	else
+		return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
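On the read side, copy_page_to_iter() above is the call this patch series switches do_generic_file_read() over to. A minimal sketch of that caller pattern follows; the wrapper name is hypothetical, and the page-cache lookup and readahead logic of the real read path are elided.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

/* Hypothetical wrapper showing the intended call pattern; modeled on the
 * do_generic_file_read() conversion elsewhere in this series. */
static int read_page_sketch(struct iov_iter *iter, struct page *page,
			    unsigned long offset, unsigned long nr)
{
	/*
	 * One call copies up to nr bytes out of the page, spilling across
	 * iovec segments as needed, and advances the iterator itself --
	 * no separate iov_iter_advance() on this path.
	 */
	size_t ret = copy_page_to_iter(page, offset, nr, iter);

	/*
	 * A short copy with bytes still wanted means a segment of the user
	 * buffer stayed unwritable even after the non-atomic kmap fallback.
	 */
	if (ret < nr && iov_iter_count(iter))
		return -EFAULT;
	return 0;
}

Note the asymmetry with the write side: copy_page_to_iter() folds the prefault, the atomic-then-sleeping copy fallback, and the iterator advance into a single call, which is what lets the read path shed its old per-iovec bookkeeping.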
