author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 16:22:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 16:22:56 -0400
commit     fa927894bbb4a4c7669c72bad1924991022fda38 (patch)
tree       93560f1a096973235fe9ff50c436f5239c1c499a /fs/hugetlbfs
parent     c841e12add6926d64aa608687893465330b5a03e (diff)
parent     8436318205b9f29e45db88850ec60e326327e241 (diff)
Merge branch 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull second vfs update from Al Viro:
"Now that net-next went in... Here's the next big chunk - killing
->aio_read() and ->aio_write().
There'll be one more pile today (direct_IO changes and
generic_write_checks() cleanups/fixes), but I'd prefer to keep that
one separate"
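Every conversion in this series follows the same pattern: instead of a raw char __user * buffer (or the iovec-based ->aio_read()/->aio_write()), a read or write method now takes a struct kiocb plus a struct iov_iter, and copies data with the iov_iter helpers. As a rough illustration only — example_read_iter, example_fops and the greeting buffer are made-up names, not part of this series — a minimal new-style driver read method looks like this:

/* Hypothetical driver sketch: the general shape these conversions target.
 * Assumes a small static in-kernel buffer as the data source. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uio.h>

static const char greeting[] = "hello\n";

/* New-style read method: the file position lives in iocb->ki_pos, the
 * destination buffer(s) are described by the iov_iter, and copy_to_iter()
 * replaces the old copy_to_user() call. */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	loff_t pos = iocb->ki_pos;
	size_t n;

	if (pos >= sizeof(greeting) - 1)
		return 0;				/* EOF */
	n = copy_to_iter(greeting + pos, sizeof(greeting) - 1 - pos, to);
	if (!n)
		return -EFAULT;
	iocb->ki_pos = pos + n;
	return n;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.read_iter	= example_read_iter,		/* replaces .read / .aio_read */
	.llseek		= default_llseek,
};

With only .read_iter set, plain read(2) and readv(2) keep working: the VFS falls back to a synchronous wrapper that builds the kiocb and iov_iter itself (the new_sync_{read,write}() helpers this series makes static).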
* 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (37 commits)
->aio_read and ->aio_write removed
pcm: another weird API abuse
infinibad: weird APIs switched to ->write_iter()
kill do_sync_read/do_sync_write
fuse: use iov_iter_get_pages() for non-splice path
fuse: switch to ->read_iter/->write_iter
switch drivers/char/mem.c to ->read_iter/->write_iter
make new_sync_{read,write}() static
coredump: accept any write method
switch /dev/loop to vfs_iter_write()
serial2002: switch to __vfs_read/__vfs_write
ashmem: use __vfs_read()
export __vfs_read()
autofs: switch to __vfs_write()
new helper: __vfs_write()
switch hugetlbfs to ->read_iter()
coda: switch to ->read_iter/->write_iter
ncpfs: switch to ->read_iter/->write_iter
net/9p: remove (now-)unused helpers
p9_client_attach(): set fid->uid correctly
...
Diffstat (limited to 'fs/hugetlbfs')
-rw-r--r--  fs/hugetlbfs/inode.c  91
1 file changed, 33 insertions(+), 58 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index db76cec3ce21..45e34908bdb5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -34,6 +34,7 @@
 #include <linux/security.h>
 #include <linux/magic.h>
 #include <linux/migrate.h>
+#include <linux/uio.h>
 
 #include <asm/uaccess.h>
 
@@ -179,42 +180,33 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-static int
+static size_t
 hugetlbfs_read_actor(struct page *page, unsigned long offset,
-			char __user *buf, unsigned long count,
-			unsigned long size)
+			struct iov_iter *to, unsigned long size)
 {
-	char *kaddr;
-	unsigned long left, copied = 0;
+	size_t copied = 0;
 	int i, chunksize;
 
-	if (size > count)
-		size = count;
-
 	/* Find which 4k chunk and offset with in that chunk */
 	i = offset >> PAGE_CACHE_SHIFT;
 	offset = offset & ~PAGE_CACHE_MASK;
 
 	while (size) {
+		size_t n;
 		chunksize = PAGE_CACHE_SIZE;
 		if (offset)
 			chunksize -= offset;
 		if (chunksize > size)
 			chunksize = size;
-		kaddr = kmap(&page[i]);
-		left = __copy_to_user(buf, kaddr + offset, chunksize);
-		kunmap(&page[i]);
-		if (left) {
-			copied += (chunksize - left);
-			break;
-		}
+		n = copy_page_to_iter(&page[i], offset, chunksize, to);
+		copied += n;
+		if (n != chunksize)
+			return copied;
 		offset = 0;
 		size -= chunksize;
-		buf += chunksize;
-		copied += chunksize;
 		i++;
 	}
-	return copied ? copied : -EFAULT;
+	return copied;
 }
 
 /*
@@ -222,39 +214,34 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
  * data. Its *very* similar to do_generic_mapping_read(), we can't use that
  * since it has PAGE_CACHE_SIZE assumptions.
  */
-static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
-			size_t len, loff_t *ppos)
+static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	struct hstate *h = hstate_file(filp);
-	struct address_space *mapping = filp->f_mapping;
+	struct file *file = iocb->ki_filp;
+	struct hstate *h = hstate_file(file);
+	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned long index = *ppos >> huge_page_shift(h);
-	unsigned long offset = *ppos & ~huge_page_mask(h);
+	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
+	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
 	unsigned long end_index;
 	loff_t isize;
 	ssize_t retval = 0;
 
-	/* validate length */
-	if (len == 0)
-		goto out;
-
-	for (;;) {
+	while (iov_iter_count(to)) {
 		struct page *page;
-		unsigned long nr, ret;
-		int ra;
+		size_t nr, copied;
 
 		/* nr is the maximum number of bytes to copy from this page */
 		nr = huge_page_size(h);
 		isize = i_size_read(inode);
 		if (!isize)
-			goto out;
+			break;
 		end_index = (isize - 1) >> huge_page_shift(h);
-		if (index >= end_index) {
-			if (index > end_index)
-				goto out;
+		if (index > end_index)
+			break;
+		if (index == end_index) {
 			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
 			if (nr <= offset)
-				goto out;
+				break;
 		}
 		nr = nr - offset;
 
@@ -265,39 +252,27 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 			 * We have a HOLE, zero out the user-buffer for the
 			 * length of the hole or request.
 			 */
-			ret = len < nr ? len : nr;
-			if (clear_user(buf, ret))
-				ra = -EFAULT;
-			else
-				ra = 0;
+			copied = iov_iter_zero(nr, to);
 		} else {
 			unlock_page(page);
 
 			/*
 			 * We have the page, copy it to user space buffer.
 			 */
-			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
-			ret = ra;
+			copied = hugetlbfs_read_actor(page, offset, to, nr);
 			page_cache_release(page);
 		}
-		if (ra < 0) {
-			if (retval == 0)
-				retval = ra;
-			goto out;
+		offset += copied;
+		retval += copied;
+		if (copied != nr && iov_iter_count(to)) {
+			if (!retval)
+				retval = -EFAULT;
+			break;
 		}
-
-		offset += ret;
-		retval += ret;
-		len -= ret;
 		index += offset >> huge_page_shift(h);
 		offset &= ~huge_page_mask(h);
-
-		/* short read or no more work */
-		if ((ret != nr) || (len == 0))
-			break;
 	}
-out:
-	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
 	return retval;
 }
 
@@ -721,7 +696,7 @@ static void init_once(void *foo)
 }
 
 const struct file_operations hugetlbfs_file_operations = {
-	.read			= hugetlbfs_read,
+	.read_iter		= hugetlbfs_read_iter,
 	.mmap			= hugetlbfs_file_mmap,
 	.fsync			= noop_fsync,
 	.get_unmapped_area	= hugetlb_get_unmapped_area,
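After this change hugetlbfs exposes only .read_iter, so plain read(2) and readv(2) reach it through the generic VFS path: __vfs_read() (exported elsewhere in this series) calls ->read if a file still has one, and otherwise falls back to the synchronous wrapper built on ->read_iter. The sketch below shows roughly what that fallback, new_sync_read(), does in this era of the kernel; it is a paraphrase for orientation, not a verbatim copy of fs/read_write.c, and it omits error checking.

/* Sketch (not verbatim) of the synchronous read(2) fallback used for files
 * that provide only ->read_iter. */
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t new_sync_read(struct file *filp, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);		/* synchronous kiocb, no AIO completion */
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	*ppos = kiocb.ki_pos;			/* ->read_iter advanced ki_pos, not *ppos */
	return ret;
}

This is why hugetlbfs_read_iter() only has to track iocb->ki_pos and the remaining iov_iter_count(): buffer walking, position write-back, and the old ->read/->aio_read entry points are all handled (or removed) in the generic code by the rest of this series.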