author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 23:27:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 23:27:56 -0400
commit		4fc8adcfec3da639da76e8314c9ccefe5bf9a045 (patch)
tree		e07a2dea8acf04d8bbbecd4fd3a571653ecdd953
parent		84588e7a5d8220446d677d7b909a20ee7a4496b9 (diff)
parent		aa4d86163e4e91a1ac560954a554bab417e338f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull third hunk of vfs changes from Al Viro:
"This contains the ->direct_IO() changes from Omar + saner
generic_write_checks() + dealing with fcntl()/{read,write}() races
(mirroring O_APPEND/O_DIRECT into iocb->ki_flags and instead of
repeatedly looking at ->f_flags, which can be changed by fcntl(2),
check ->ki_flags - which cannot) + infrastructure bits for dhowells'
  d_inode annotations + Christoph's switch of /dev/loop to
vfs_iter_write()"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (30 commits)
block: loop: switch to VFS ITER_BVEC
configfs: Fix inconsistent use of file_inode() vs file->f_path.dentry->d_inode
VFS: Make pathwalk use d_is_reg() rather than S_ISREG()
VFS: Fix up debugfs to use d_is_dir() in place of S_ISDIR()
VFS: Combine inode checks with d_is_negative() and d_is_positive() in pathwalk
NFS: Don't use d_inode as a variable name
VFS: Impose ordering on accesses of d_inode and d_flags
VFS: Add owner-filesystem positive/negative dentry checks
nfs: generic_write_checks() shouldn't be done on swapout...
ocfs2: use __generic_file_write_iter()
mirror O_APPEND and O_DIRECT into iocb->ki_flags
switch generic_write_checks() to iocb and iter
ocfs2: move generic_write_checks() before the alignment checks
ocfs2_file_write_iter: stop messing with ppos
udf_file_write_iter: reorder and simplify
fuse: ->direct_IO() doesn't need generic_write_checks()
ext4_file_write_iter: move generic_write_checks() up
xfs_file_aio_write_checks: switch to iocb/iov_iter
generic_write_checks(): drop isblk argument
blkdev_write_iter: expand generic_file_checks() call in there
...
55 files changed, 691 insertions(+), 861 deletions(-)
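For orientation before the per-file hunks, here is a hedged sketch (not part of the patch) of the calling conventions this series converges on: ->direct_IO() loses its `int rw` argument and derives the direction from iov_iter_rw(), generic_write_checks() now takes the iocb and iov_iter directly, and O_APPEND/O_DIRECT are read back from iocb->ki_flags rather than file->f_flags. The myfs_* names, myfs_get_block() and myfs_direct_write() are hypothetical placeholders; the helpers used are the ones converted by this series.

```c
#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical helpers, declared only so the sketch is self-contained. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);
static ssize_t myfs_direct_write(struct kiocb *iocb, struct iov_iter *from);

static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* The direction now comes from the iov_iter, not an 'int rw' argument. */
	if (iov_iter_rw(iter) == WRITE &&
	    offset + iov_iter_count(iter) > i_size_read(inode))
		return 0;	/* e.g. punt back to buffered I/O */

	return blockdev_direct_IO(iocb, inode, iter, offset, myfs_get_block);
}

static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	/*
	 * O_APPEND repositioning, limit checks and truncation of 'from' all
	 * happen in here now; <= 0 means an error or nothing left to write.
	 */
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		/* O_DIRECT is mirrored into ki_flags; don't reread f_flags. */
		if (iocb->ki_flags & IOCB_DIRECT)
			ret = myfs_direct_write(iocb, from);
		else
			ret = __generic_file_write_iter(iocb, from);
	}
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err = generic_write_sync(iocb->ki_filp,
						 iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
```

The same pattern repeats throughout the per-filesystem hunks below (affs, btrfs, ceph, cifs, dax, 9p, ...).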
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 7c3f187d48bf..0a926e2ba3ab 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -196,7 +196,7 @@ prototypes:
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+	int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	int (*launder_page)(struct page *);
 	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 207cdca68bed..5d833b32bbcd 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -590,7 +590,7 @@ struct address_space_operations {
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
 	/* migrate the contents of a page to the specified target */
 	int (*migratepage) (struct page *, struct page *);
 	int (*launder_page) (struct page *);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c4fd1e45ce1e..ae3fcb4199e9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -88,28 +88,6 @@ static int part_shift;
 
 static struct workqueue_struct *loop_wq;
 
-/*
- * Transfer functions
- */
-static int transfer_none(struct loop_device *lo, int cmd,
-			 struct page *raw_page, unsigned raw_off,
-			 struct page *loop_page, unsigned loop_off,
-			 int size, sector_t real_block)
-{
-	char *raw_buf = kmap_atomic(raw_page) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page) + loop_off;
-
-	if (cmd == READ)
-		memcpy(loop_buf, raw_buf, size);
-	else
-		memcpy(raw_buf, loop_buf, size);
-
-	kunmap_atomic(loop_buf);
-	kunmap_atomic(raw_buf);
-	cond_resched();
-	return 0;
-}
-
 static int transfer_xor(struct loop_device *lo, int cmd,
 			struct page *raw_page, unsigned raw_off,
 			struct page *loop_page, unsigned loop_off,
@@ -148,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 
 static struct loop_func_table none_funcs = {
 	.number = LO_CRYPT_NONE,
-	.transfer = transfer_none,
-};
+};
 
 static struct loop_func_table xor_funcs = {
 	.number = LO_CRYPT_XOR,
 	.transfer = transfer_xor,
 	.init = xor_init
 };
 
 /* xfer_funcs[0] is special - its release function is never called */
 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -215,207 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
| 215 | struct page *lpage, unsigned loffs, | 192 | struct page *lpage, unsigned loffs, |
| 216 | int size, sector_t rblock) | 193 | int size, sector_t rblock) |
| 217 | { | 194 | { |
| 218 | if (unlikely(!lo->transfer)) | 195 | int ret; |
| 196 | |||
| 197 | ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); | ||
| 198 | if (likely(!ret)) | ||
| 219 | return 0; | 199 | return 0; |
| 220 | 200 | ||
| 221 | return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); | 201 | printk_ratelimited(KERN_ERR |
| 202 | "loop: Transfer error at byte offset %llu, length %i.\n", | ||
| 203 | (unsigned long long)rblock << 9, size); | ||
| 204 | return ret; | ||
| 222 | } | 205 | } |
| 223 | 206 | ||
| 224 | /** | 207 | static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) |
| 225 | * __do_lo_send_write - helper for writing data to a loop device | ||
| 226 | * | ||
| 227 | * This helper just factors out common code between do_lo_send_direct_write() | ||
| 228 | * and do_lo_send_write(). | ||
| 229 | */ | ||
| 230 | static int __do_lo_send_write(struct file *file, | ||
| 231 | u8 *buf, const int len, loff_t pos) | ||
| 232 | { | 208 | { |
| 233 | struct kvec kvec = {.iov_base = buf, .iov_len = len}; | 209 | struct iov_iter i; |
| 234 | struct iov_iter from; | ||
| 235 | ssize_t bw; | 210 | ssize_t bw; |
| 236 | 211 | ||
| 237 | iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len); | 212 | iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); |
| 238 | 213 | ||
| 239 | file_start_write(file); | 214 | file_start_write(file); |
| 240 | bw = vfs_iter_write(file, &from, &pos); | 215 | bw = vfs_iter_write(file, &i, ppos); |
| 241 | file_end_write(file); | 216 | file_end_write(file); |
| 242 | if (likely(bw == len)) | 217 | |
| 218 | if (likely(bw == bvec->bv_len)) | ||
| 243 | return 0; | 219 | return 0; |
| 244 | printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", | 220 | |
| 245 | (unsigned long long)pos, len); | 221 | printk_ratelimited(KERN_ERR |
| 222 | "loop: Write error at byte offset %llu, length %i.\n", | ||
| 223 | (unsigned long long)*ppos, bvec->bv_len); | ||
| 246 | if (bw >= 0) | 224 | if (bw >= 0) |
| 247 | bw = -EIO; | 225 | bw = -EIO; |
| 248 | return bw; | 226 | return bw; |
| 249 | } | 227 | } |
| 250 | 228 | ||
| 251 | /** | 229 | static int lo_write_simple(struct loop_device *lo, struct request *rq, |
| 252 | * do_lo_send_direct_write - helper for writing data to a loop device | 230 | loff_t pos) |
| 253 | * | ||
| 254 | * This is the fast, non-transforming version that does not need double | ||
| 255 | * buffering. | ||
| 256 | */ | ||
| 257 | static int do_lo_send_direct_write(struct loop_device *lo, | ||
| 258 | struct bio_vec *bvec, loff_t pos, struct page *page) | ||
| 259 | { | 231 | { |
| 260 | ssize_t bw = __do_lo_send_write(lo->lo_backing_file, | 232 | struct bio_vec bvec; |
| 261 | kmap(bvec->bv_page) + bvec->bv_offset, | 233 | struct req_iterator iter; |
| 262 | bvec->bv_len, pos); | 234 | int ret = 0; |
| 263 | kunmap(bvec->bv_page); | 235 | |
| 264 | cond_resched(); | 236 | rq_for_each_segment(bvec, rq, iter) { |
| 265 | return bw; | 237 | ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); |
| 238 | if (ret < 0) | ||
| 239 | break; | ||
| 240 | cond_resched(); | ||
| 241 | } | ||
| 242 | |||
| 243 | return ret; | ||
| 266 | } | 244 | } |
| 267 | 245 | ||
| 268 | /** | 246 | /* |
| 269 | * do_lo_send_write - helper for writing data to a loop device | ||
| 270 | * | ||
| 271 | * This is the slow, transforming version that needs to double buffer the | 247 | * This is the slow, transforming version that needs to double buffer the |
| 272 | * data as it cannot do the transformations in place without having direct | 248 | * data as it cannot do the transformations in place without having direct |
| 273 | * access to the destination pages of the backing file. | 249 | * access to the destination pages of the backing file. |
| 274 | */ | 250 | */ |
| 275 | static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, | 251 | static int lo_write_transfer(struct loop_device *lo, struct request *rq, |
| 276 | loff_t pos, struct page *page) | 252 | loff_t pos) |
| 277 | { | 253 | { |
| 278 | int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, | 254 | struct bio_vec bvec, b; |
| 279 | bvec->bv_offset, bvec->bv_len, pos >> 9); | ||
| 280 | if (likely(!ret)) | ||
| 281 | return __do_lo_send_write(lo->lo_backing_file, | ||
| 282 | page_address(page), bvec->bv_len, | ||
| 283 | pos); | ||
| 284 | printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, " | ||
| 285 | "length %i.\n", (unsigned long long)pos, bvec->bv_len); | ||
| 286 | if (ret > 0) | ||
| 287 | ret = -EIO; | ||
| 288 | return ret; | ||
| 289 | } | ||
| 290 | |||
| 291 | static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos) | ||
| 292 | { | ||
| 293 | int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, | ||
| 294 | struct page *page); | ||
| 295 | struct bio_vec bvec; | ||
| 296 | struct req_iterator iter; | 255 | struct req_iterator iter; |
| 297 | struct page *page = NULL; | 256 | struct page *page; |
| 298 | int ret = 0; | 257 | int ret = 0; |
| 299 | 258 | ||
| 300 | if (lo->transfer != transfer_none) { | 259 | page = alloc_page(GFP_NOIO); |
| 301 | page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); | 260 | if (unlikely(!page)) |
| 302 | if (unlikely(!page)) | 261 | return -ENOMEM; |
| 303 | goto fail; | ||
| 304 | kmap(page); | ||
| 305 | do_lo_send = do_lo_send_write; | ||
| 306 | } else { | ||
| 307 | do_lo_send = do_lo_send_direct_write; | ||
| 308 | } | ||
| 309 | 262 | ||
| 310 | rq_for_each_segment(bvec, rq, iter) { | 263 | rq_for_each_segment(bvec, rq, iter) { |
| 311 | ret = do_lo_send(lo, &bvec, pos, page); | 264 | ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, |
| 265 | bvec.bv_offset, bvec.bv_len, pos >> 9); | ||
| 266 | if (unlikely(ret)) | ||
| 267 | break; | ||
| 268 | |||
| 269 | b.bv_page = page; | ||
| 270 | b.bv_offset = 0; | ||
| 271 | b.bv_len = bvec.bv_len; | ||
| 272 | ret = lo_write_bvec(lo->lo_backing_file, &b, &pos); | ||
| 312 | if (ret < 0) | 273 | if (ret < 0) |
| 313 | break; | 274 | break; |
| 314 | pos += bvec.bv_len; | ||
| 315 | } | 275 | } |
| 316 | if (page) { | 276 | |
| 317 | kunmap(page); | 277 | __free_page(page); |
| 318 | __free_page(page); | ||
| 319 | } | ||
| 320 | out: | ||
| 321 | return ret; | 278 | return ret; |
| 322 | fail: | ||
| 323 | printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n"); | ||
| 324 | ret = -ENOMEM; | ||
| 325 | goto out; | ||
| 326 | } | 279 | } |
| 327 | 280 | ||
| 328 | struct lo_read_data { | 281 | static int lo_read_simple(struct loop_device *lo, struct request *rq, |
| 329 | struct loop_device *lo; | 282 | loff_t pos) |
| 330 | struct page *page; | 283 | { |
| 331 | unsigned offset; | 284 | struct bio_vec bvec; |
| 332 | int bsize; | 285 | struct req_iterator iter; |
| 333 | }; | 286 | struct iov_iter i; |
| 287 | ssize_t len; | ||
| 334 | 288 | ||
| 335 | static int | 289 | rq_for_each_segment(bvec, rq, iter) { |
| 336 | lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | 290 | iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); |
| 337 | struct splice_desc *sd) | 291 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos); |
| 338 | { | 292 | if (len < 0) |
| 339 | struct lo_read_data *p = sd->u.data; | 293 | return len; |
| 340 | struct loop_device *lo = p->lo; | ||
| 341 | struct page *page = buf->page; | ||
| 342 | sector_t IV; | ||
| 343 | int size; | ||
| 344 | |||
| 345 | IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + | ||
| 346 | (buf->offset >> 9); | ||
| 347 | size = sd->len; | ||
| 348 | if (size > p->bsize) | ||
| 349 | size = p->bsize; | ||
| 350 | |||
| 351 | if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) { | ||
| 352 | printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n", | ||
| 353 | page->index); | ||
| 354 | size = -EINVAL; | ||
| 355 | } | ||
| 356 | 294 | ||
| 357 | flush_dcache_page(p->page); | 295 | flush_dcache_page(bvec.bv_page); |
| 358 | 296 | ||
| 359 | if (size > 0) | 297 | if (len != bvec.bv_len) { |
| 360 | p->offset += size; | 298 | struct bio *bio; |
| 361 | 299 | ||
| 362 | return size; | 300 | __rq_for_each_bio(bio, rq) |
| 363 | } | 301 | zero_fill_bio(bio); |
| 302 | break; | ||
| 303 | } | ||
| 304 | cond_resched(); | ||
| 305 | } | ||
| 364 | 306 | ||
| 365 | static int | 307 | return 0; |
| 366 | lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) | ||
| 367 | { | ||
| 368 | return __splice_from_pipe(pipe, sd, lo_splice_actor); | ||
| 369 | } | 308 | } |
| 370 | 309 | ||
| 371 | static ssize_t | 310 | static int lo_read_transfer(struct loop_device *lo, struct request *rq, |
| 372 | do_lo_receive(struct loop_device *lo, | 311 | loff_t pos) |
| 373 | struct bio_vec *bvec, int bsize, loff_t pos) | ||
| 374 | { | 312 | { |
| 375 | struct lo_read_data cookie; | 313 | struct bio_vec bvec, b; |
| 376 | struct splice_desc sd; | 314 | struct req_iterator iter; |
| 377 | struct file *file; | 315 | struct iov_iter i; |
| 378 | ssize_t retval; | 316 | struct page *page; |
| 317 | ssize_t len; | ||
| 318 | int ret = 0; | ||
| 379 | 319 | ||
| 380 | cookie.lo = lo; | 320 | page = alloc_page(GFP_NOIO); |
| 381 | cookie.page = bvec->bv_page; | 321 | if (unlikely(!page)) |
| 382 | cookie.offset = bvec->bv_offset; | 322 | return -ENOMEM; |
| 383 | cookie.bsize = bsize; | ||
| 384 | 323 | ||
| 385 | sd.len = 0; | 324 | rq_for_each_segment(bvec, rq, iter) { |
| 386 | sd.total_len = bvec->bv_len; | 325 | loff_t offset = pos; |
| 387 | sd.flags = 0; | ||
| 388 | sd.pos = pos; | ||
| 389 | sd.u.data = &cookie; | ||
| 390 | 326 | ||
| 391 | file = lo->lo_backing_file; | 327 | b.bv_page = page; |
| 392 | retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); | 328 | b.bv_offset = 0; |
| 329 | b.bv_len = bvec.bv_len; | ||
| 393 | 330 | ||
| 394 | return retval; | 331 | iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); |
| 395 | } | 332 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos); |
| 333 | if (len < 0) { | ||
| 334 | ret = len; | ||
| 335 | goto out_free_page; | ||
| 336 | } | ||
| 396 | 337 | ||
| 397 | static int | 338 | ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, |
| 398 | lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos) | 339 | bvec.bv_offset, len, offset >> 9); |
| 399 | { | 340 | if (ret) |
| 400 | struct bio_vec bvec; | 341 | goto out_free_page; |
| 401 | struct req_iterator iter; | ||
| 402 | ssize_t s; | ||
| 403 | 342 | ||
| 404 | rq_for_each_segment(bvec, rq, iter) { | 343 | flush_dcache_page(bvec.bv_page); |
| 405 | s = do_lo_receive(lo, &bvec, bsize, pos); | ||
| 406 | if (s < 0) | ||
| 407 | return s; | ||
| 408 | 344 | ||
| 409 | if (s != bvec.bv_len) { | 345 | if (len != bvec.bv_len) { |
| 410 | struct bio *bio; | 346 | struct bio *bio; |
| 411 | 347 | ||
| 412 | __rq_for_each_bio(bio, rq) | 348 | __rq_for_each_bio(bio, rq) |
| 413 | zero_fill_bio(bio); | 349 | zero_fill_bio(bio); |
| 414 | break; | 350 | break; |
| 415 | } | 351 | } |
| 416 | pos += bvec.bv_len; | ||
| 417 | } | 352 | } |
| 418 | return 0; | 353 | |
| 354 | ret = 0; | ||
| 355 | out_free_page: | ||
| 356 | __free_page(page); | ||
| 357 | return ret; | ||
| 419 | } | 358 | } |
| 420 | 359 | ||
| 421 | static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) | 360 | static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) |
@@ -464,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 			ret = lo_req_flush(lo, rq);
 		else if (rq->cmd_flags & REQ_DISCARD)
 			ret = lo_discard(lo, rq, pos);
+		else if (lo->transfer)
+			ret = lo_write_transfer(lo, rq, pos);
 		else
-			ret = lo_send(lo, rq, pos);
-	} else
-		ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
+			ret = lo_write_simple(lo, rq, pos);
+
+	} else {
+		if (lo->transfer)
+			ret = lo_read_transfer(lo, rq, pos);
+		else
+			ret = lo_read_simple(lo, rq, pos);
+	}
 
 	return ret;
 }
@@ -788,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
-	lo->transfer = transfer_none;
+	lo->transfer = NULL;
 	lo->ioctl = NULL;
 	lo->lo_sizelimit = 0;
 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1007,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
| 1007 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, | 953 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, |
| 1008 | info->lo_encrypt_key_size); | 954 | info->lo_encrypt_key_size); |
| 1009 | lo->lo_key_owner = uid; | 955 | lo->lo_key_owner = uid; |
| 1010 | } | 956 | } |
| 1011 | 957 | ||
| 1012 | return 0; | 958 | return 0; |
| 1013 | } | 959 | } |
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 91442fab5725..c6c824356464 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
| 359 | * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ | 359 | * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ |
| 360 | #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ | 360 | #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ |
| 361 | ~(DT_MAX_BRW_SIZE - 1)) | 361 | ~(DT_MAX_BRW_SIZE - 1)) |
| 362 | static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, | 362 | static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, |
| 363 | struct iov_iter *iter, loff_t file_offset) | 363 | loff_t file_offset) |
| 364 | { | 364 | { |
| 365 | struct lu_env *env; | 365 | struct lu_env *env; |
| 366 | struct cl_io *io; | 366 | struct cl_io *io; |
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 399 | * size changing by concurrent truncates and writes. | 399 | * size changing by concurrent truncates and writes. |
| 400 | * 1. Need inode mutex to operate transient pages. | 400 | * 1. Need inode mutex to operate transient pages. |
| 401 | */ | 401 | */ |
| 402 | if (rw == READ) | 402 | if (iov_iter_rw(iter) == READ) |
| 403 | mutex_lock(&inode->i_mutex); | 403 | mutex_lock(&inode->i_mutex); |
| 404 | 404 | ||
| 405 | LASSERT(obj->cob_transient_pages == 0); | 405 | LASSERT(obj->cob_transient_pages == 0); |
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 408 | size_t offs; | 408 | size_t offs; |
| 409 | 409 | ||
| 410 | count = min_t(size_t, iov_iter_count(iter), size); | 410 | count = min_t(size_t, iov_iter_count(iter), size); |
| 411 | if (rw == READ) { | 411 | if (iov_iter_rw(iter) == READ) { |
| 412 | if (file_offset >= i_size_read(inode)) | 412 | if (file_offset >= i_size_read(inode)) |
| 413 | break; | 413 | break; |
| 414 | if (file_offset + count > i_size_read(inode)) | 414 | if (file_offset + count > i_size_read(inode)) |
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 418 | result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); | 418 | result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); |
| 419 | if (likely(result > 0)) { | 419 | if (likely(result > 0)) { |
| 420 | int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); | 420 | int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); |
| 421 | result = ll_direct_IO_26_seg(env, io, rw, inode, | 421 | result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), |
| 422 | file->f_mapping, | 422 | inode, file->f_mapping, |
| 423 | result, file_offset, | 423 | result, file_offset, pages, |
| 424 | pages, n); | 424 | n); |
| 425 | ll_free_user_pages(pages, n, rw==READ); | 425 | ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); |
| 426 | } | 426 | } |
| 427 | if (unlikely(result <= 0)) { | 427 | if (unlikely(result <= 0)) { |
| 428 | /* If we can't allocate a large enough buffer | 428 | /* If we can't allocate a large enough buffer |
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 449 | } | 449 | } |
| 450 | out: | 450 | out: |
| 451 | LASSERT(obj->cob_transient_pages == 0); | 451 | LASSERT(obj->cob_transient_pages == 0); |
| 452 | if (rw == READ) | 452 | if (iov_iter_rw(iter) == READ) |
| 453 | mutex_unlock(&inode->i_mutex); | 453 | mutex_unlock(&inode->i_mutex); |
| 454 | 454 | ||
| 455 | if (tot_bytes > 0) { | 455 | if (tot_bytes > 0) { |
| 456 | if (rw == WRITE) { | 456 | if (iov_iter_rw(iter) == WRITE) { |
| 457 | struct lov_stripe_md *lsm; | 457 | struct lov_stripe_md *lsm; |
| 458 | 458 | ||
| 459 | lsm = ccc_inode_lsm_get(inode); | 459 | lsm = ccc_inode_lsm_get(inode); |
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 2e38f9a5b472..be35d05a4d0e 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -230,7 +230,6 @@ static int v9fs_launder_page(struct page *page)
 
 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
- * @rw: direction (read or write)
  * @iocb: target I/O control block
  * @iov: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
@@ -248,12 +247,12 @@ static int v9fs_launder_page(struct page *page)
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
 	ssize_t n;
 	int err = 0;
-	if (rw & WRITE) {
+	if (iov_iter_rw(iter) == WRITE) {
 		n = p9_client_write(file->private_data, pos, iter, &err);
 		if (n) {
 			struct inode *inode = file_inode(file);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index d7fcb775311e..2a9dd37dc426 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -404,21 +404,16 @@ static ssize_t
| 404 | v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 404 | v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
| 405 | { | 405 | { |
| 406 | struct file *file = iocb->ki_filp; | 406 | struct file *file = iocb->ki_filp; |
| 407 | ssize_t retval = 0; | 407 | ssize_t retval; |
| 408 | loff_t origin = iocb->ki_pos; | 408 | loff_t origin; |
| 409 | size_t count = iov_iter_count(from); | ||
| 410 | int err = 0; | 409 | int err = 0; |
| 411 | 410 | ||
| 412 | retval = generic_write_checks(file, &origin, &count, 0); | 411 | retval = generic_write_checks(iocb, from); |
| 413 | if (retval) | 412 | if (retval <= 0) |
| 414 | return retval; | 413 | return retval; |
| 415 | 414 | ||
| 416 | iov_iter_truncate(from, count); | 415 | origin = iocb->ki_pos; |
| 417 | 416 | retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); | |
| 418 | if (!count) | ||
| 419 | return 0; | ||
| 420 | |||
| 421 | retval = p9_client_write(file->private_data, origin, from, &err); | ||
| 422 | if (retval > 0) { | 417 | if (retval > 0) { |
| 423 | struct inode *inode = file_inode(file); | 418 | struct inode *inode = file_inode(file); |
| 424 | loff_t i_size; | 419 | loff_t i_size; |
@@ -428,12 +423,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
| 428 | if (inode->i_mapping && inode->i_mapping->nrpages) | 423 | if (inode->i_mapping && inode->i_mapping->nrpages) |
| 429 | invalidate_inode_pages2_range(inode->i_mapping, | 424 | invalidate_inode_pages2_range(inode->i_mapping, |
| 430 | pg_start, pg_end); | 425 | pg_start, pg_end); |
| 431 | origin += retval; | 426 | iocb->ki_pos += retval; |
| 432 | i_size = i_size_read(inode); | 427 | i_size = i_size_read(inode); |
| 433 | iocb->ki_pos = origin; | 428 | if (iocb->ki_pos > i_size) { |
| 434 | if (origin > i_size) { | 429 | inode_add_bytes(inode, iocb->ki_pos - i_size); |
| 435 | inode_add_bytes(inode, origin - i_size); | 430 | i_size_write(inode, iocb->ki_pos); |
| 436 | i_size_write(inode, origin); | ||
| 437 | } | 431 | } |
| 438 | return retval; | 432 | return retval; |
| 439 | } | 433 | } |
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 7c1a3d4c19c2..dcf27951781c 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
 }
 
 static ssize_t
-affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-	       loff_t offset)
+affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	if (rw == WRITE) {
+	if (iov_iter_rw(iter) == WRITE) {
 		loff_t size = offset + count;
 
 		if (AFFS_I(inode)->mmu_private < size)
 			return 0;
 	}
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
-	if (ret < 0 && (rw & WRITE))
+	ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
+	if (ret < 0 && iov_iter_rw(iter) == WRITE)
 		affs_write_failed(mapping, offset + count);
 	return ret;
 }
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1517,7 +1517,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	}
 	req->common.ki_pos = iocb->aio_offset;
 	req->common.ki_complete = aio_complete;
-	req->common.ki_flags = 0;
+	req->common.ki_flags = iocb_flags(req->common.ki_filp);
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b5e87896f517..897ee0503932 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -146,15 +146,13 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | static ssize_t | 148 | static ssize_t |
| 149 | blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | 149 | blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) |
| 150 | loff_t offset) | ||
| 151 | { | 150 | { |
| 152 | struct file *file = iocb->ki_filp; | 151 | struct file *file = iocb->ki_filp; |
| 153 | struct inode *inode = file->f_mapping->host; | 152 | struct inode *inode = file->f_mapping->host; |
| 154 | 153 | ||
| 155 | return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter, | 154 | return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset, |
| 156 | offset, blkdev_get_block, | 155 | blkdev_get_block, NULL, NULL, 0); |
| 157 | NULL, NULL, 0); | ||
| 158 | } | 156 | } |
| 159 | 157 | ||
| 160 | int __sync_blockdev(struct block_device *bdev, int wait) | 158 | int __sync_blockdev(struct block_device *bdev, int wait) |
@@ -1597,9 +1595,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
| 1597 | ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) | 1595 | ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) |
| 1598 | { | 1596 | { |
| 1599 | struct file *file = iocb->ki_filp; | 1597 | struct file *file = iocb->ki_filp; |
| 1598 | struct inode *bd_inode = file->f_mapping->host; | ||
| 1599 | loff_t size = i_size_read(bd_inode); | ||
| 1600 | struct blk_plug plug; | 1600 | struct blk_plug plug; |
| 1601 | ssize_t ret; | 1601 | ssize_t ret; |
| 1602 | 1602 | ||
| 1603 | if (bdev_read_only(I_BDEV(bd_inode))) | ||
| 1604 | return -EPERM; | ||
| 1605 | |||
| 1606 | if (!iov_iter_count(from)) | ||
| 1607 | return 0; | ||
| 1608 | |||
| 1609 | if (iocb->ki_pos >= size) | ||
| 1610 | return -ENOSPC; | ||
| 1611 | |||
| 1612 | iov_iter_truncate(from, size - iocb->ki_pos); | ||
| 1613 | |||
| 1603 | blk_start_plug(&plug); | 1614 | blk_start_plug(&plug); |
| 1604 | ret = __generic_file_write_iter(iocb, from); | 1615 | ret = __generic_file_write_iter(iocb, from); |
| 1605 | if (ret > 0) { | 1616 | if (ret > 0) { |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index cdc801c85105..faa7d390841b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1739,27 +1739,19 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
| 1739 | u64 start_pos; | 1739 | u64 start_pos; |
| 1740 | u64 end_pos; | 1740 | u64 end_pos; |
| 1741 | ssize_t num_written = 0; | 1741 | ssize_t num_written = 0; |
| 1742 | ssize_t err = 0; | ||
| 1743 | size_t count = iov_iter_count(from); | ||
| 1744 | bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host); | 1742 | bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host); |
| 1745 | loff_t pos = iocb->ki_pos; | 1743 | ssize_t err; |
| 1744 | loff_t pos; | ||
| 1745 | size_t count; | ||
| 1746 | 1746 | ||
| 1747 | mutex_lock(&inode->i_mutex); | 1747 | mutex_lock(&inode->i_mutex); |
| 1748 | 1748 | err = generic_write_checks(iocb, from); | |
| 1749 | current->backing_dev_info = inode_to_bdi(inode); | 1749 | if (err <= 0) { |
| 1750 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | ||
| 1751 | if (err) { | ||
| 1752 | mutex_unlock(&inode->i_mutex); | 1750 | mutex_unlock(&inode->i_mutex); |
| 1753 | goto out; | 1751 | return err; |
| 1754 | } | ||
| 1755 | |||
| 1756 | if (count == 0) { | ||
| 1757 | mutex_unlock(&inode->i_mutex); | ||
| 1758 | goto out; | ||
| 1759 | } | 1752 | } |
| 1760 | 1753 | ||
| 1761 | iov_iter_truncate(from, count); | 1754 | current->backing_dev_info = inode_to_bdi(inode); |
| 1762 | |||
| 1763 | err = file_remove_suid(file); | 1755 | err = file_remove_suid(file); |
| 1764 | if (err) { | 1756 | if (err) { |
| 1765 | mutex_unlock(&inode->i_mutex); | 1757 | mutex_unlock(&inode->i_mutex); |
@@ -1786,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
| 1786 | */ | 1778 | */ |
| 1787 | update_time_for_write(inode); | 1779 | update_time_for_write(inode); |
| 1788 | 1780 | ||
| 1781 | pos = iocb->ki_pos; | ||
| 1782 | count = iov_iter_count(from); | ||
| 1789 | start_pos = round_down(pos, root->sectorsize); | 1783 | start_pos = round_down(pos, root->sectorsize); |
| 1790 | if (start_pos > i_size_read(inode)) { | 1784 | if (start_pos > i_size_read(inode)) { |
| 1791 | /* Expand hole size to cover write data, preventing empty gap */ | 1785 | /* Expand hole size to cover write data, preventing empty gap */ |
@@ -1800,7 +1794,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
| 1800 | if (sync) | 1794 | if (sync) |
| 1801 | atomic_inc(&BTRFS_I(inode)->sync_writers); | 1795 | atomic_inc(&BTRFS_I(inode)->sync_writers); |
| 1802 | 1796 | ||
| 1803 | if (file->f_flags & O_DIRECT) { | 1797 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 1804 | num_written = __btrfs_direct_write(iocb, from, pos); | 1798 | num_written = __btrfs_direct_write(iocb, from, pos); |
| 1805 | } else { | 1799 | } else { |
| 1806 | num_written = __btrfs_buffered_write(file, from, pos); | 1800 | num_written = __btrfs_buffered_write(file, from, pos); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 686331f22b15..43192e10cc43 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8081,7 +8081,7 @@ free_ordered:
| 8081 | bio_endio(dio_bio, ret); | 8081 | bio_endio(dio_bio, ret); |
| 8082 | } | 8082 | } |
| 8083 | 8083 | ||
| 8084 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, | 8084 | static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, |
| 8085 | const struct iov_iter *iter, loff_t offset) | 8085 | const struct iov_iter *iter, loff_t offset) |
| 8086 | { | 8086 | { |
| 8087 | int seg; | 8087 | int seg; |
@@ -8096,7 +8096,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
| 8096 | goto out; | 8096 | goto out; |
| 8097 | 8097 | ||
| 8098 | /* If this is a write we don't need to check anymore */ | 8098 | /* If this is a write we don't need to check anymore */ |
| 8099 | if (rw & WRITE) | 8099 | if (iov_iter_rw(iter) == WRITE) |
| 8100 | return 0; | 8100 | return 0; |
| 8101 | /* | 8101 | /* |
| 8102 | * Check to make sure we don't have duplicate iov_base's in this | 8102 | * Check to make sure we don't have duplicate iov_base's in this |
@@ -8114,8 +8114,8 @@ out:
| 8114 | return retval; | 8114 | return retval; |
| 8115 | } | 8115 | } |
| 8116 | 8116 | ||
| 8117 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | 8117 | static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 8118 | struct iov_iter *iter, loff_t offset) | 8118 | loff_t offset) |
| 8119 | { | 8119 | { |
| 8120 | struct file *file = iocb->ki_filp; | 8120 | struct file *file = iocb->ki_filp; |
| 8121 | struct inode *inode = file->f_mapping->host; | 8121 | struct inode *inode = file->f_mapping->host; |
@@ -8126,7 +8126,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
| 8126 | bool relock = false; | 8126 | bool relock = false; |
| 8127 | ssize_t ret; | 8127 | ssize_t ret; |
| 8128 | 8128 | ||
| 8129 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset)) | 8129 | if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset)) |
| 8130 | return 0; | 8130 | return 0; |
| 8131 | 8131 | ||
| 8132 | atomic_inc(&inode->i_dio_count); | 8132 | atomic_inc(&inode->i_dio_count); |
@@ -8144,7 +8144,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
| 8144 | filemap_fdatawrite_range(inode->i_mapping, offset, | 8144 | filemap_fdatawrite_range(inode->i_mapping, offset, |
| 8145 | offset + count - 1); | 8145 | offset + count - 1); |
| 8146 | 8146 | ||
| 8147 | if (rw & WRITE) { | 8147 | if (iov_iter_rw(iter) == WRITE) { |
| 8148 | /* | 8148 | /* |
| 8149 | * If the write DIO is beyond the EOF, we need update | 8149 | * If the write DIO is beyond the EOF, we need update |
| 8150 | * the isize, but it is protected by i_mutex. So we can | 8150 | * the isize, but it is protected by i_mutex. So we can |
@@ -8174,11 +8174,11 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
| 8174 | wakeup = false; | 8174 | wakeup = false; |
| 8175 | } | 8175 | } |
| 8176 | 8176 | ||
| 8177 | ret = __blockdev_direct_IO(rw, iocb, inode, | 8177 | ret = __blockdev_direct_IO(iocb, inode, |
| 8178 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, | 8178 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, |
| 8179 | iter, offset, btrfs_get_blocks_direct, NULL, | 8179 | iter, offset, btrfs_get_blocks_direct, NULL, |
| 8180 | btrfs_submit_direct, flags); | 8180 | btrfs_submit_direct, flags); |
| 8181 | if (rw & WRITE) { | 8181 | if (iov_iter_rw(iter) == WRITE) { |
| 8182 | current->journal_info = NULL; | 8182 | current->journal_info = NULL; |
| 8183 | if (ret < 0 && ret != -EIOCBQUEUED) | 8183 | if (ret < 0 && ret != -EIOCBQUEUED) |
| 8184 | btrfs_delalloc_release_space(inode, count); | 8184 | btrfs_delalloc_release_space(inode, count); |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fd5599d32362..155ab9c0246b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1198,8 +1198,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * intercept O_DIRECT reads and writes early, this function should
  * never get called.
  */
-static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-			      struct iov_iter *iter,
+static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
 			      loff_t pos)
 {
 	WARN_ON(1);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 56237ea5fc22..b9b8eb225f66 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -457,7 +457,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
| 457 | if (ret < 0) | 457 | if (ret < 0) |
| 458 | return ret; | 458 | return ret; |
| 459 | 459 | ||
| 460 | if (file->f_flags & O_DIRECT) { | 460 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 461 | while (iov_iter_count(i)) { | 461 | while (iov_iter_count(i)) { |
| 462 | size_t start; | 462 | size_t start; |
| 463 | ssize_t n; | 463 | ssize_t n; |
@@ -828,7 +828,7 @@ again:
| 828 | return ret; | 828 | return ret; |
| 829 | 829 | ||
| 830 | if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || | 830 | if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || |
| 831 | (iocb->ki_filp->f_flags & O_DIRECT) || | 831 | (iocb->ki_flags & IOCB_DIRECT) || |
| 832 | (fi->flags & CEPH_F_SYNC)) { | 832 | (fi->flags & CEPH_F_SYNC)) { |
| 833 | 833 | ||
| 834 | dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", | 834 | dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", |
@@ -941,9 +941,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
| 941 | struct ceph_inode_info *ci = ceph_inode(inode); | 941 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 942 | struct ceph_osd_client *osdc = | 942 | struct ceph_osd_client *osdc = |
| 943 | &ceph_sb_to_client(inode->i_sb)->client->osdc; | 943 | &ceph_sb_to_client(inode->i_sb)->client->osdc; |
| 944 | ssize_t count = iov_iter_count(from), written = 0; | 944 | ssize_t count, written = 0; |
| 945 | int err, want, got; | 945 | int err, want, got; |
| 946 | loff_t pos = iocb->ki_pos; | 946 | loff_t pos; |
| 947 | 947 | ||
| 948 | if (ceph_snap(inode) != CEPH_NOSNAP) | 948 | if (ceph_snap(inode) != CEPH_NOSNAP) |
| 949 | return -EROFS; | 949 | return -EROFS; |
@@ -953,14 +953,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
| 953 | /* We can write back this queue in page reclaim */ | 953 | /* We can write back this queue in page reclaim */ |
| 954 | current->backing_dev_info = inode_to_bdi(inode); | 954 | current->backing_dev_info = inode_to_bdi(inode); |
| 955 | 955 | ||
| 956 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | 956 | err = generic_write_checks(iocb, from); |
| 957 | if (err) | 957 | if (err <= 0) |
| 958 | goto out; | ||
| 959 | |||
| 960 | if (count == 0) | ||
| 961 | goto out; | 958 | goto out; |
| 962 | iov_iter_truncate(from, count); | ||
| 963 | 959 | ||
| 960 | pos = iocb->ki_pos; | ||
| 961 | count = iov_iter_count(from); | ||
| 964 | err = file_remove_suid(file); | 962 | err = file_remove_suid(file); |
| 965 | if (err) | 963 | if (err) |
| 966 | goto out; | 964 | goto out; |
@@ -997,12 +995,12 @@ retry_snap:
| 997 | inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); | 995 | inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); |
| 998 | 996 | ||
| 999 | if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || | 997 | if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || |
| 1000 | (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) { | 998 | (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { |
| 1001 | struct iov_iter data; | 999 | struct iov_iter data; |
| 1002 | mutex_unlock(&inode->i_mutex); | 1000 | mutex_unlock(&inode->i_mutex); |
| 1003 | /* we might need to revert back to that point */ | 1001 | /* we might need to revert back to that point */ |
| 1004 | data = *from; | 1002 | data = *from; |
| 1005 | if (file->f_flags & O_DIRECT) | 1003 | if (iocb->ki_flags & IOCB_DIRECT) |
| 1006 | written = ceph_sync_direct_write(iocb, &data, pos); | 1004 | written = ceph_sync_direct_write(iocb, &data, pos); |
| 1007 | else | 1005 | else |
| 1008 | written = ceph_sync_write(iocb, &data, pos); | 1006 | written = ceph_sync_write(iocb, &data, pos); |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ca30c391a894..ca2bc5406306 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2560,10 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
| 2560 | return rc; | 2560 | return rc; |
| 2561 | } | 2561 | } |
| 2562 | 2562 | ||
| 2563 | static ssize_t | 2563 | ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) |
| 2564 | cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) | ||
| 2565 | { | 2564 | { |
| 2566 | size_t len; | 2565 | struct file *file = iocb->ki_filp; |
| 2567 | ssize_t total_written = 0; | 2566 | ssize_t total_written = 0; |
| 2568 | struct cifsFileInfo *open_file; | 2567 | struct cifsFileInfo *open_file; |
| 2569 | struct cifs_tcon *tcon; | 2568 | struct cifs_tcon *tcon; |
@@ -2573,15 +2572,15 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
| 2573 | struct iov_iter saved_from; | 2572 | struct iov_iter saved_from; |
| 2574 | int rc; | 2573 | int rc; |
| 2575 | 2574 | ||
| 2576 | len = iov_iter_count(from); | 2575 | /* |
| 2577 | rc = generic_write_checks(file, poffset, &len, 0); | 2576 | * BB - optimize the way when signing is disabled. We can drop this |
| 2578 | if (rc) | 2577 | * extra memory-to-memory copying and use iovec buffers for constructing |
| 2579 | return rc; | 2578 | * write request. |
| 2580 | 2579 | */ | |
| 2581 | if (!len) | ||
| 2582 | return 0; | ||
| 2583 | 2580 | ||
| 2584 | iov_iter_truncate(from, len); | 2581 | rc = generic_write_checks(iocb, from); |
| 2582 | if (rc <= 0) | ||
| 2583 | return rc; | ||
| 2585 | 2584 | ||
| 2586 | INIT_LIST_HEAD(&wdata_list); | 2585 | INIT_LIST_HEAD(&wdata_list); |
| 2587 | cifs_sb = CIFS_FILE_SB(file); | 2586 | cifs_sb = CIFS_FILE_SB(file); |
@@ -2593,8 +2592,8 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
| 2593 | 2592 | ||
| 2594 | memcpy(&saved_from, from, sizeof(struct iov_iter)); | 2593 | memcpy(&saved_from, from, sizeof(struct iov_iter)); |
| 2595 | 2594 | ||
| 2596 | rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb, | 2595 | rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from, |
| 2597 | &wdata_list); | 2596 | open_file, cifs_sb, &wdata_list); |
| 2598 | 2597 | ||
| 2599 | /* | 2598 | /* |
| 2600 | * If at least one write was successfully sent, then discard any rc | 2599 | * If at least one write was successfully sent, then discard any rc |
@@ -2633,7 +2632,7 @@ restart_loop:
| 2633 | memcpy(&tmp_from, &saved_from, | 2632 | memcpy(&tmp_from, &saved_from, |
| 2634 | sizeof(struct iov_iter)); | 2633 | sizeof(struct iov_iter)); |
| 2635 | iov_iter_advance(&tmp_from, | 2634 | iov_iter_advance(&tmp_from, |
| 2636 | wdata->offset - *poffset); | 2635 | wdata->offset - iocb->ki_pos); |
| 2637 | 2636 | ||
| 2638 | rc = cifs_write_from_iter(wdata->offset, | 2637 | rc = cifs_write_from_iter(wdata->offset, |
| 2639 | wdata->bytes, &tmp_from, | 2638 | wdata->bytes, &tmp_from, |
@@ -2650,34 +2649,13 @@ restart_loop:
| 2650 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); | 2649 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); |
| 2651 | } | 2650 | } |
| 2652 | 2651 | ||
| 2653 | if (total_written > 0) | 2652 | if (unlikely(!total_written)) |
| 2654 | *poffset += total_written; | 2653 | return rc; |
| 2655 | 2654 | ||
| 2655 | iocb->ki_pos += total_written; | ||
| 2656 | set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags); | ||
| 2656 | cifs_stats_bytes_written(tcon, total_written); | 2657 | cifs_stats_bytes_written(tcon, total_written); |
| 2657 | return total_written ? total_written : (ssize_t)rc; | 2658 | return total_written; |
| 2658 | } | ||
| 2659 | |||
| 2660 | ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) | ||
| 2661 | { | ||
| 2662 | ssize_t written; | ||
| 2663 | struct inode *inode; | ||
| 2664 | loff_t pos = iocb->ki_pos; | ||
| 2665 | |||
| 2666 | inode = file_inode(iocb->ki_filp); | ||
| 2667 | |||
| 2668 | /* | ||
| 2669 | * BB - optimize the way when signing is disabled. We can drop this | ||
| 2670 | * extra memory-to-memory copying and use iovec buffers for constructing | ||
| 2671 | * write request. | ||
| 2672 | */ | ||
| 2673 | |||
| 2674 | written = cifs_iovec_write(iocb->ki_filp, from, &pos); | ||
| 2675 | if (written > 0) { | ||
| 2676 | set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags); | ||
| 2677 | iocb->ki_pos = pos; | ||
| 2678 | } | ||
| 2679 | |||
| 2680 | return written; | ||
| 2681 | } | 2659 | } |
| 2682 | 2660 | ||
| 2683 | static ssize_t | 2661 | static ssize_t |
@@ -2688,8 +2666,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
| 2688 | struct inode *inode = file->f_mapping->host; | 2666 | struct inode *inode = file->f_mapping->host; |
| 2689 | struct cifsInodeInfo *cinode = CIFS_I(inode); | 2667 | struct cifsInodeInfo *cinode = CIFS_I(inode); |
| 2690 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; | 2668 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; |
| 2691 | ssize_t rc = -EACCES; | 2669 | ssize_t rc; |
| 2692 | loff_t lock_pos = iocb->ki_pos; | ||
| 2693 | 2670 | ||
| 2694 | /* | 2671 | /* |
| 2695 | * We need to hold the sem to be sure nobody modifies lock list | 2672 | * We need to hold the sem to be sure nobody modifies lock list |
@@ -2697,23 +2674,24 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
| 2697 | */ | 2674 | */ |
| 2698 | down_read(&cinode->lock_sem); | 2675 | down_read(&cinode->lock_sem); |
| 2699 | mutex_lock(&inode->i_mutex); | 2676 | mutex_lock(&inode->i_mutex); |
| 2700 | if (file->f_flags & O_APPEND) | 2677 | |
| 2701 | lock_pos = i_size_read(inode); | 2678 | rc = generic_write_checks(iocb, from); |
| 2702 | if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from), | 2679 | if (rc <= 0) |
| 2680 | goto out; | ||
| 2681 | |||
| 2682 | if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), | ||
| 2703 | server->vals->exclusive_lock_type, NULL, | 2683 | server->vals->exclusive_lock_type, NULL, |
| 2704 | CIFS_WRITE_OP)) { | 2684 | CIFS_WRITE_OP)) |
| 2705 | rc = __generic_file_write_iter(iocb, from); | 2685 | rc = __generic_file_write_iter(iocb, from); |
| 2706 | mutex_unlock(&inode->i_mutex); | 2686 | else |
| 2707 | 2687 | rc = -EACCES; | |
| 2708 | if (rc > 0) { | 2688 | out: |
| 2709 | ssize_t err; | 2689 | mutex_unlock(&inode->i_mutex); |
| 2710 | 2690 | ||
| 2711 | err = generic_write_sync(file, iocb->ki_pos - rc, rc); | 2691 | if (rc > 0) { |
| 2712 | if (err < 0) | 2692 | ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc); |
| 2713 | rc = err; | 2693 | if (err < 0) |
| 2714 | } | 2694 | rc = err; |
| 2715 | } else { | ||
| 2716 | mutex_unlock(&inode->i_mutex); | ||
| 2717 | } | 2695 | } |
| 2718 | up_read(&cinode->lock_sem); | 2696 | up_read(&cinode->lock_sem); |
| 2719 | return rc; | 2697 | return rc; |
@@ -3877,8 +3855,7 @@ void cifs_oplock_break(struct work_struct *work)
| 3877 | * Direct IO is not yet supported in the cached mode. | 3855 | * Direct IO is not yet supported in the cached mode. |
| 3878 | */ | 3856 | */ |
| 3879 | static ssize_t | 3857 | static ssize_t |
| 3880 | cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter, | 3858 | cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) |
| 3881 | loff_t pos) | ||
| 3882 | { | 3859 | { |
| 3883 | /* | 3860 | /* |
| 3884 | * FIXME | 3861 | * FIXME |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index cf0db005d2f5..acb3d63bc9dc 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1598,7 +1598,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
 		if (offset >= 0)
 			break;
 	default:
-		mutex_unlock(&file_inode(file)->i_mutex);
+		mutex_unlock(&dentry->d_inode->i_mutex);
 		return -EINVAL;
 	}
 	if (offset != file->f_pos) {
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh)
 	return bh->b_state != 0;
 }
 
-static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
+static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 		      loff_t start, loff_t end, get_block_t get_block,
 		      struct buffer_head *bh)
 {
 	ssize_t retval = 0;
 	loff_t pos = start;
@@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 109 | void *addr; | 109 | void *addr; |
| 110 | bool hole = false; | 110 | bool hole = false; |
| 111 | 111 | ||
| 112 | if (rw != WRITE) | 112 | if (iov_iter_rw(iter) != WRITE) |
| 113 | end = min(end, i_size_read(inode)); | 113 | end = min(end, i_size_read(inode)); |
| 114 | 114 | ||
| 115 | while (pos < end) { | 115 | while (pos < end) { |
@@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 124 | bh->b_size = PAGE_ALIGN(end - pos); | 124 | bh->b_size = PAGE_ALIGN(end - pos); |
| 125 | bh->b_state = 0; | 125 | bh->b_state = 0; |
| 126 | retval = get_block(inode, block, bh, | 126 | retval = get_block(inode, block, bh, |
| 127 | rw == WRITE); | 127 | iov_iter_rw(iter) == WRITE); |
| 128 | if (retval) | 128 | if (retval) |
| 129 | break; | 129 | break; |
| 130 | if (!buffer_size_valid(bh)) | 130 | if (!buffer_size_valid(bh)) |
@@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 137 | bh->b_size -= done; | 137 | bh->b_size -= done; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | hole = (rw != WRITE) && !buffer_written(bh); | 140 | hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh); |
| 141 | if (hole) { | 141 | if (hole) { |
| 142 | addr = NULL; | 142 | addr = NULL; |
| 143 | size = bh->b_size - first; | 143 | size = bh->b_size - first; |
@@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 154 | max = min(pos + size, end); | 154 | max = min(pos + size, end); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | if (rw == WRITE) | 157 | if (iov_iter_rw(iter) == WRITE) |
| 158 | len = copy_from_iter(addr, max - pos, iter); | 158 | len = copy_from_iter(addr, max - pos, iter); |
| 159 | else if (!hole) | 159 | else if (!hole) |
| 160 | len = copy_to_iter(addr, max - pos, iter); | 160 | len = copy_to_iter(addr, max - pos, iter); |
@@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 173 | 173 | ||
| 174 | /** | 174 | /** |
| 175 | * dax_do_io - Perform I/O to a DAX file | 175 | * dax_do_io - Perform I/O to a DAX file |
| 176 | * @rw: READ to read or WRITE to write | ||
| 177 | * @iocb: The control block for this I/O | 176 | * @iocb: The control block for this I/O |
| 178 | * @inode: The file which the I/O is directed at | 177 | * @inode: The file which the I/O is directed at |
| 179 | * @iter: The addresses to do I/O from or to | 178 | * @iter: The addresses to do I/O from or to |
@@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
| 189 | * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O | 188 | * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O |
| 190 | * is in progress. | 189 | * is in progress. |
| 191 | */ | 190 | */ |
| 192 | ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, | 191 | ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, |
| 193 | struct iov_iter *iter, loff_t pos, | 192 | struct iov_iter *iter, loff_t pos, get_block_t get_block, |
| 194 | get_block_t get_block, dio_iodone_t end_io, int flags) | 193 | dio_iodone_t end_io, int flags) |
| 195 | { | 194 | { |
| 196 | struct buffer_head bh; | 195 | struct buffer_head bh; |
| 197 | ssize_t retval = -EINVAL; | 196 | ssize_t retval = -EINVAL; |
| @@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 199 | 198 | ||
| 200 | memset(&bh, 0, sizeof(bh)); | 199 | memset(&bh, 0, sizeof(bh)); |
| 201 | 200 | ||
| 202 | if ((flags & DIO_LOCKING) && (rw == READ)) { | 201 | if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) { |
| 203 | struct address_space *mapping = inode->i_mapping; | 202 | struct address_space *mapping = inode->i_mapping; |
| 204 | mutex_lock(&inode->i_mutex); | 203 | mutex_lock(&inode->i_mutex); |
| 205 | retval = filemap_write_and_wait_range(mapping, pos, end - 1); | 204 | retval = filemap_write_and_wait_range(mapping, pos, end - 1); |
| @@ -212,9 +211,9 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 212 | /* Protects against truncate */ | 211 | /* Protects against truncate */ |
| 213 | atomic_inc(&inode->i_dio_count); | 212 | atomic_inc(&inode->i_dio_count); |
| 214 | 213 | ||
| 215 | retval = dax_io(rw, inode, iter, pos, end, get_block, &bh); | 214 | retval = dax_io(inode, iter, pos, end, get_block, &bh); |
| 216 | 215 | ||
| 217 | if ((flags & DIO_LOCKING) && (rw == READ)) | 216 | if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) |
| 218 | mutex_unlock(&inode->i_mutex); | 217 | mutex_unlock(&inode->i_mutex); |
| 219 | 218 | ||
| 220 | if ((retval > 0) && end_io) | 219 | if ((retval > 0) && end_io) |
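The fs/dax.c hunks above drop the explicit rw argument from dax_io() and dax_do_io(); the transfer direction now travels inside the iov_iter and is queried with iov_iter_rw(). Below is a minimal sketch of how a DAX-capable filesystem's ->direct_IO would forward to the new signature; the example_* names are placeholders, not part of this commit, and header includes are omitted.

/*
 * Sketch only: ->direct_IO on a DAX-capable filesystem, built on the
 * reworked dax_do_io() above.  example_get_block stands in for the
 * filesystem's get_block_t.
 */
static ssize_t example_dax_direct_IO(struct kiocb *iocb,
                                     struct iov_iter *iter, loff_t offset)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        /*
         * No rw flag: dax_do_io() derives the direction from the iter.
         * With DIO_LOCKING it takes i_mutex itself for reads, as in the
         * hunk above.
         */
        return dax_do_io(iocb, inode, iter, offset, example_get_block,
                         NULL, DIO_LOCKING);
}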
diff --git a/fs/dcache.c b/fs/dcache.c index d99736a63e3c..656ce522a218 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -269,6 +269,41 @@ static inline int dname_external(const struct dentry *dentry) | |||
| 269 | return dentry->d_name.name != dentry->d_iname; | 269 | return dentry->d_name.name != dentry->d_iname; |
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | /* | ||
| 273 | * Make sure other CPUs see the inode attached before the type is set. | ||
| 274 | */ | ||
| 275 | static inline void __d_set_inode_and_type(struct dentry *dentry, | ||
| 276 | struct inode *inode, | ||
| 277 | unsigned type_flags) | ||
| 278 | { | ||
| 279 | unsigned flags; | ||
| 280 | |||
| 281 | dentry->d_inode = inode; | ||
| 282 | smp_wmb(); | ||
| 283 | flags = READ_ONCE(dentry->d_flags); | ||
| 284 | flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); | ||
| 285 | flags |= type_flags; | ||
| 286 | WRITE_ONCE(dentry->d_flags, flags); | ||
| 287 | } | ||
| 288 | |||
| 289 | /* | ||
| 290 | * Ideally, we want to make sure that other CPUs see the flags cleared before | ||
| 291 | * the inode is detached, but this is really a violation of RCU principles | ||
| 292 | * since the ordering suggests we should always set inode before flags. | ||
| 293 | * | ||
| 294 | * We should instead replace or discard the entire dentry - but that sucks | ||
| 295 | * performancewise on mass deletion/rename. | ||
| 296 | */ | ||
| 297 | static inline void __d_clear_type_and_inode(struct dentry *dentry) | ||
| 298 | { | ||
| 299 | unsigned flags = READ_ONCE(dentry->d_flags); | ||
| 300 | |||
| 301 | flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); | ||
| 302 | WRITE_ONCE(dentry->d_flags, flags); | ||
| 303 | smp_wmb(); | ||
| 304 | dentry->d_inode = NULL; | ||
| 305 | } | ||
| 306 | |||
| 272 | static void dentry_free(struct dentry *dentry) | 307 | static void dentry_free(struct dentry *dentry) |
| 273 | { | 308 | { |
| 274 | WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); | 309 | WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); |
| @@ -311,7 +346,7 @@ static void dentry_iput(struct dentry * dentry) | |||
| 311 | { | 346 | { |
| 312 | struct inode *inode = dentry->d_inode; | 347 | struct inode *inode = dentry->d_inode; |
| 313 | if (inode) { | 348 | if (inode) { |
| 314 | dentry->d_inode = NULL; | 349 | __d_clear_type_and_inode(dentry); |
| 315 | hlist_del_init(&dentry->d_u.d_alias); | 350 | hlist_del_init(&dentry->d_u.d_alias); |
| 316 | spin_unlock(&dentry->d_lock); | 351 | spin_unlock(&dentry->d_lock); |
| 317 | spin_unlock(&inode->i_lock); | 352 | spin_unlock(&inode->i_lock); |
| @@ -335,8 +370,7 @@ static void dentry_unlink_inode(struct dentry * dentry) | |||
| 335 | __releases(dentry->d_inode->i_lock) | 370 | __releases(dentry->d_inode->i_lock) |
| 336 | { | 371 | { |
| 337 | struct inode *inode = dentry->d_inode; | 372 | struct inode *inode = dentry->d_inode; |
| 338 | __d_clear_type(dentry); | 373 | __d_clear_type_and_inode(dentry); |
| 339 | dentry->d_inode = NULL; | ||
| 340 | hlist_del_init(&dentry->d_u.d_alias); | 374 | hlist_del_init(&dentry->d_u.d_alias); |
| 341 | dentry_rcuwalk_barrier(dentry); | 375 | dentry_rcuwalk_barrier(dentry); |
| 342 | spin_unlock(&dentry->d_lock); | 376 | spin_unlock(&dentry->d_lock); |
| @@ -1715,11 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) | |||
| 1715 | unsigned add_flags = d_flags_for_inode(inode); | 1749 | unsigned add_flags = d_flags_for_inode(inode); |
| 1716 | 1750 | ||
| 1717 | spin_lock(&dentry->d_lock); | 1751 | spin_lock(&dentry->d_lock); |
| 1718 | dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); | ||
| 1719 | dentry->d_flags |= add_flags; | ||
| 1720 | if (inode) | 1752 | if (inode) |
| 1721 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); | 1753 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); |
| 1722 | dentry->d_inode = inode; | 1754 | __d_set_inode_and_type(dentry, inode, add_flags); |
| 1723 | dentry_rcuwalk_barrier(dentry); | 1755 | dentry_rcuwalk_barrier(dentry); |
| 1724 | spin_unlock(&dentry->d_lock); | 1756 | spin_unlock(&dentry->d_lock); |
| 1725 | fsnotify_d_instantiate(dentry, inode); | 1757 | fsnotify_d_instantiate(dentry, inode); |
| @@ -1937,8 +1969,7 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) | |||
| 1937 | add_flags |= DCACHE_DISCONNECTED; | 1969 | add_flags |= DCACHE_DISCONNECTED; |
| 1938 | 1970 | ||
| 1939 | spin_lock(&tmp->d_lock); | 1971 | spin_lock(&tmp->d_lock); |
| 1940 | tmp->d_inode = inode; | 1972 | __d_set_inode_and_type(tmp, inode, add_flags); |
| 1941 | tmp->d_flags |= add_flags; | ||
| 1942 | hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); | 1973 | hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); |
| 1943 | hlist_bl_lock(&tmp->d_sb->s_anon); | 1974 | hlist_bl_lock(&tmp->d_sb->s_anon); |
| 1944 | hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); | 1975 | hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); |
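The two helpers added to fs/dcache.c above fix the publication order in one place: d_inode is stored first, an smp_wmb() is issued, and only then are the type bits in d_flags updated, with the reverse order on teardown in __d_clear_type_and_inode(). The fragment below is an illustration of how a lockless reader could pair with that write barrier; it is not code from this commit, and the example_ helper is hypothetical.

/*
 * Illustration only.  Once a reader observes a "real" type in d_flags,
 * the smp_wmb() in __d_set_inode_and_type() guarantees the d_inode
 * store is visible as well, so the pointer read after the matching
 * read barrier cannot be NULL.
 */
static inline struct inode *example_inode_if_regular(struct dentry *dentry)
{
        unsigned flags = READ_ONCE(dentry->d_flags);

        if ((flags & DCACHE_ENTRY_TYPE) != DCACHE_REGULAR_TYPE)
                return NULL;
        smp_rmb();      /* pairs with smp_wmb() in __d_set_inode_and_type() */
        return READ_ONCE(dentry->d_inode);
}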
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 61e72d44cf94..c9ee0dfe90b5 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
| @@ -524,7 +524,7 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) | |||
| 524 | 524 | ||
| 525 | if (debugfs_positive(dentry)) { | 525 | if (debugfs_positive(dentry)) { |
| 526 | dget(dentry); | 526 | dget(dentry); |
| 527 | if (S_ISDIR(dentry->d_inode->i_mode)) | 527 | if (d_is_dir(dentry)) |
| 528 | ret = simple_rmdir(parent->d_inode, dentry); | 528 | ret = simple_rmdir(parent->d_inode, dentry); |
| 529 | else | 529 | else |
| 530 | simple_unlink(parent->d_inode, dentry); | 530 | simple_unlink(parent->d_inode, dentry); |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 6fb00e3f1059..c3b560b24a46 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio) | |||
| 1093 | * for the whole file. | 1093 | * for the whole file. |
| 1094 | */ | 1094 | */ |
| 1095 | static inline ssize_t | 1095 | static inline ssize_t |
| 1096 | do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1096 | do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
| 1097 | struct block_device *bdev, struct iov_iter *iter, loff_t offset, | 1097 | struct block_device *bdev, struct iov_iter *iter, |
| 1098 | get_block_t get_block, dio_iodone_t end_io, | 1098 | loff_t offset, get_block_t get_block, dio_iodone_t end_io, |
| 1099 | dio_submit_t submit_io, int flags) | 1099 | dio_submit_t submit_io, int flags) |
| 1100 | { | 1100 | { |
| 1101 | unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); | 1101 | unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); |
| 1102 | unsigned blkbits = i_blkbits; | 1102 | unsigned blkbits = i_blkbits; |
| @@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1110 | struct blk_plug plug; | 1110 | struct blk_plug plug; |
| 1111 | unsigned long align = offset | iov_iter_alignment(iter); | 1111 | unsigned long align = offset | iov_iter_alignment(iter); |
| 1112 | 1112 | ||
| 1113 | if (rw & WRITE) | ||
| 1114 | rw = WRITE_ODIRECT; | ||
| 1115 | |||
| 1116 | /* | 1113 | /* |
| 1117 | * Avoid references to bdev if not absolutely needed to give | 1114 | * Avoid references to bdev if not absolutely needed to give |
| 1118 | * the early prefetch in the caller enough time. | 1115 | * the early prefetch in the caller enough time. |
| @@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1127 | } | 1124 | } |
| 1128 | 1125 | ||
| 1129 | /* watch out for a 0 len io from a tricksy fs */ | 1126 | /* watch out for a 0 len io from a tricksy fs */ |
| 1130 | if (rw == READ && !iov_iter_count(iter)) | 1127 | if (iov_iter_rw(iter) == READ && !iov_iter_count(iter)) |
| 1131 | return 0; | 1128 | return 0; |
| 1132 | 1129 | ||
| 1133 | dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); | 1130 | dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); |
| @@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1143 | 1140 | ||
| 1144 | dio->flags = flags; | 1141 | dio->flags = flags; |
| 1145 | if (dio->flags & DIO_LOCKING) { | 1142 | if (dio->flags & DIO_LOCKING) { |
| 1146 | if (rw == READ) { | 1143 | if (iov_iter_rw(iter) == READ) { |
| 1147 | struct address_space *mapping = | 1144 | struct address_space *mapping = |
| 1148 | iocb->ki_filp->f_mapping; | 1145 | iocb->ki_filp->f_mapping; |
| 1149 | 1146 | ||
| @@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1169 | if (is_sync_kiocb(iocb)) | 1166 | if (is_sync_kiocb(iocb)) |
| 1170 | dio->is_async = false; | 1167 | dio->is_async = false; |
| 1171 | else if (!(dio->flags & DIO_ASYNC_EXTEND) && | 1168 | else if (!(dio->flags & DIO_ASYNC_EXTEND) && |
| 1172 | (rw & WRITE) && end > i_size_read(inode)) | 1169 | iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) |
| 1173 | dio->is_async = false; | 1170 | dio->is_async = false; |
| 1174 | else | 1171 | else |
| 1175 | dio->is_async = true; | 1172 | dio->is_async = true; |
| 1176 | 1173 | ||
| 1177 | dio->inode = inode; | 1174 | dio->inode = inode; |
| 1178 | dio->rw = rw; | 1175 | dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ; |
| 1179 | 1176 | ||
| 1180 | /* | 1177 | /* |
| 1181 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue | 1178 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue |
| 1182 | * so that we can call ->fsync. | 1179 | * so that we can call ->fsync. |
| 1183 | */ | 1180 | */ |
| 1184 | if (dio->is_async && (rw & WRITE) && | 1181 | if (dio->is_async && iov_iter_rw(iter) == WRITE && |
| 1185 | ((iocb->ki_filp->f_flags & O_DSYNC) || | 1182 | ((iocb->ki_filp->f_flags & O_DSYNC) || |
| 1186 | IS_SYNC(iocb->ki_filp->f_mapping->host))) { | 1183 | IS_SYNC(iocb->ki_filp->f_mapping->host))) { |
| 1187 | retval = dio_set_defer_completion(dio); | 1184 | retval = dio_set_defer_completion(dio); |
| @@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1274 | * we can let i_mutex go now that its achieved its purpose | 1271 | * we can let i_mutex go now that its achieved its purpose |
| 1275 | * of protecting us from looking up uninitialized blocks. | 1272 | * of protecting us from looking up uninitialized blocks. |
| 1276 | */ | 1273 | */ |
| 1277 | if (rw == READ && (dio->flags & DIO_LOCKING)) | 1274 | if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) |
| 1278 | mutex_unlock(&dio->inode->i_mutex); | 1275 | mutex_unlock(&dio->inode->i_mutex); |
| 1279 | 1276 | ||
| 1280 | /* | 1277 | /* |
| @@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1286 | */ | 1283 | */ |
| 1287 | BUG_ON(retval == -EIOCBQUEUED); | 1284 | BUG_ON(retval == -EIOCBQUEUED); |
| 1288 | if (dio->is_async && retval == 0 && dio->result && | 1285 | if (dio->is_async && retval == 0 && dio->result && |
| 1289 | (rw == READ || dio->result == count)) | 1286 | (iov_iter_rw(iter) == READ || dio->result == count)) |
| 1290 | retval = -EIOCBQUEUED; | 1287 | retval = -EIOCBQUEUED; |
| 1291 | else | 1288 | else |
| 1292 | dio_await_completion(dio); | 1289 | dio_await_completion(dio); |
| @@ -1300,11 +1297,11 @@ out: | |||
| 1300 | return retval; | 1297 | return retval; |
| 1301 | } | 1298 | } |
| 1302 | 1299 | ||
| 1303 | ssize_t | 1300 | ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
| 1304 | __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1301 | struct block_device *bdev, struct iov_iter *iter, |
| 1305 | struct block_device *bdev, struct iov_iter *iter, loff_t offset, | 1302 | loff_t offset, get_block_t get_block, |
| 1306 | get_block_t get_block, dio_iodone_t end_io, | 1303 | dio_iodone_t end_io, dio_submit_t submit_io, |
| 1307 | dio_submit_t submit_io, int flags) | 1304 | int flags) |
| 1308 | { | 1305 | { |
| 1309 | /* | 1306 | /* |
| 1310 | * The block device state is needed in the end to finally | 1307 | * The block device state is needed in the end to finally |
| @@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1318 | prefetch(bdev->bd_queue); | 1315 | prefetch(bdev->bd_queue); |
| 1319 | prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); | 1316 | prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); |
| 1320 | 1317 | ||
| 1321 | return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset, | 1318 | return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, |
| 1322 | get_block, end_io, submit_io, flags); | 1319 | end_io, submit_io, flags); |
| 1323 | } | 1320 | } |
| 1324 | 1321 | ||
| 1325 | EXPORT_SYMBOL(__blockdev_direct_IO); | 1322 | EXPORT_SYMBOL(__blockdev_direct_IO); |
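With do_blockdev_direct_IO() now reading the direction from the iterator, the exported __blockdev_direct_IO() and blockdev_direct_IO() wrappers lose their rw parameter as well. The sketch below shows the resulting caller pattern, including the trim-on-failed-extending-write cleanup that ext2, fat, hfs and others perform later in this diff; the example_* names are placeholders for the filesystem's own helpers.

/*
 * Hypothetical ->direct_IO built on the reworked wrapper; the calls to
 * blockdev_direct_IO() and iov_iter_rw() follow the signatures used
 * throughout this series.
 */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                 loff_t offset)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                 example_get_block);

        /*
         * A failed extending write may have instantiated blocks past
         * i_size; example_write_failed() is a placeholder for the
         * filesystem's cleanup (cf. ext2_write_failed below).
         */
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                example_write_failed(inode->i_mapping, offset + count);
        return ret;
}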
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index a198e94813fe..35073aaec6e0 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
| @@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset, | |||
| 963 | 963 | ||
| 964 | 964 | ||
| 965 | /* TODO: Should be easy enough to do proprly */ | 965 | /* TODO: Should be easy enough to do proprly */ |
| 966 | static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb, | 966 | static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 967 | struct iov_iter *iter, loff_t offset) | 967 | loff_t offset) |
| 968 | { | 968 | { |
| 969 | return 0; | 969 | return 0; |
| 970 | } | 970 | } |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index b29eb6747116..5d9213963fae 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
| @@ -851,8 +851,7 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block) | |||
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | static ssize_t | 853 | static ssize_t |
| 854 | ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | 854 | ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) |
| 855 | loff_t offset) | ||
| 856 | { | 855 | { |
| 857 | struct file *file = iocb->ki_filp; | 856 | struct file *file = iocb->ki_filp; |
| 858 | struct address_space *mapping = file->f_mapping; | 857 | struct address_space *mapping = file->f_mapping; |
| @@ -861,12 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 861 | ssize_t ret; | 860 | ssize_t ret; |
| 862 | 861 | ||
| 863 | if (IS_DAX(inode)) | 862 | if (IS_DAX(inode)) |
| 864 | ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block, | 863 | ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL, |
| 865 | NULL, DIO_LOCKING); | 864 | DIO_LOCKING); |
| 866 | else | 865 | else |
| 867 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, | 866 | ret = blockdev_direct_IO(iocb, inode, iter, offset, |
| 868 | ext2_get_block); | 867 | ext2_get_block); |
| 869 | if (ret < 0 && (rw & WRITE)) | 868 | if (ret < 0 && iov_iter_rw(iter) == WRITE) |
| 870 | ext2_write_failed(mapping, offset + count); | 869 | ext2_write_failed(mapping, offset + count); |
| 871 | return ret; | 870 | return ret; |
| 872 | } | 871 | } |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index db07ffbe7c85..13c0868c7160 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
| @@ -1820,8 +1820,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait) | |||
| 1820 | * crashes then stale disk data _may_ be exposed inside the file. But current | 1820 | * crashes then stale disk data _may_ be exposed inside the file. But current |
| 1821 | * VFS code falls back into buffered path in that case so we are safe. | 1821 | * VFS code falls back into buffered path in that case so we are safe. |
| 1822 | */ | 1822 | */ |
| 1823 | static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | 1823 | static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 1824 | struct iov_iter *iter, loff_t offset) | 1824 | loff_t offset) |
| 1825 | { | 1825 | { |
| 1826 | struct file *file = iocb->ki_filp; | 1826 | struct file *file = iocb->ki_filp; |
| 1827 | struct inode *inode = file->f_mapping->host; | 1827 | struct inode *inode = file->f_mapping->host; |
| @@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
| 1832 | size_t count = iov_iter_count(iter); | 1832 | size_t count = iov_iter_count(iter); |
| 1833 | int retries = 0; | 1833 | int retries = 0; |
| 1834 | 1834 | ||
| 1835 | trace_ext3_direct_IO_enter(inode, offset, count, rw); | 1835 | trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); |
| 1836 | 1836 | ||
| 1837 | if (rw == WRITE) { | 1837 | if (iov_iter_rw(iter) == WRITE) { |
| 1838 | loff_t final_size = offset + count; | 1838 | loff_t final_size = offset + count; |
| 1839 | 1839 | ||
| 1840 | if (final_size > inode->i_size) { | 1840 | if (final_size > inode->i_size) { |
| @@ -1856,12 +1856,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
| 1856 | } | 1856 | } |
| 1857 | 1857 | ||
| 1858 | retry: | 1858 | retry: |
| 1859 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block); | 1859 | ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block); |
| 1860 | /* | 1860 | /* |
| 1861 | * In case of error extending write may have instantiated a few | 1861 | * In case of error extending write may have instantiated a few |
| 1862 | * blocks outside i_size. Trim these off again. | 1862 | * blocks outside i_size. Trim these off again. |
| 1863 | */ | 1863 | */ |
| 1864 | if (unlikely((rw & WRITE) && ret < 0)) { | 1864 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 1865 | loff_t isize = i_size_read(inode); | 1865 | loff_t isize = i_size_read(inode); |
| 1866 | loff_t end = offset + count; | 1866 | loff_t end = offset + count; |
| 1867 | 1867 | ||
| @@ -1908,7 +1908,7 @@ retry: | |||
| 1908 | ret = err; | 1908 | ret = err; |
| 1909 | } | 1909 | } |
| 1910 | out: | 1910 | out: |
| 1911 | trace_ext3_direct_IO_exit(inode, offset, count, rw, ret); | 1911 | trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); |
| 1912 | return ret; | 1912 | return ret; |
| 1913 | } | 1913 | } |
| 1914 | 1914 | ||
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 8a3981ea35d8..c8eb32eefc3c 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -2152,8 +2152,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode, | |||
| 2152 | /* indirect.c */ | 2152 | /* indirect.c */ |
| 2153 | extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, | 2153 | extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, |
| 2154 | struct ext4_map_blocks *map, int flags); | 2154 | struct ext4_map_blocks *map, int flags); |
| 2155 | extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | 2155 | extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 2156 | struct iov_iter *iter, loff_t offset); | 2156 | loff_t offset); |
| 2157 | extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); | 2157 | extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); |
| 2158 | extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); | 2158 | extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); |
| 2159 | extern void ext4_ind_truncate(handle_t *, struct inode *inode); | 2159 | extern void ext4_ind_truncate(handle_t *, struct inode *inode); |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7a6defcf3352..e576d682b353 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -95,11 +95,9 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 95 | struct inode *inode = file_inode(iocb->ki_filp); | 95 | struct inode *inode = file_inode(iocb->ki_filp); |
| 96 | struct mutex *aio_mutex = NULL; | 96 | struct mutex *aio_mutex = NULL; |
| 97 | struct blk_plug plug; | 97 | struct blk_plug plug; |
| 98 | int o_direct = io_is_direct(file); | 98 | int o_direct = iocb->ki_flags & IOCB_DIRECT; |
| 99 | int overwrite = 0; | 99 | int overwrite = 0; |
| 100 | size_t length = iov_iter_count(from); | ||
| 101 | ssize_t ret; | 100 | ssize_t ret; |
| 102 | loff_t pos = iocb->ki_pos; | ||
| 103 | 101 | ||
| 104 | /* | 102 | /* |
| 105 | * Unaligned direct AIO must be serialized; see comment above | 103 | * Unaligned direct AIO must be serialized; see comment above |
| @@ -108,16 +106,17 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 108 | if (o_direct && | 106 | if (o_direct && |
| 109 | ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && | 107 | ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && |
| 110 | !is_sync_kiocb(iocb) && | 108 | !is_sync_kiocb(iocb) && |
| 111 | (file->f_flags & O_APPEND || | 109 | (iocb->ki_flags & IOCB_APPEND || |
| 112 | ext4_unaligned_aio(inode, from, pos))) { | 110 | ext4_unaligned_aio(inode, from, iocb->ki_pos))) { |
| 113 | aio_mutex = ext4_aio_mutex(inode); | 111 | aio_mutex = ext4_aio_mutex(inode); |
| 114 | mutex_lock(aio_mutex); | 112 | mutex_lock(aio_mutex); |
| 115 | ext4_unwritten_wait(inode); | 113 | ext4_unwritten_wait(inode); |
| 116 | } | 114 | } |
| 117 | 115 | ||
| 118 | mutex_lock(&inode->i_mutex); | 116 | mutex_lock(&inode->i_mutex); |
| 119 | if (file->f_flags & O_APPEND) | 117 | ret = generic_write_checks(iocb, from); |
| 120 | iocb->ki_pos = pos = i_size_read(inode); | 118 | if (ret <= 0) |
| 119 | goto out; | ||
| 121 | 120 | ||
| 122 | /* | 121 | /* |
| 123 | * If we have encountered a bitmap-format file, the size limit | 122 | * If we have encountered a bitmap-format file, the size limit |
| @@ -126,22 +125,19 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 126 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | 125 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { |
| 127 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 126 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 128 | 127 | ||
| 129 | if ((pos > sbi->s_bitmap_maxbytes) || | 128 | if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) { |
| 130 | (pos == sbi->s_bitmap_maxbytes && length > 0)) { | ||
| 131 | mutex_unlock(&inode->i_mutex); | ||
| 132 | ret = -EFBIG; | 129 | ret = -EFBIG; |
| 133 | goto errout; | 130 | goto out; |
| 134 | } | 131 | } |
| 135 | 132 | iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); | |
| 136 | if (pos + length > sbi->s_bitmap_maxbytes) | ||
| 137 | iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos); | ||
| 138 | } | 133 | } |
| 139 | 134 | ||
| 140 | iocb->private = &overwrite; | 135 | iocb->private = &overwrite; |
| 141 | if (o_direct) { | 136 | if (o_direct) { |
| 137 | size_t length = iov_iter_count(from); | ||
| 138 | loff_t pos = iocb->ki_pos; | ||
| 142 | blk_start_plug(&plug); | 139 | blk_start_plug(&plug); |
| 143 | 140 | ||
| 144 | |||
| 145 | /* check whether we do a DIO overwrite or not */ | 141 | /* check whether we do a DIO overwrite or not */ |
| 146 | if (ext4_should_dioread_nolock(inode) && !aio_mutex && | 142 | if (ext4_should_dioread_nolock(inode) && !aio_mutex && |
| 147 | !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) { | 143 | !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) { |
| @@ -185,7 +181,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 185 | if (o_direct) | 181 | if (o_direct) |
| 186 | blk_finish_plug(&plug); | 182 | blk_finish_plug(&plug); |
| 187 | 183 | ||
| 188 | errout: | 184 | if (aio_mutex) |
| 185 | mutex_unlock(aio_mutex); | ||
| 186 | return ret; | ||
| 187 | |||
| 188 | out: | ||
| 189 | mutex_unlock(&inode->i_mutex); | ||
| 189 | if (aio_mutex) | 190 | if (aio_mutex) |
| 190 | mutex_unlock(aio_mutex); | 191 | mutex_unlock(aio_mutex); |
| 191 | return ret; | 192 | return ret; |
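The ext4_file_write_iter() changes above are the template for the new generic_write_checks() convention: it now takes the kiocb and the iov_iter, applies IOCB_APPEND by moving ki_pos to EOF, truncates the iterator to the permitted length, and returns the remaining byte count, with a value of zero or less meaning error or nothing to write. A minimal, hypothetical write_iter using that convention (example_perform_write is a placeholder):

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        mutex_lock(&inode->i_mutex);
        /* handles IOCB_APPEND and truncates 'from'; <= 0 means stop */
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /* write iov_iter_count(from) bytes at iocb->ki_pos */
        ret = example_perform_write(iocb, from);
out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}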
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 740c7871c117..3580629e42d3 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c | |||
| @@ -642,8 +642,8 @@ out: | |||
| 642 | * crashes then stale disk data _may_ be exposed inside the file. But current | 642 | * crashes then stale disk data _may_ be exposed inside the file. But current |
| 643 | * VFS code falls back into buffered path in that case so we are safe. | 643 | * VFS code falls back into buffered path in that case so we are safe. |
| 644 | */ | 644 | */ |
| 645 | ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | 645 | ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 646 | struct iov_iter *iter, loff_t offset) | 646 | loff_t offset) |
| 647 | { | 647 | { |
| 648 | struct file *file = iocb->ki_filp; | 648 | struct file *file = iocb->ki_filp; |
| 649 | struct inode *inode = file->f_mapping->host; | 649 | struct inode *inode = file->f_mapping->host; |
| @@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | |||
| 654 | size_t count = iov_iter_count(iter); | 654 | size_t count = iov_iter_count(iter); |
| 655 | int retries = 0; | 655 | int retries = 0; |
| 656 | 656 | ||
| 657 | if (rw == WRITE) { | 657 | if (iov_iter_rw(iter) == WRITE) { |
| 658 | loff_t final_size = offset + count; | 658 | loff_t final_size = offset + count; |
| 659 | 659 | ||
| 660 | if (final_size > inode->i_size) { | 660 | if (final_size > inode->i_size) { |
| @@ -676,7 +676,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | |||
| 676 | } | 676 | } |
| 677 | 677 | ||
| 678 | retry: | 678 | retry: |
| 679 | if (rw == READ && ext4_should_dioread_nolock(inode)) { | 679 | if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) { |
| 680 | /* | 680 | /* |
| 681 | * Nolock dioread optimization may be dynamically disabled | 681 | * Nolock dioread optimization may be dynamically disabled |
| 682 | * via ext4_inode_block_unlocked_dio(). Check inode's state | 682 | * via ext4_inode_block_unlocked_dio(). Check inode's state |
| @@ -690,23 +690,24 @@ retry: | |||
| 690 | goto locked; | 690 | goto locked; |
| 691 | } | 691 | } |
| 692 | if (IS_DAX(inode)) | 692 | if (IS_DAX(inode)) |
| 693 | ret = dax_do_io(rw, iocb, inode, iter, offset, | 693 | ret = dax_do_io(iocb, inode, iter, offset, |
| 694 | ext4_get_block, NULL, 0); | 694 | ext4_get_block, NULL, 0); |
| 695 | else | 695 | else |
| 696 | ret = __blockdev_direct_IO(rw, iocb, inode, | 696 | ret = __blockdev_direct_IO(iocb, inode, |
| 697 | inode->i_sb->s_bdev, iter, offset, | 697 | inode->i_sb->s_bdev, iter, |
| 698 | ext4_get_block, NULL, NULL, 0); | 698 | offset, ext4_get_block, NULL, |
| 699 | NULL, 0); | ||
| 699 | inode_dio_done(inode); | 700 | inode_dio_done(inode); |
| 700 | } else { | 701 | } else { |
| 701 | locked: | 702 | locked: |
| 702 | if (IS_DAX(inode)) | 703 | if (IS_DAX(inode)) |
| 703 | ret = dax_do_io(rw, iocb, inode, iter, offset, | 704 | ret = dax_do_io(iocb, inode, iter, offset, |
| 704 | ext4_get_block, NULL, DIO_LOCKING); | 705 | ext4_get_block, NULL, DIO_LOCKING); |
| 705 | else | 706 | else |
| 706 | ret = blockdev_direct_IO(rw, iocb, inode, iter, | 707 | ret = blockdev_direct_IO(iocb, inode, iter, offset, |
| 707 | offset, ext4_get_block); | 708 | ext4_get_block); |
| 708 | 709 | ||
| 709 | if (unlikely((rw & WRITE) && ret < 0)) { | 710 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 710 | loff_t isize = i_size_read(inode); | 711 | loff_t isize = i_size_read(inode); |
| 711 | loff_t end = offset + count; | 712 | loff_t end = offset + count; |
| 712 | 713 | ||
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 035b7a06f1c3..b49cf6e59953 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -2952,8 +2952,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 2952 | * if the machine crashes during the write. | 2952 | * if the machine crashes during the write. |
| 2953 | * | 2953 | * |
| 2954 | */ | 2954 | */ |
| 2955 | static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | 2955 | static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 2956 | struct iov_iter *iter, loff_t offset) | 2956 | loff_t offset) |
| 2957 | { | 2957 | { |
| 2958 | struct file *file = iocb->ki_filp; | 2958 | struct file *file = iocb->ki_filp; |
| 2959 | struct inode *inode = file->f_mapping->host; | 2959 | struct inode *inode = file->f_mapping->host; |
| @@ -2966,8 +2966,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 2966 | ext4_io_end_t *io_end = NULL; | 2966 | ext4_io_end_t *io_end = NULL; |
| 2967 | 2967 | ||
| 2968 | /* Use the old path for reads and writes beyond i_size. */ | 2968 | /* Use the old path for reads and writes beyond i_size. */ |
| 2969 | if (rw != WRITE || final_size > inode->i_size) | 2969 | if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size) |
| 2970 | return ext4_ind_direct_IO(rw, iocb, iter, offset); | 2970 | return ext4_ind_direct_IO(iocb, iter, offset); |
| 2971 | 2971 | ||
| 2972 | BUG_ON(iocb->private == NULL); | 2972 | BUG_ON(iocb->private == NULL); |
| 2973 | 2973 | ||
| @@ -2976,7 +2976,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 2976 | * conversion. This also disallows race between truncate() and | 2976 | * conversion. This also disallows race between truncate() and |
| 2977 | * overwrite DIO as i_dio_count needs to be incremented under i_mutex. | 2977 | * overwrite DIO as i_dio_count needs to be incremented under i_mutex. |
| 2978 | */ | 2978 | */ |
| 2979 | if (rw == WRITE) | 2979 | if (iov_iter_rw(iter) == WRITE) |
| 2980 | atomic_inc(&inode->i_dio_count); | 2980 | atomic_inc(&inode->i_dio_count); |
| 2981 | 2981 | ||
| 2982 | /* If we do a overwrite dio, i_mutex locking can be released */ | 2982 | /* If we do a overwrite dio, i_mutex locking can be released */ |
| @@ -3034,10 +3034,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3034 | dio_flags = DIO_LOCKING; | 3034 | dio_flags = DIO_LOCKING; |
| 3035 | } | 3035 | } |
| 3036 | if (IS_DAX(inode)) | 3036 | if (IS_DAX(inode)) |
| 3037 | ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func, | 3037 | ret = dax_do_io(iocb, inode, iter, offset, get_block_func, |
| 3038 | ext4_end_io_dio, dio_flags); | 3038 | ext4_end_io_dio, dio_flags); |
| 3039 | else | 3039 | else |
| 3040 | ret = __blockdev_direct_IO(rw, iocb, inode, | 3040 | ret = __blockdev_direct_IO(iocb, inode, |
| 3041 | inode->i_sb->s_bdev, iter, offset, | 3041 | inode->i_sb->s_bdev, iter, offset, |
| 3042 | get_block_func, | 3042 | get_block_func, |
| 3043 | ext4_end_io_dio, NULL, dio_flags); | 3043 | ext4_end_io_dio, NULL, dio_flags); |
| @@ -3078,7 +3078,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3078 | } | 3078 | } |
| 3079 | 3079 | ||
| 3080 | retake_lock: | 3080 | retake_lock: |
| 3081 | if (rw == WRITE) | 3081 | if (iov_iter_rw(iter) == WRITE) |
| 3082 | inode_dio_done(inode); | 3082 | inode_dio_done(inode); |
| 3083 | /* take i_mutex locking again if we do a ovewrite dio */ | 3083 | /* take i_mutex locking again if we do a ovewrite dio */ |
| 3084 | if (overwrite) { | 3084 | if (overwrite) { |
| @@ -3089,8 +3089,8 @@ retake_lock: | |||
| 3089 | return ret; | 3089 | return ret; |
| 3090 | } | 3090 | } |
| 3091 | 3091 | ||
| 3092 | static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | 3092 | static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 3093 | struct iov_iter *iter, loff_t offset) | 3093 | loff_t offset) |
| 3094 | { | 3094 | { |
| 3095 | struct file *file = iocb->ki_filp; | 3095 | struct file *file = iocb->ki_filp; |
| 3096 | struct inode *inode = file->f_mapping->host; | 3096 | struct inode *inode = file->f_mapping->host; |
| @@ -3107,12 +3107,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | |||
| 3107 | if (ext4_has_inline_data(inode)) | 3107 | if (ext4_has_inline_data(inode)) |
| 3108 | return 0; | 3108 | return 0; |
| 3109 | 3109 | ||
| 3110 | trace_ext4_direct_IO_enter(inode, offset, count, rw); | 3110 | trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); |
| 3111 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | 3111 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
| 3112 | ret = ext4_ext_direct_IO(rw, iocb, iter, offset); | 3112 | ret = ext4_ext_direct_IO(iocb, iter, offset); |
| 3113 | else | 3113 | else |
| 3114 | ret = ext4_ind_direct_IO(rw, iocb, iter, offset); | 3114 | ret = ext4_ind_direct_IO(iocb, iter, offset); |
| 3115 | trace_ext4_direct_IO_exit(inode, offset, count, rw, ret); | 3115 | trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); |
| 3116 | return ret; | 3116 | return ret; |
| 3117 | } | 3117 | } |
| 3118 | 3118 | ||
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 497f8515d205..319eda511c4f 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
| @@ -1118,12 +1118,12 @@ static int f2fs_write_end(struct file *file, | |||
| 1118 | return copied; | 1118 | return copied; |
| 1119 | } | 1119 | } |
| 1120 | 1120 | ||
| 1121 | static int check_direct_IO(struct inode *inode, int rw, | 1121 | static int check_direct_IO(struct inode *inode, struct iov_iter *iter, |
| 1122 | struct iov_iter *iter, loff_t offset) | 1122 | loff_t offset) |
| 1123 | { | 1123 | { |
| 1124 | unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; | 1124 | unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; |
| 1125 | 1125 | ||
| 1126 | if (rw == READ) | 1126 | if (iov_iter_rw(iter) == READ) |
| 1127 | return 0; | 1127 | return 0; |
| 1128 | 1128 | ||
| 1129 | if (offset & blocksize_mask) | 1129 | if (offset & blocksize_mask) |
| @@ -1135,8 +1135,8 @@ static int check_direct_IO(struct inode *inode, int rw, | |||
| 1135 | return 0; | 1135 | return 0; |
| 1136 | } | 1136 | } |
| 1137 | 1137 | ||
| 1138 | static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, | 1138 | static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 1139 | struct iov_iter *iter, loff_t offset) | 1139 | loff_t offset) |
| 1140 | { | 1140 | { |
| 1141 | struct file *file = iocb->ki_filp; | 1141 | struct file *file = iocb->ki_filp; |
| 1142 | struct address_space *mapping = file->f_mapping; | 1142 | struct address_space *mapping = file->f_mapping; |
| @@ -1151,19 +1151,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, | |||
| 1151 | return err; | 1151 | return err; |
| 1152 | } | 1152 | } |
| 1153 | 1153 | ||
| 1154 | if (check_direct_IO(inode, rw, iter, offset)) | 1154 | if (check_direct_IO(inode, iter, offset)) |
| 1155 | return 0; | 1155 | return 0; |
| 1156 | 1156 | ||
| 1157 | trace_f2fs_direct_IO_enter(inode, offset, count, rw); | 1157 | trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); |
| 1158 | 1158 | ||
| 1159 | if (rw & WRITE) | 1159 | if (iov_iter_rw(iter) == WRITE) |
| 1160 | __allocate_data_blocks(inode, offset, count); | 1160 | __allocate_data_blocks(inode, offset, count); |
| 1161 | 1161 | ||
| 1162 | err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block); | 1162 | err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block); |
| 1163 | if (err < 0 && (rw & WRITE)) | 1163 | if (err < 0 && iov_iter_rw(iter) == WRITE) |
| 1164 | f2fs_write_failed(mapping, offset + count); | 1164 | f2fs_write_failed(mapping, offset + count); |
| 1165 | 1165 | ||
| 1166 | trace_f2fs_direct_IO_exit(inode, offset, count, rw, err); | 1166 | trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); |
| 1167 | 1167 | ||
| 1168 | return err; | 1168 | return err; |
| 1169 | } | 1169 | } |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 8521207de229..41b729933638 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -245,8 +245,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping, | |||
| 245 | return err; | 245 | return err; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, | 248 | static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 249 | struct iov_iter *iter, | ||
| 250 | loff_t offset) | 249 | loff_t offset) |
| 251 | { | 250 | { |
| 252 | struct file *file = iocb->ki_filp; | 251 | struct file *file = iocb->ki_filp; |
| @@ -255,7 +254,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, | |||
| 255 | size_t count = iov_iter_count(iter); | 254 | size_t count = iov_iter_count(iter); |
| 256 | ssize_t ret; | 255 | ssize_t ret; |
| 257 | 256 | ||
| 258 | if (rw == WRITE) { | 257 | if (iov_iter_rw(iter) == WRITE) { |
| 259 | /* | 258 | /* |
| 260 | * FIXME: blockdev_direct_IO() doesn't use ->write_begin(), | 259 | * FIXME: blockdev_direct_IO() doesn't use ->write_begin(), |
| 261 | * so we need to update the ->mmu_private to block boundary. | 260 | * so we need to update the ->mmu_private to block boundary. |
| @@ -274,8 +273,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, | |||
| 274 | * FAT need to use the DIO_LOCKING for avoiding the race | 273 | * FAT need to use the DIO_LOCKING for avoiding the race |
| 275 | * condition of fat_get_block() and ->truncate(). | 274 | * condition of fat_get_block() and ->truncate(). |
| 276 | */ | 275 | */ |
| 277 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block); | 276 | ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block); |
| 278 | if (ret < 0 && (rw & WRITE)) | 277 | if (ret < 0 && iov_iter_rw(iter) == WRITE) |
| 279 | fat_write_failed(mapping, offset + count); | 278 | fat_write_failed(mapping, offset + count); |
| 280 | 279 | ||
| 281 | return ret; | 280 | return ret; |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e1afdd7abf90..5ef05b5c4cff 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -1145,13 +1145,11 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1145 | { | 1145 | { |
| 1146 | struct file *file = iocb->ki_filp; | 1146 | struct file *file = iocb->ki_filp; |
| 1147 | struct address_space *mapping = file->f_mapping; | 1147 | struct address_space *mapping = file->f_mapping; |
| 1148 | size_t count = iov_iter_count(from); | ||
| 1149 | ssize_t written = 0; | 1148 | ssize_t written = 0; |
| 1150 | ssize_t written_buffered = 0; | 1149 | ssize_t written_buffered = 0; |
| 1151 | struct inode *inode = mapping->host; | 1150 | struct inode *inode = mapping->host; |
| 1152 | ssize_t err; | 1151 | ssize_t err; |
| 1153 | loff_t endbyte = 0; | 1152 | loff_t endbyte = 0; |
| 1154 | loff_t pos = iocb->ki_pos; | ||
| 1155 | 1153 | ||
| 1156 | if (get_fuse_conn(inode)->writeback_cache) { | 1154 | if (get_fuse_conn(inode)->writeback_cache) { |
| 1157 | /* Update size (EOF optimization) and mode (SUID clearing) */ | 1155 | /* Update size (EOF optimization) and mode (SUID clearing) */ |
| @@ -1167,14 +1165,10 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1167 | /* We can write back this queue in page reclaim */ | 1165 | /* We can write back this queue in page reclaim */ |
| 1168 | current->backing_dev_info = inode_to_bdi(inode); | 1166 | current->backing_dev_info = inode_to_bdi(inode); |
| 1169 | 1167 | ||
| 1170 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | 1168 | err = generic_write_checks(iocb, from); |
| 1171 | if (err) | 1169 | if (err <= 0) |
| 1172 | goto out; | ||
| 1173 | |||
| 1174 | if (count == 0) | ||
| 1175 | goto out; | 1170 | goto out; |
| 1176 | 1171 | ||
| 1177 | iov_iter_truncate(from, count); | ||
| 1178 | err = file_remove_suid(file); | 1172 | err = file_remove_suid(file); |
| 1179 | if (err) | 1173 | if (err) |
| 1180 | goto out; | 1174 | goto out; |
| @@ -1183,7 +1177,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1183 | if (err) | 1177 | if (err) |
| 1184 | goto out; | 1178 | goto out; |
| 1185 | 1179 | ||
| 1186 | if (file->f_flags & O_DIRECT) { | 1180 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 1181 | loff_t pos = iocb->ki_pos; | ||
| 1187 | written = generic_file_direct_write(iocb, from, pos); | 1182 | written = generic_file_direct_write(iocb, from, pos); |
| 1188 | if (written < 0 || !iov_iter_count(from)) | 1183 | if (written < 0 || !iov_iter_count(from)) |
| 1189 | goto out; | 1184 | goto out; |
| @@ -1209,9 +1204,9 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1209 | written += written_buffered; | 1204 | written += written_buffered; |
| 1210 | iocb->ki_pos = pos + written_buffered; | 1205 | iocb->ki_pos = pos + written_buffered; |
| 1211 | } else { | 1206 | } else { |
| 1212 | written = fuse_perform_write(file, mapping, from, pos); | 1207 | written = fuse_perform_write(file, mapping, from, iocb->ki_pos); |
| 1213 | if (written >= 0) | 1208 | if (written >= 0) |
| 1214 | iocb->ki_pos = pos + written; | 1209 | iocb->ki_pos += written; |
| 1215 | } | 1210 | } |
| 1216 | out: | 1211 | out: |
| 1217 | current->backing_dev_info = NULL; | 1212 | current->backing_dev_info = NULL; |
| @@ -1412,7 +1407,6 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1412 | struct file *file = iocb->ki_filp; | 1407 | struct file *file = iocb->ki_filp; |
| 1413 | struct inode *inode = file_inode(file); | 1408 | struct inode *inode = file_inode(file); |
| 1414 | struct fuse_io_priv io = { .async = 0, .file = file }; | 1409 | struct fuse_io_priv io = { .async = 0, .file = file }; |
| 1415 | size_t count = iov_iter_count(from); | ||
| 1416 | ssize_t res; | 1410 | ssize_t res; |
| 1417 | 1411 | ||
| 1418 | if (is_bad_inode(inode)) | 1412 | if (is_bad_inode(inode)) |
| @@ -1420,11 +1414,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1420 | 1414 | ||
| 1421 | /* Don't allow parallel writes to the same file */ | 1415 | /* Don't allow parallel writes to the same file */ |
| 1422 | mutex_lock(&inode->i_mutex); | 1416 | mutex_lock(&inode->i_mutex); |
| 1423 | res = generic_write_checks(file, &iocb->ki_pos, &count, 0); | 1417 | res = generic_write_checks(iocb, from); |
| 1424 | if (!res) { | 1418 | if (res > 0) |
| 1425 | iov_iter_truncate(from, count); | ||
| 1426 | res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); | 1419 | res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); |
| 1427 | } | ||
| 1428 | fuse_invalidate_attr(inode); | 1420 | fuse_invalidate_attr(inode); |
| 1429 | if (res > 0) | 1421 | if (res > 0) |
| 1430 | fuse_write_update_size(inode, iocb->ki_pos); | 1422 | fuse_write_update_size(inode, iocb->ki_pos); |
| @@ -2782,8 +2774,7 @@ static inline loff_t fuse_round_up(loff_t off) | |||
| 2782 | } | 2774 | } |
| 2783 | 2775 | ||
| 2784 | static ssize_t | 2776 | static ssize_t |
| 2785 | fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | 2777 | fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) |
| 2786 | loff_t offset) | ||
| 2787 | { | 2778 | { |
| 2788 | DECLARE_COMPLETION_ONSTACK(wait); | 2779 | DECLARE_COMPLETION_ONSTACK(wait); |
| 2789 | ssize_t ret = 0; | 2780 | ssize_t ret = 0; |
| @@ -2800,15 +2791,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 2800 | inode = file->f_mapping->host; | 2791 | inode = file->f_mapping->host; |
| 2801 | i_size = i_size_read(inode); | 2792 | i_size = i_size_read(inode); |
| 2802 | 2793 | ||
| 2803 | if ((rw == READ) && (offset > i_size)) | 2794 | if ((iov_iter_rw(iter) == READ) && (offset > i_size)) |
| 2804 | return 0; | 2795 | return 0; |
| 2805 | 2796 | ||
| 2806 | /* optimization for short read */ | 2797 | /* optimization for short read */ |
| 2807 | if (async_dio && rw != WRITE && offset + count > i_size) { | 2798 | if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { |
| 2808 | if (offset >= i_size) | 2799 | if (offset >= i_size) |
| 2809 | return 0; | 2800 | return 0; |
| 2810 | count = min_t(loff_t, count, fuse_round_up(i_size - offset)); | 2801 | iov_iter_truncate(iter, fuse_round_up(i_size - offset)); |
| 2811 | iov_iter_truncate(iter, count); | 2802 | count = iov_iter_count(iter); |
| 2812 | } | 2803 | } |
| 2813 | 2804 | ||
| 2814 | io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); | 2805 | io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); |
| @@ -2819,7 +2810,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 2819 | io->bytes = -1; | 2810 | io->bytes = -1; |
| 2820 | io->size = 0; | 2811 | io->size = 0; |
| 2821 | io->offset = offset; | 2812 | io->offset = offset; |
| 2822 | io->write = (rw == WRITE); | 2813 | io->write = (iov_iter_rw(iter) == WRITE); |
| 2823 | io->err = 0; | 2814 | io->err = 0; |
| 2824 | io->file = file; | 2815 | io->file = file; |
| 2825 | /* | 2816 | /* |
| @@ -2834,19 +2825,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 2834 | * to wait on real async I/O requests, so we must submit this request | 2825 | * to wait on real async I/O requests, so we must submit this request |
| 2835 | * synchronously. | 2826 | * synchronously. |
| 2836 | */ | 2827 | */ |
| 2837 | if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) | 2828 | if (!is_sync_kiocb(iocb) && (offset + count > i_size) && |
| 2829 | iov_iter_rw(iter) == WRITE) | ||
| 2838 | io->async = false; | 2830 | io->async = false; |
| 2839 | 2831 | ||
| 2840 | if (io->async && is_sync_kiocb(iocb)) | 2832 | if (io->async && is_sync_kiocb(iocb)) |
| 2841 | io->done = &wait; | 2833 | io->done = &wait; |
| 2842 | 2834 | ||
| 2843 | if (rw == WRITE) { | 2835 | if (iov_iter_rw(iter) == WRITE) { |
| 2844 | ret = generic_write_checks(file, &pos, &count, 0); | 2836 | ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); |
| 2845 | if (!ret) { | ||
| 2846 | iov_iter_truncate(iter, count); | ||
| 2847 | ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); | ||
| 2848 | } | ||
| 2849 | |||
| 2850 | fuse_invalidate_attr(inode); | 2837 | fuse_invalidate_attr(inode); |
| 2851 | } else { | 2838 | } else { |
| 2852 | ret = __fuse_direct_read(io, iter, &pos); | 2839 | ret = __fuse_direct_read(io, iter, &pos); |
| @@ -2865,7 +2852,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 2865 | 2852 | ||
| 2866 | kfree(io); | 2853 | kfree(io); |
| 2867 | 2854 | ||
| 2868 | if (rw == WRITE) { | 2855 | if (iov_iter_rw(iter) == WRITE) { |
| 2869 | if (ret > 0) | 2856 | if (ret > 0) |
| 2870 | fuse_write_update_size(inode, pos); | 2857 | fuse_write_update_size(inode, pos); |
| 2871 | else if (ret < 0 && offset + count > i_size) | 2858 | else if (ret < 0 && offset + count > i_size) |
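fuse_file_write_iter() above also switches its O_DIRECT test from file->f_flags to iocb->ki_flags & IOCB_DIRECT: f_flags on the shared struct file can change at runtime, whereas ki_flags is a per-request snapshot taken when the iocb is set up. The trivial helpers below are an illustration of that check and are not part of the commit.

static bool example_iocb_is_direct(const struct kiocb *iocb)
{
        return iocb->ki_flags & IOCB_DIRECT;    /* fixed for this I/O */
}

static bool example_iocb_is_append(const struct kiocb *iocb)
{
        return iocb->ki_flags & IOCB_APPEND;    /* likewise */
}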
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index a6e6990aea39..5551fea0afd7 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
| @@ -1016,13 +1016,12 @@ out: | |||
| 1016 | /** | 1016 | /** |
| 1017 | * gfs2_ok_for_dio - check that dio is valid on this file | 1017 | * gfs2_ok_for_dio - check that dio is valid on this file |
| 1018 | * @ip: The inode | 1018 | * @ip: The inode |
| 1019 | * @rw: READ or WRITE | ||
| 1020 | * @offset: The offset at which we are reading or writing | 1019 | * @offset: The offset at which we are reading or writing |
| 1021 | * | 1020 | * |
| 1022 | * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) | 1021 | * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) |
| 1023 | * 1 (to accept the i/o request) | 1022 | * 1 (to accept the i/o request) |
| 1024 | */ | 1023 | */ |
| 1025 | static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) | 1024 | static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset) |
| 1026 | { | 1025 | { |
| 1027 | /* | 1026 | /* |
| 1028 | * Should we return an error here? I can't see that O_DIRECT for | 1027 | * Should we return an error here? I can't see that O_DIRECT for |
| @@ -1039,8 +1038,8 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) | |||
| 1039 | 1038 | ||
| 1040 | 1039 | ||
| 1041 | 1040 | ||
| 1042 | static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | 1041 | static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 1043 | struct iov_iter *iter, loff_t offset) | 1042 | loff_t offset) |
| 1044 | { | 1043 | { |
| 1045 | struct file *file = iocb->ki_filp; | 1044 | struct file *file = iocb->ki_filp; |
| 1046 | struct inode *inode = file->f_mapping->host; | 1045 | struct inode *inode = file->f_mapping->host; |
| @@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
| 1061 | rv = gfs2_glock_nq(&gh); | 1060 | rv = gfs2_glock_nq(&gh); |
| 1062 | if (rv) | 1061 | if (rv) |
| 1063 | return rv; | 1062 | return rv; |
| 1064 | rv = gfs2_ok_for_dio(ip, rw, offset); | 1063 | rv = gfs2_ok_for_dio(ip, offset); |
| 1065 | if (rv != 1) | 1064 | if (rv != 1) |
| 1066 | goto out; /* dio not valid, fall back to buffered i/o */ | 1065 | goto out; /* dio not valid, fall back to buffered i/o */ |
| 1067 | 1066 | ||
| @@ -1091,13 +1090,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
| 1091 | rv = filemap_write_and_wait_range(mapping, lstart, end); | 1090 | rv = filemap_write_and_wait_range(mapping, lstart, end); |
| 1092 | if (rv) | 1091 | if (rv) |
| 1093 | goto out; | 1092 | goto out; |
| 1094 | if (rw == WRITE) | 1093 | if (iov_iter_rw(iter) == WRITE) |
| 1095 | truncate_inode_pages_range(mapping, lstart, end); | 1094 | truncate_inode_pages_range(mapping, lstart, end); |
| 1096 | } | 1095 | } |
| 1097 | 1096 | ||
| 1098 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, | 1097 | rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, |
| 1099 | iter, offset, | 1098 | offset, gfs2_get_block_direct, NULL, NULL, 0); |
| 1100 | gfs2_get_block_direct, NULL, NULL, 0); | ||
| 1101 | out: | 1099 | out: |
| 1102 | gfs2_glock_dq(&gh); | 1100 | gfs2_glock_dq(&gh); |
| 1103 | gfs2_holder_uninit(&gh); | 1101 | gfs2_holder_uninit(&gh); |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 207eb4a8135e..31892871ea87 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
| @@ -709,7 +709,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 709 | 709 | ||
| 710 | gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); | 710 | gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); |
| 711 | 711 | ||
| 712 | if (file->f_flags & O_APPEND) { | 712 | if (iocb->ki_flags & IOCB_APPEND) { |
| 713 | struct gfs2_holder gh; | 713 | struct gfs2_holder gh; |
| 714 | 714 | ||
| 715 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); | 715 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); |
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 9337065bcc67..75fd5d873c19 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c | |||
| @@ -124,8 +124,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) | |||
| 124 | return res ? try_to_free_buffers(page) : 0; | 124 | return res ? try_to_free_buffers(page) : 0; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, | 127 | static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 128 | struct iov_iter *iter, loff_t offset) | 128 | loff_t offset) |
| 129 | { | 129 | { |
| 130 | struct file *file = iocb->ki_filp; | 130 | struct file *file = iocb->ki_filp; |
| 131 | struct address_space *mapping = file->f_mapping; | 131 | struct address_space *mapping = file->f_mapping; |
| @@ -133,13 +133,13 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, | |||
| 133 | size_t count = iov_iter_count(iter); | 133 | size_t count = iov_iter_count(iter); |
| 134 | ssize_t ret; | 134 | ssize_t ret; |
| 135 | 135 | ||
| 136 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block); | 136 | ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block); |
| 137 | 137 | ||
| 138 | /* | 138 | /* |
| 139 | * In case of error extending write may have instantiated a few | 139 | * In case of error extending write may have instantiated a few |
| 140 | * blocks outside i_size. Trim these off again. | 140 | * blocks outside i_size. Trim these off again. |
| 141 | */ | 141 | */ |
| 142 | if (unlikely((rw & WRITE) && ret < 0)) { | 142 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 143 | loff_t isize = i_size_read(inode); | 143 | loff_t isize = i_size_read(inode); |
| 144 | loff_t end = offset + count; | 144 | loff_t end = offset + count; |
| 145 | 145 | ||
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 5f86cadb0542..a43811f90935 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
| @@ -122,8 +122,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) | |||
| 122 | return res ? try_to_free_buffers(page) : 0; | 122 | return res ? try_to_free_buffers(page) : 0; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, | 125 | static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 126 | struct iov_iter *iter, loff_t offset) | 126 | loff_t offset) |
| 127 | { | 127 | { |
| 128 | struct file *file = iocb->ki_filp; | 128 | struct file *file = iocb->ki_filp; |
| 129 | struct address_space *mapping = file->f_mapping; | 129 | struct address_space *mapping = file->f_mapping; |
| @@ -131,14 +131,13 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, | |||
| 131 | size_t count = iov_iter_count(iter); | 131 | size_t count = iov_iter_count(iter); |
| 132 | ssize_t ret; | 132 | ssize_t ret; |
| 133 | 133 | ||
| 134 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, | 134 | ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block); |
| 135 | hfsplus_get_block); | ||
| 136 | 135 | ||
| 137 | /* | 136 | /* |
| 138 | * In case of error extending write may have instantiated a few | 137 | * In case of error extending write may have instantiated a few |
| 139 | * blocks outside i_size. Trim these off again. | 138 | * blocks outside i_size. Trim these off again. |
| 140 | */ | 139 | */ |
| 141 | if (unlikely((rw & WRITE) && ret < 0)) { | 140 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 142 | loff_t isize = i_size_read(inode); | 141 | loff_t isize = i_size_read(inode); |
| 143 | loff_t end = offset + count; | 142 | loff_t end = offset + count; |
| 144 | 143 | ||
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 3197aed10614..070dc4b33544 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
| @@ -330,8 +330,8 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block) | |||
| 330 | return generic_block_bmap(mapping, block, jfs_get_block); | 330 | return generic_block_bmap(mapping, block, jfs_get_block); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, | 333 | static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 334 | struct iov_iter *iter, loff_t offset) | 334 | loff_t offset) |
| 335 | { | 335 | { |
| 336 | struct file *file = iocb->ki_filp; | 336 | struct file *file = iocb->ki_filp; |
| 337 | struct address_space *mapping = file->f_mapping; | 337 | struct address_space *mapping = file->f_mapping; |
| @@ -339,13 +339,13 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, | |||
| 339 | size_t count = iov_iter_count(iter); | 339 | size_t count = iov_iter_count(iter); |
| 340 | ssize_t ret; | 340 | ssize_t ret; |
| 341 | 341 | ||
| 342 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block); | 342 | ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block); |
| 343 | 343 | ||
| 344 | /* | 344 | /* |
| 345 | * In case of error extending write may have instantiated a few | 345 | * In case of error extending write may have instantiated a few |
| 346 | * blocks outside i_size. Trim these off again. | 346 | * blocks outside i_size. Trim these off again. |
| 347 | */ | 347 | */ |
| 348 | if (unlikely((rw & WRITE) && ret < 0)) { | 348 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 349 | loff_t isize = i_size_read(inode); | 349 | loff_t isize = i_size_read(inode); |
| 350 | loff_t end = offset + count; | 350 | loff_t end = offset + count; |
| 351 | 351 | ||
diff --git a/fs/namei.c b/fs/namei.c index 76fb76a0818b..ffab2e06e147 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1585,7 +1585,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path, | |||
| 1585 | inode = path->dentry->d_inode; | 1585 | inode = path->dentry->d_inode; |
| 1586 | } | 1586 | } |
| 1587 | err = -ENOENT; | 1587 | err = -ENOENT; |
| 1588 | if (!inode || d_is_negative(path->dentry)) | 1588 | if (d_is_negative(path->dentry)) |
| 1589 | goto out_path_put; | 1589 | goto out_path_put; |
| 1590 | 1590 | ||
| 1591 | if (should_follow_link(path->dentry, follow)) { | 1591 | if (should_follow_link(path->dentry, follow)) { |
| @@ -2310,7 +2310,7 @@ mountpoint_last(struct nameidata *nd, struct path *path) | |||
| 2310 | mutex_unlock(&dir->d_inode->i_mutex); | 2310 | mutex_unlock(&dir->d_inode->i_mutex); |
| 2311 | 2311 | ||
| 2312 | done: | 2312 | done: |
| 2313 | if (!dentry->d_inode || d_is_negative(dentry)) { | 2313 | if (d_is_negative(dentry)) { |
| 2314 | error = -ENOENT; | 2314 | error = -ENOENT; |
| 2315 | dput(dentry); | 2315 | dput(dentry); |
| 2316 | goto out; | 2316 | goto out; |
| @@ -3038,7 +3038,7 @@ retry_lookup: | |||
| 3038 | finish_lookup: | 3038 | finish_lookup: |
| 3039 | /* we _can_ be in RCU mode here */ | 3039 | /* we _can_ be in RCU mode here */ |
| 3040 | error = -ENOENT; | 3040 | error = -ENOENT; |
| 3041 | if (!inode || d_is_negative(path->dentry)) { | 3041 | if (d_is_negative(path->dentry)) { |
| 3042 | path_to_nameidata(path, nd); | 3042 | path_to_nameidata(path, nd); |
| 3043 | goto out; | 3043 | goto out; |
| 3044 | } | 3044 | } |
| @@ -3077,7 +3077,7 @@ finish_open: | |||
| 3077 | error = -ENOTDIR; | 3077 | error = -ENOTDIR; |
| 3078 | if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) | 3078 | if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) |
| 3079 | goto out; | 3079 | goto out; |
| 3080 | if (!S_ISREG(nd->inode->i_mode)) | 3080 | if (!d_is_reg(nd->path.dentry)) |
| 3081 | will_truncate = false; | 3081 | will_truncate = false; |
| 3082 | 3082 | ||
| 3083 | if (will_truncate) { | 3083 | if (will_truncate) { |
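The pathwalk hunks above drop the belt-and-braces `!inode ||` tests and the direct `S_ISREG()` mode check in favour of the dentry-type helpers, which derive the answer from `dentry->d_flags` rather than requiring a populated `d_inode`. A rough before/after illustration, not the actual fs/namei.c code:

```c
/* Illustrative only; the real callers are walk_component(),
 * mountpoint_last() and do_last() in fs/namei.c. */
#include <linux/dcache.h>

static bool lookup_is_dangling(const struct dentry *dentry)
{
	/* was: !dentry->d_inode || d_is_negative(dentry) */
	return d_is_negative(dentry);
}

static bool open_should_truncate(const struct dentry *dentry)
{
	/* was: S_ISREG(nd->inode->i_mode) */
	return d_is_reg(dentry);
}
```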
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 479bf8db264e..011324ce9df2 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c | |||
| @@ -170,20 +170,15 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 170 | struct file *file = iocb->ki_filp; | 170 | struct file *file = iocb->ki_filp; |
| 171 | struct inode *inode = file_inode(file); | 171 | struct inode *inode = file_inode(file); |
| 172 | size_t already_written = 0; | 172 | size_t already_written = 0; |
| 173 | loff_t pos = iocb->ki_pos; | ||
| 174 | size_t count = iov_iter_count(from); | ||
| 175 | size_t bufsize; | 173 | size_t bufsize; |
| 176 | int errno; | 174 | int errno; |
| 177 | void *bouncebuffer; | 175 | void *bouncebuffer; |
| 176 | off_t pos; | ||
| 178 | 177 | ||
| 179 | ncp_dbg(1, "enter %pD2\n", file); | 178 | ncp_dbg(1, "enter %pD2\n", file); |
| 180 | errno = generic_write_checks(file, &pos, &count, 0); | 179 | errno = generic_write_checks(iocb, from); |
| 181 | if (errno) | 180 | if (errno <= 0) |
| 182 | return errno; | 181 | return errno; |
| 183 | iov_iter_truncate(from, count); | ||
| 184 | |||
| 185 | if (!count) | ||
| 186 | return 0; | ||
| 187 | 182 | ||
| 188 | errno = ncp_make_open(inode, O_WRONLY); | 183 | errno = ncp_make_open(inode, O_WRONLY); |
| 189 | if (errno) { | 184 | if (errno) { |
| @@ -201,10 +196,11 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 201 | errno = -EIO; /* -ENOMEM */ | 196 | errno = -EIO; /* -ENOMEM */ |
| 202 | goto outrel; | 197 | goto outrel; |
| 203 | } | 198 | } |
| 199 | pos = iocb->ki_pos; | ||
| 204 | while (iov_iter_count(from)) { | 200 | while (iov_iter_count(from)) { |
| 205 | int written_this_time; | 201 | int written_this_time; |
| 206 | size_t to_write = min_t(size_t, | 202 | size_t to_write = min_t(size_t, |
| 207 | bufsize - ((off_t)pos % bufsize), | 203 | bufsize - (pos % bufsize), |
| 208 | iov_iter_count(from)); | 204 | iov_iter_count(from)); |
| 209 | 205 | ||
| 210 | if (copy_from_iter(bouncebuffer, to_write, from) != to_write) { | 206 | if (copy_from_iter(bouncebuffer, to_write, from) != to_write) { |
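The ncpfs hunk is the smallest example of the new `generic_write_checks()` contract used throughout this series: it now takes the kiocb and the iov_iter, does the O_APPEND repositioning and size-limit clamping itself (shrinking the iterator as needed), and returns the number of bytes to write, zero for a no-op, or a negative errno, so callers reduce to a single `<= 0` test. A hedged sketch of a write_iter prologue in this style; `foo_do_write()` is a hypothetical helper:

```c
/* Sketch of the new generic_write_checks() calling convention. */
static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	ret = generic_write_checks(iocb, from);	/* count, 0, or -errno */
	if (ret <= 0)
		return ret;

	/*
	 * From here on, iov_iter_count(from) bytes are to be written at
	 * iocb->ki_pos (already repositioned for IOCB_APPEND).
	 */
	return foo_do_write(iocb, from);	/* hypothetical */
}
```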
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c3929fb2ab26..682f65fe09b5 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
| @@ -240,7 +240,6 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, | |||
| 240 | 240 | ||
| 241 | /** | 241 | /** |
| 242 | * nfs_direct_IO - NFS address space operation for direct I/O | 242 | * nfs_direct_IO - NFS address space operation for direct I/O |
| 243 | * @rw: direction (read or write) | ||
| 244 | * @iocb: target I/O control block | 243 | * @iocb: target I/O control block |
| 245 | * @iov: array of vectors that define I/O buffer | 244 | * @iov: array of vectors that define I/O buffer |
| 246 | * @pos: offset in file to begin the operation | 245 | * @pos: offset in file to begin the operation |
| @@ -251,7 +250,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, | |||
| 251 | * shunt off direct read and write requests before the VFS gets them, | 250 | * shunt off direct read and write requests before the VFS gets them, |
| 252 | * so this method is only ever called for swap. | 251 | * so this method is only ever called for swap. |
| 253 | */ | 252 | */ |
| 254 | ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) | 253 | ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) |
| 255 | { | 254 | { |
| 256 | struct inode *inode = iocb->ki_filp->f_mapping->host; | 255 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
| 257 | 256 | ||
| @@ -267,9 +266,9 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t | |||
| 267 | #else | 266 | #else |
| 268 | VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); | 267 | VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); |
| 269 | 268 | ||
| 270 | if (rw == READ) | 269 | if (iov_iter_rw(iter) == READ) |
| 271 | return nfs_file_direct_read(iocb, iter, pos); | 270 | return nfs_file_direct_read(iocb, iter, pos); |
| 272 | return nfs_file_direct_write(iocb, iter, pos); | 271 | return nfs_file_direct_write(iocb, iter); |
| 273 | #endif /* CONFIG_NFS_SWAP */ | 272 | #endif /* CONFIG_NFS_SWAP */ |
| 274 | } | 273 | } |
| 275 | 274 | ||
| @@ -960,8 +959,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, | |||
| 960 | * Note that O_APPEND is not supported for NFS direct writes, as there | 959 | * Note that O_APPEND is not supported for NFS direct writes, as there |
| 961 | * is no atomic O_APPEND write facility in the NFS protocol. | 960 | * is no atomic O_APPEND write facility in the NFS protocol. |
| 962 | */ | 961 | */ |
| 963 | ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, | 962 | ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) |
| 964 | loff_t pos) | ||
| 965 | { | 963 | { |
| 966 | ssize_t result = -EINVAL; | 964 | ssize_t result = -EINVAL; |
| 967 | struct file *file = iocb->ki_filp; | 965 | struct file *file = iocb->ki_filp; |
| @@ -969,25 +967,16 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, | |||
| 969 | struct inode *inode = mapping->host; | 967 | struct inode *inode = mapping->host; |
| 970 | struct nfs_direct_req *dreq; | 968 | struct nfs_direct_req *dreq; |
| 971 | struct nfs_lock_context *l_ctx; | 969 | struct nfs_lock_context *l_ctx; |
| 972 | loff_t end; | 970 | loff_t pos, end; |
| 973 | size_t count = iov_iter_count(iter); | ||
| 974 | end = (pos + count - 1) >> PAGE_CACHE_SHIFT; | ||
| 975 | |||
| 976 | nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count); | ||
| 977 | 971 | ||
| 978 | dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", | 972 | dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", |
| 979 | file, count, (long long) pos); | 973 | file, iov_iter_count(iter), (long long) iocb->ki_pos); |
| 980 | 974 | ||
| 981 | result = generic_write_checks(file, &pos, &count, 0); | 975 | nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, |
| 982 | if (result) | 976 | iov_iter_count(iter)); |
| 983 | goto out; | ||
| 984 | 977 | ||
| 985 | result = -EINVAL; | 978 | pos = iocb->ki_pos; |
| 986 | if ((ssize_t) count < 0) | 979 | end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; |
| 987 | goto out; | ||
| 988 | result = 0; | ||
| 989 | if (!count) | ||
| 990 | goto out; | ||
| 991 | 980 | ||
| 992 | mutex_lock(&inode->i_mutex); | 981 | mutex_lock(&inode->i_mutex); |
| 993 | 982 | ||
| @@ -1002,7 +991,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, | |||
| 1002 | goto out_unlock; | 991 | goto out_unlock; |
| 1003 | } | 992 | } |
| 1004 | 993 | ||
| 1005 | task_io_account_write(count); | 994 | task_io_account_write(iov_iter_count(iter)); |
| 1006 | 995 | ||
| 1007 | result = -ENOMEM; | 996 | result = -ENOMEM; |
| 1008 | dreq = nfs_direct_req_alloc(); | 997 | dreq = nfs_direct_req_alloc(); |
| @@ -1010,7 +999,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, | |||
| 1010 | goto out_unlock; | 999 | goto out_unlock; |
| 1011 | 1000 | ||
| 1012 | dreq->inode = inode; | 1001 | dreq->inode = inode; |
| 1013 | dreq->bytes_left = count; | 1002 | dreq->bytes_left = iov_iter_count(iter); |
| 1014 | dreq->io_start = pos; | 1003 | dreq->io_start = pos; |
| 1015 | dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); | 1004 | dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); |
| 1016 | l_ctx = nfs_get_lock_context(dreq->ctx); | 1005 | l_ctx = nfs_get_lock_context(dreq->ctx); |
| @@ -1050,7 +1039,6 @@ out_release: | |||
| 1050 | nfs_direct_req_release(dreq); | 1039 | nfs_direct_req_release(dreq); |
| 1051 | out_unlock: | 1040 | out_unlock: |
| 1052 | mutex_unlock(&inode->i_mutex); | 1041 | mutex_unlock(&inode->i_mutex); |
| 1053 | out: | ||
| 1054 | return result; | 1042 | return result; |
| 1055 | } | 1043 | } |
| 1056 | 1044 | ||
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f6a3adedf027..c40e4363e746 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
| @@ -170,7 +170,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) | |||
| 170 | struct inode *inode = file_inode(iocb->ki_filp); | 170 | struct inode *inode = file_inode(iocb->ki_filp); |
| 171 | ssize_t result; | 171 | ssize_t result; |
| 172 | 172 | ||
| 173 | if (iocb->ki_filp->f_flags & O_DIRECT) | 173 | if (iocb->ki_flags & IOCB_DIRECT) |
| 174 | return nfs_file_direct_read(iocb, to, iocb->ki_pos); | 174 | return nfs_file_direct_read(iocb, to, iocb->ki_pos); |
| 175 | 175 | ||
| 176 | dprintk("NFS: read(%pD2, %zu@%lu)\n", | 176 | dprintk("NFS: read(%pD2, %zu@%lu)\n", |
| @@ -674,17 +674,20 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) | |||
| 674 | unsigned long written = 0; | 674 | unsigned long written = 0; |
| 675 | ssize_t result; | 675 | ssize_t result; |
| 676 | size_t count = iov_iter_count(from); | 676 | size_t count = iov_iter_count(from); |
| 677 | loff_t pos = iocb->ki_pos; | ||
| 678 | 677 | ||
| 679 | result = nfs_key_timeout_notify(file, inode); | 678 | result = nfs_key_timeout_notify(file, inode); |
| 680 | if (result) | 679 | if (result) |
| 681 | return result; | 680 | return result; |
| 682 | 681 | ||
| 683 | if (file->f_flags & O_DIRECT) | 682 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 684 | return nfs_file_direct_write(iocb, from, pos); | 683 | result = generic_write_checks(iocb, from); |
| 684 | if (result <= 0) | ||
| 685 | return result; | ||
| 686 | return nfs_file_direct_write(iocb, from); | ||
| 687 | } | ||
| 685 | 688 | ||
| 686 | dprintk("NFS: write(%pD2, %zu@%Ld)\n", | 689 | dprintk("NFS: write(%pD2, %zu@%Ld)\n", |
| 687 | file, count, (long long) pos); | 690 | file, count, (long long) iocb->ki_pos); |
| 688 | 691 | ||
| 689 | result = -EBUSY; | 692 | result = -EBUSY; |
| 690 | if (IS_SWAPFILE(inode)) | 693 | if (IS_SWAPFILE(inode)) |
| @@ -692,7 +695,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) | |||
| 692 | /* | 695 | /* |
| 693 | * O_APPEND implies that we must revalidate the file length. | 696 | * O_APPEND implies that we must revalidate the file length. |
| 694 | */ | 697 | */ |
| 695 | if (file->f_flags & O_APPEND) { | 698 | if (iocb->ki_flags & IOCB_APPEND) { |
| 696 | result = nfs_revalidate_file_size(inode, file); | 699 | result = nfs_revalidate_file_size(inode, file); |
| 697 | if (result) | 700 | if (result) |
| 698 | goto out; | 701 | goto out; |
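In fs/nfs/file.c the O_DIRECT and O_APPEND tests move from `file->f_flags`, which fcntl(2) can flip mid-write, to `iocb->ki_flags`, which is filled in once when the kiocb is initialised (see the init_sync_kiocb() change in include/linux/fs.h further down). The same substitution recurs in ocfs2 and xfs below. A minimal sketch of the pattern, with `foo_direct_write()` and `foo_revalidate_size()` as hypothetical placeholders:

```c
/* Sketch: per-I/O flags are read from the kiocb, not the struct file. */
static ssize_t foo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = generic_write_checks(iocb, from);
		if (ret <= 0)
			return ret;
		return foo_direct_write(iocb, from);	/* hypothetical */
	}

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = foo_revalidate_size(iocb->ki_filp);	/* hypothetical */
		if (ret)
			return ret;
	}
	return generic_file_write_iter(iocb, from);	/* buffered path */
}
```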
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 568ecf0a880f..b8f5c63f77b2 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
| @@ -117,15 +117,15 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, | |||
| 117 | 117 | ||
| 118 | static void nfs_readpage_release(struct nfs_page *req) | 118 | static void nfs_readpage_release(struct nfs_page *req) |
| 119 | { | 119 | { |
| 120 | struct inode *d_inode = req->wb_context->dentry->d_inode; | 120 | struct inode *inode = req->wb_context->dentry->d_inode; |
| 121 | 121 | ||
| 122 | dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id, | 122 | dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, |
| 123 | (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes, | 123 | (unsigned long long)NFS_FILEID(inode), req->wb_bytes, |
| 124 | (long long)req_offset(req)); | 124 | (long long)req_offset(req)); |
| 125 | 125 | ||
| 126 | if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { | 126 | if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { |
| 127 | if (PageUptodate(req->wb_page)) | 127 | if (PageUptodate(req->wb_page)) |
| 128 | nfs_readpage_to_fscache(d_inode, req->wb_page, 0); | 128 | nfs_readpage_to_fscache(inode, req->wb_page, 0); |
| 129 | 129 | ||
| 130 | unlock_page(req->wb_page); | 130 | unlock_page(req->wb_page); |
| 131 | } | 131 | } |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index ab4987bc637f..36f057fa8aa3 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
| @@ -305,8 +305,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, | |||
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | static ssize_t | 307 | static ssize_t |
| 308 | nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | 308 | nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) |
| 309 | loff_t offset) | ||
| 310 | { | 309 | { |
| 311 | struct file *file = iocb->ki_filp; | 310 | struct file *file = iocb->ki_filp; |
| 312 | struct address_space *mapping = file->f_mapping; | 311 | struct address_space *mapping = file->f_mapping; |
| @@ -314,18 +313,17 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, | |||
| 314 | size_t count = iov_iter_count(iter); | 313 | size_t count = iov_iter_count(iter); |
| 315 | ssize_t size; | 314 | ssize_t size; |
| 316 | 315 | ||
| 317 | if (rw == WRITE) | 316 | if (iov_iter_rw(iter) == WRITE) |
| 318 | return 0; | 317 | return 0; |
| 319 | 318 | ||
| 320 | /* Needs synchronization with the cleaner */ | 319 | /* Needs synchronization with the cleaner */ |
| 321 | size = blockdev_direct_IO(rw, iocb, inode, iter, offset, | 320 | size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block); |
| 322 | nilfs_get_block); | ||
| 323 | 321 | ||
| 324 | /* | 322 | /* |
| 325 | * In case of error extending write may have instantiated a few | 323 | * In case of error extending write may have instantiated a few |
| 326 | * blocks outside i_size. Trim these off again. | 324 | * blocks outside i_size. Trim these off again. |
| 327 | */ | 325 | */ |
| 328 | if (unlikely((rw & WRITE) && size < 0)) { | 326 | if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) { |
| 329 | loff_t isize = i_size_read(inode); | 327 | loff_t isize = i_size_read(inode); |
| 330 | loff_t end = offset + count; | 328 | loff_t end = offset + count; |
| 331 | 329 | ||
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 840e95e3f1d2..7bb487e663b4 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c | |||
| @@ -328,25 +328,25 @@ err_out: | |||
| 328 | return err; | 328 | return err; |
| 329 | } | 329 | } |
| 330 | 330 | ||
| 331 | static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos, | 331 | static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb, |
| 332 | size_t *count) | 332 | struct iov_iter *from) |
| 333 | { | 333 | { |
| 334 | loff_t pos; | 334 | loff_t pos; |
| 335 | s64 end, ll; | 335 | s64 end, ll; |
| 336 | ssize_t err; | 336 | ssize_t err; |
| 337 | unsigned long flags; | 337 | unsigned long flags; |
| 338 | struct file *file = iocb->ki_filp; | ||
| 338 | struct inode *vi = file_inode(file); | 339 | struct inode *vi = file_inode(file); |
| 339 | ntfs_inode *base_ni, *ni = NTFS_I(vi); | 340 | ntfs_inode *base_ni, *ni = NTFS_I(vi); |
| 340 | ntfs_volume *vol = ni->vol; | 341 | ntfs_volume *vol = ni->vol; |
| 341 | 342 | ||
| 342 | ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " | 343 | ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " |
| 343 | "0x%llx, count 0x%lx.", vi->i_ino, | 344 | "0x%llx, count 0x%zx.", vi->i_ino, |
| 344 | (unsigned)le32_to_cpu(ni->type), | 345 | (unsigned)le32_to_cpu(ni->type), |
| 345 | (unsigned long long)*ppos, (unsigned long)*count); | 346 | (unsigned long long)iocb->ki_pos, |
| 346 | /* We can write back this queue in page reclaim. */ | 347 | iov_iter_count(from)); |
| 347 | current->backing_dev_info = inode_to_bdi(vi); | 348 | err = generic_write_checks(iocb, from); |
| 348 | err = generic_write_checks(file, ppos, count, S_ISBLK(vi->i_mode)); | 349 | if (unlikely(err <= 0)) |
| 349 | if (unlikely(err)) | ||
| 350 | goto out; | 350 | goto out; |
| 351 | /* | 351 | /* |
| 352 | * All checks have passed. Before we start doing any writing we want | 352 | * All checks have passed. Before we start doing any writing we want |
| @@ -379,8 +379,6 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos, | |||
| 379 | err = -EOPNOTSUPP; | 379 | err = -EOPNOTSUPP; |
| 380 | goto out; | 380 | goto out; |
| 381 | } | 381 | } |
| 382 | if (*count == 0) | ||
| 383 | goto out; | ||
| 384 | base_ni = ni; | 382 | base_ni = ni; |
| 385 | if (NInoAttr(ni)) | 383 | if (NInoAttr(ni)) |
| 386 | base_ni = ni->ext.base_ntfs_ino; | 384 | base_ni = ni->ext.base_ntfs_ino; |
| @@ -392,9 +390,9 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos, | |||
| 392 | * cannot fail either so there is no need to check the return code. | 390 | * cannot fail either so there is no need to check the return code. |
| 393 | */ | 391 | */ |
| 394 | file_update_time(file); | 392 | file_update_time(file); |
| 395 | pos = *ppos; | 393 | pos = iocb->ki_pos; |
| 396 | /* The first byte after the last cluster being written to. */ | 394 | /* The first byte after the last cluster being written to. */ |
| 397 | end = (pos + *count + vol->cluster_size_mask) & | 395 | end = (pos + iov_iter_count(from) + vol->cluster_size_mask) & |
| 398 | ~(u64)vol->cluster_size_mask; | 396 | ~(u64)vol->cluster_size_mask; |
| 399 | /* | 397 | /* |
| 400 | * If the write goes beyond the allocated size, extend the allocation | 398 | * If the write goes beyond the allocated size, extend the allocation |
| @@ -422,7 +420,7 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos, | |||
| 422 | "partially extended.", | 420 | "partially extended.", |
| 423 | vi->i_ino, (unsigned) | 421 | vi->i_ino, (unsigned) |
| 424 | le32_to_cpu(ni->type)); | 422 | le32_to_cpu(ni->type)); |
| 425 | *count = ll - pos; | 423 | iov_iter_truncate(from, ll - pos); |
| 426 | } | 424 | } |
| 427 | } else { | 425 | } else { |
| 428 | err = ll; | 426 | err = ll; |
| @@ -438,7 +436,7 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos, | |||
| 438 | vi->i_ino, (unsigned) | 436 | vi->i_ino, (unsigned) |
| 439 | le32_to_cpu(ni->type), | 437 | le32_to_cpu(ni->type), |
| 440 | (int)-err); | 438 | (int)-err); |
| 441 | *count = ll - pos; | 439 | iov_iter_truncate(from, ll - pos); |
| 442 | } else { | 440 | } else { |
| 443 | if (err != -ENOSPC) | 441 | if (err != -ENOSPC) |
| 444 | ntfs_error(vi->i_sb, "Cannot perform " | 442 | ntfs_error(vi->i_sb, "Cannot perform " |
| @@ -1930,60 +1928,36 @@ again: | |||
| 1930 | } | 1928 | } |
| 1931 | 1929 | ||
| 1932 | /** | 1930 | /** |
| 1933 | * ntfs_file_write_iter_nolock - write data to a file | ||
| 1934 | * @iocb: IO state structure (file, offset, etc.) | ||
| 1935 | * @from: iov_iter with data to write | ||
| 1936 | * | ||
| 1937 | * Basically the same as __generic_file_write_iter() except that it ends | ||
| 1938 | * up calling ntfs_perform_write() instead of generic_perform_write() and that | ||
| 1939 | * O_DIRECT is not implemented. | ||
| 1940 | */ | ||
| 1941 | static ssize_t ntfs_file_write_iter_nolock(struct kiocb *iocb, | ||
| 1942 | struct iov_iter *from) | ||
| 1943 | { | ||
| 1944 | struct file *file = iocb->ki_filp; | ||
| 1945 | loff_t pos = iocb->ki_pos; | ||
| 1946 | ssize_t written = 0; | ||
| 1947 | ssize_t err; | ||
| 1948 | size_t count = iov_iter_count(from); | ||
| 1949 | |||
| 1950 | err = ntfs_prepare_file_for_write(file, &pos, &count); | ||
| 1951 | if (count && !err) { | ||
| 1952 | iov_iter_truncate(from, count); | ||
| 1953 | written = ntfs_perform_write(file, from, pos); | ||
| 1954 | if (likely(written >= 0)) | ||
| 1955 | iocb->ki_pos = pos + written; | ||
| 1956 | } | ||
| 1957 | current->backing_dev_info = NULL; | ||
| 1958 | return written ? written : err; | ||
| 1959 | } | ||
| 1960 | |||
| 1961 | /** | ||
| 1962 | * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock() | 1931 | * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock() |
| 1963 | * @iocb: IO state structure | 1932 | * @iocb: IO state structure |
| 1964 | * @from: iov_iter with data to write | 1933 | * @from: iov_iter with data to write |
| 1965 | * | 1934 | * |
| 1966 | * Basically the same as generic_file_write_iter() except that it ends up | 1935 | * Basically the same as generic_file_write_iter() except that it ends up |
| 1967 | * calling ntfs_file_write_iter_nolock() instead of | 1936 | * up calling ntfs_perform_write() instead of generic_perform_write() and that |
| 1968 | * __generic_file_write_iter(). | 1937 | * O_DIRECT is not implemented. |
| 1969 | */ | 1938 | */ |
| 1970 | static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 1939 | static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
| 1971 | { | 1940 | { |
| 1972 | struct file *file = iocb->ki_filp; | 1941 | struct file *file = iocb->ki_filp; |
| 1973 | struct inode *vi = file_inode(file); | 1942 | struct inode *vi = file_inode(file); |
| 1974 | ssize_t ret; | 1943 | ssize_t written = 0; |
| 1944 | ssize_t err; | ||
| 1975 | 1945 | ||
| 1976 | mutex_lock(&vi->i_mutex); | 1946 | mutex_lock(&vi->i_mutex); |
| 1977 | ret = ntfs_file_write_iter_nolock(iocb, from); | 1947 | /* We can write back this queue in page reclaim. */ |
| 1948 | current->backing_dev_info = inode_to_bdi(vi); | ||
| 1949 | err = ntfs_prepare_file_for_write(iocb, from); | ||
| 1950 | if (iov_iter_count(from) && !err) | ||
| 1951 | written = ntfs_perform_write(file, from, iocb->ki_pos); | ||
| 1952 | current->backing_dev_info = NULL; | ||
| 1978 | mutex_unlock(&vi->i_mutex); | 1953 | mutex_unlock(&vi->i_mutex); |
| 1979 | if (ret > 0) { | 1954 | if (likely(written > 0)) { |
| 1980 | ssize_t err; | 1955 | err = generic_write_sync(file, iocb->ki_pos, written); |
| 1981 | |||
| 1982 | err = generic_write_sync(file, iocb->ki_pos - ret, ret); | ||
| 1983 | if (err < 0) | 1956 | if (err < 0) |
| 1984 | ret = err; | 1957 | written = 0; |
| 1985 | } | 1958 | } |
| 1986 | return ret; | 1959 | iocb->ki_pos += written; |
| 1960 | return written ? written : err; | ||
| 1987 | } | 1961 | } |
| 1988 | 1962 | ||
| 1989 | /** | 1963 | /** |
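With the checks done against the iocb/iov_iter pair, the separate `ntfs_file_write_iter_nolock()` helper becomes unnecessary and is folded into `ntfs_file_write_iter()` above. Reassembled from the hunks, the new flow looks roughly like the sketch below; `foo_prepare_file_for_write()` and `foo_perform_write()` stand in for the ntfs-specific helpers, and kernel context of this era (per-inode `i_mutex`, three-argument `generic_write_sync()`) is assumed:

```c
/* Rough shape of the consolidated write_iter path after this series. */
static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t written = 0;
	ssize_t err;

	mutex_lock(&inode->i_mutex);
	current->backing_dev_info = inode_to_bdi(inode);  /* writeback in reclaim */
	err = foo_prepare_file_for_write(iocb, from);     /* runs generic_write_checks() */
	if (iov_iter_count(from) && !err)
		written = foo_perform_write(file, from, iocb->ki_pos);
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	if (likely(written > 0)) {
		/* ki_pos still points at the start of the range just written. */
		err = generic_write_sync(file, iocb->ki_pos, written);
		if (err < 0)
			written = 0;
	}
	iocb->ki_pos += written;
	return written ? written : err;
}
```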
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 8d2bc840c288..f906a250da6a 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
| @@ -855,10 +855,9 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, | |||
| 855 | ocfs2_inode_unlock(inode, 1); | 855 | ocfs2_inode_unlock(inode, 1); |
| 856 | } | 856 | } |
| 857 | 857 | ||
| 858 | written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev, | 858 | written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, |
| 859 | iter, offset, | 859 | offset, ocfs2_direct_IO_get_blocks, |
| 860 | ocfs2_direct_IO_get_blocks, | 860 | ocfs2_dio_end_io, NULL, 0); |
| 861 | ocfs2_dio_end_io, NULL, 0); | ||
| 862 | if (unlikely(written < 0)) { | 861 | if (unlikely(written < 0)) { |
| 863 | loff_t i_size = i_size_read(inode); | 862 | loff_t i_size = i_size_read(inode); |
| 864 | 863 | ||
| @@ -946,9 +945,7 @@ out: | |||
| 946 | return ret; | 945 | return ret; |
| 947 | } | 946 | } |
| 948 | 947 | ||
| 949 | static ssize_t ocfs2_direct_IO(int rw, | 948 | static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 950 | struct kiocb *iocb, | ||
| 951 | struct iov_iter *iter, | ||
| 952 | loff_t offset) | 949 | loff_t offset) |
| 953 | { | 950 | { |
| 954 | struct file *file = iocb->ki_filp; | 951 | struct file *file = iocb->ki_filp; |
| @@ -970,12 +967,11 @@ static ssize_t ocfs2_direct_IO(int rw, | |||
| 970 | if (i_size_read(inode) <= offset && !full_coherency) | 967 | if (i_size_read(inode) <= offset && !full_coherency) |
| 971 | return 0; | 968 | return 0; |
| 972 | 969 | ||
| 973 | if (rw == READ) | 970 | if (iov_iter_rw(iter) == READ) |
| 974 | return __blockdev_direct_IO(rw, iocb, inode, | 971 | return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, |
| 975 | inode->i_sb->s_bdev, | 972 | iter, offset, |
| 976 | iter, offset, | 973 | ocfs2_direct_IO_get_blocks, |
| 977 | ocfs2_direct_IO_get_blocks, | 974 | ocfs2_dio_end_io, NULL, 0); |
| 978 | ocfs2_dio_end_io, NULL, 0); | ||
| 979 | else | 975 | else |
| 980 | return ocfs2_direct_IO_write(iocb, iter, offset); | 976 | return ocfs2_direct_IO_write(iocb, iter, offset); |
| 981 | } | 977 | } |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8c48e989beba..913fc250d85a 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -2106,7 +2106,7 @@ out: | |||
| 2106 | } | 2106 | } |
| 2107 | 2107 | ||
| 2108 | static int ocfs2_prepare_inode_for_write(struct file *file, | 2108 | static int ocfs2_prepare_inode_for_write(struct file *file, |
| 2109 | loff_t *ppos, | 2109 | loff_t pos, |
| 2110 | size_t count, | 2110 | size_t count, |
| 2111 | int appending, | 2111 | int appending, |
| 2112 | int *direct_io, | 2112 | int *direct_io, |
| @@ -2115,7 +2115,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
| 2115 | int ret = 0, meta_level = 0; | 2115 | int ret = 0, meta_level = 0; |
| 2116 | struct dentry *dentry = file->f_path.dentry; | 2116 | struct dentry *dentry = file->f_path.dentry; |
| 2117 | struct inode *inode = dentry->d_inode; | 2117 | struct inode *inode = dentry->d_inode; |
| 2118 | loff_t saved_pos = 0, end; | 2118 | loff_t end; |
| 2119 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2119 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
| 2120 | int full_coherency = !(osb->s_mount_opt & | 2120 | int full_coherency = !(osb->s_mount_opt & |
| 2121 | OCFS2_MOUNT_COHERENCY_BUFFERED); | 2121 | OCFS2_MOUNT_COHERENCY_BUFFERED); |
| @@ -2155,23 +2155,16 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
| 2155 | } | 2155 | } |
| 2156 | } | 2156 | } |
| 2157 | 2157 | ||
| 2158 | /* work on a copy of ppos until we're sure that we won't have | 2158 | end = pos + count; |
| 2159 | * to recalculate it due to relocking. */ | ||
| 2160 | if (appending) | ||
| 2161 | saved_pos = i_size_read(inode); | ||
| 2162 | else | ||
| 2163 | saved_pos = *ppos; | ||
| 2164 | |||
| 2165 | end = saved_pos + count; | ||
| 2166 | 2159 | ||
| 2167 | ret = ocfs2_check_range_for_refcount(inode, saved_pos, count); | 2160 | ret = ocfs2_check_range_for_refcount(inode, pos, count); |
| 2168 | if (ret == 1) { | 2161 | if (ret == 1) { |
| 2169 | ocfs2_inode_unlock(inode, meta_level); | 2162 | ocfs2_inode_unlock(inode, meta_level); |
| 2170 | meta_level = -1; | 2163 | meta_level = -1; |
| 2171 | 2164 | ||
| 2172 | ret = ocfs2_prepare_inode_for_refcount(inode, | 2165 | ret = ocfs2_prepare_inode_for_refcount(inode, |
| 2173 | file, | 2166 | file, |
| 2174 | saved_pos, | 2167 | pos, |
| 2175 | count, | 2168 | count, |
| 2176 | &meta_level); | 2169 | &meta_level); |
| 2177 | if (has_refcount) | 2170 | if (has_refcount) |
| @@ -2227,7 +2220,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
| 2227 | * caller will have to retake some cluster | 2220 | * caller will have to retake some cluster |
| 2228 | * locks and initiate the io as buffered. | 2221 | * locks and initiate the io as buffered. |
| 2229 | */ | 2222 | */ |
| 2230 | ret = ocfs2_check_range_for_holes(inode, saved_pos, count); | 2223 | ret = ocfs2_check_range_for_holes(inode, pos, count); |
| 2231 | if (ret == 1) { | 2224 | if (ret == 1) { |
| 2232 | /* | 2225 | /* |
| 2233 | * Fallback to old way if the feature bit is not set. | 2226 | * Fallback to old way if the feature bit is not set. |
| @@ -2242,12 +2235,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
| 2242 | break; | 2235 | break; |
| 2243 | } | 2236 | } |
| 2244 | 2237 | ||
| 2245 | if (appending) | ||
| 2246 | *ppos = saved_pos; | ||
| 2247 | |||
| 2248 | out_unlock: | 2238 | out_unlock: |
| 2249 | trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, | 2239 | trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, |
| 2250 | saved_pos, appending, count, | 2240 | pos, appending, count, |
| 2251 | direct_io, has_refcount); | 2241 | direct_io, has_refcount); |
| 2252 | 2242 | ||
| 2253 | if (meta_level >= 0) | 2243 | if (meta_level >= 0) |
| @@ -2260,19 +2250,20 @@ out: | |||
| 2260 | static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, | 2250 | static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, |
| 2261 | struct iov_iter *from) | 2251 | struct iov_iter *from) |
| 2262 | { | 2252 | { |
| 2263 | int ret, direct_io, appending, rw_level, have_alloc_sem = 0; | 2253 | int direct_io, appending, rw_level, have_alloc_sem = 0; |
| 2264 | int can_do_direct, has_refcount = 0; | 2254 | int can_do_direct, has_refcount = 0; |
| 2265 | ssize_t written = 0; | 2255 | ssize_t written = 0; |
| 2266 | size_t count = iov_iter_count(from); | 2256 | ssize_t ret; |
| 2267 | loff_t old_size, *ppos = &iocb->ki_pos; | 2257 | size_t count = iov_iter_count(from), orig_count; |
| 2258 | loff_t old_size; | ||
| 2268 | u32 old_clusters; | 2259 | u32 old_clusters; |
| 2269 | struct file *file = iocb->ki_filp; | 2260 | struct file *file = iocb->ki_filp; |
| 2270 | struct inode *inode = file_inode(file); | 2261 | struct inode *inode = file_inode(file); |
| 2271 | struct address_space *mapping = file->f_mapping; | ||
| 2272 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2262 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
| 2273 | int full_coherency = !(osb->s_mount_opt & | 2263 | int full_coherency = !(osb->s_mount_opt & |
| 2274 | OCFS2_MOUNT_COHERENCY_BUFFERED); | 2264 | OCFS2_MOUNT_COHERENCY_BUFFERED); |
| 2275 | int unaligned_dio = 0; | 2265 | int unaligned_dio = 0; |
| 2266 | int dropped_dio = 0; | ||
| 2276 | 2267 | ||
| 2277 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, | 2268 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, |
| 2278 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 2269 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
| @@ -2283,8 +2274,8 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, | |||
| 2283 | if (count == 0) | 2274 | if (count == 0) |
| 2284 | return 0; | 2275 | return 0; |
| 2285 | 2276 | ||
| 2286 | appending = file->f_flags & O_APPEND ? 1 : 0; | 2277 | appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0; |
| 2287 | direct_io = file->f_flags & O_DIRECT ? 1 : 0; | 2278 | direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0; |
| 2288 | 2279 | ||
| 2289 | mutex_lock(&inode->i_mutex); | 2280 | mutex_lock(&inode->i_mutex); |
| 2290 | 2281 | ||
| @@ -2329,8 +2320,17 @@ relock: | |||
| 2329 | ocfs2_inode_unlock(inode, 1); | 2320 | ocfs2_inode_unlock(inode, 1); |
| 2330 | } | 2321 | } |
| 2331 | 2322 | ||
| 2323 | orig_count = iov_iter_count(from); | ||
| 2324 | ret = generic_write_checks(iocb, from); | ||
| 2325 | if (ret <= 0) { | ||
| 2326 | if (ret) | ||
| 2327 | mlog_errno(ret); | ||
| 2328 | goto out; | ||
| 2329 | } | ||
| 2330 | count = ret; | ||
| 2331 | |||
| 2332 | can_do_direct = direct_io; | 2332 | can_do_direct = direct_io; |
| 2333 | ret = ocfs2_prepare_inode_for_write(file, ppos, count, appending, | 2333 | ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending, |
| 2334 | &can_do_direct, &has_refcount); | 2334 | &can_do_direct, &has_refcount); |
| 2335 | if (ret < 0) { | 2335 | if (ret < 0) { |
| 2336 | mlog_errno(ret); | 2336 | mlog_errno(ret); |
| @@ -2338,7 +2338,7 @@ relock: | |||
| 2338 | } | 2338 | } |
| 2339 | 2339 | ||
| 2340 | if (direct_io && !is_sync_kiocb(iocb)) | 2340 | if (direct_io && !is_sync_kiocb(iocb)) |
| 2341 | unaligned_dio = ocfs2_is_io_unaligned(inode, count, *ppos); | 2341 | unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos); |
| 2342 | 2342 | ||
| 2343 | /* | 2343 | /* |
| 2344 | * We can't complete the direct I/O as requested, fall back to | 2344 | * We can't complete the direct I/O as requested, fall back to |
| @@ -2351,6 +2351,9 @@ relock: | |||
| 2351 | rw_level = -1; | 2351 | rw_level = -1; |
| 2352 | 2352 | ||
| 2353 | direct_io = 0; | 2353 | direct_io = 0; |
| 2354 | iocb->ki_flags &= ~IOCB_DIRECT; | ||
| 2355 | iov_iter_reexpand(from, orig_count); | ||
| 2356 | dropped_dio = 1; | ||
| 2354 | goto relock; | 2357 | goto relock; |
| 2355 | } | 2358 | } |
| 2356 | 2359 | ||
| @@ -2374,74 +2377,15 @@ relock: | |||
| 2374 | /* communicate with ocfs2_dio_end_io */ | 2377 | /* communicate with ocfs2_dio_end_io */ |
| 2375 | ocfs2_iocb_set_rw_locked(iocb, rw_level); | 2378 | ocfs2_iocb_set_rw_locked(iocb, rw_level); |
| 2376 | 2379 | ||
| 2377 | ret = generic_write_checks(file, ppos, &count, | 2380 | written = __generic_file_write_iter(iocb, from); |
| 2378 | S_ISBLK(inode->i_mode)); | ||
| 2379 | if (ret) | ||
| 2380 | goto out_dio; | ||
| 2381 | |||
| 2382 | iov_iter_truncate(from, count); | ||
| 2383 | if (direct_io) { | ||
| 2384 | loff_t endbyte; | ||
| 2385 | ssize_t written_buffered; | ||
| 2386 | written = generic_file_direct_write(iocb, from, *ppos); | ||
| 2387 | if (written < 0 || written == count) { | ||
| 2388 | ret = written; | ||
| 2389 | goto out_dio; | ||
| 2390 | } | ||
| 2391 | |||
| 2392 | /* | ||
| 2393 | * for completing the rest of the request. | ||
| 2394 | */ | ||
| 2395 | count -= written; | ||
| 2396 | written_buffered = generic_perform_write(file, from, *ppos); | ||
| 2397 | /* | ||
| 2398 | * If generic_file_buffered_write() returned a synchronous error | ||
| 2399 | * then we want to return the number of bytes which were | ||
| 2400 | * direct-written, or the error code if that was zero. Note | ||
| 2401 | * that this differs from normal direct-io semantics, which | ||
| 2402 | * will return -EFOO even if some bytes were written. | ||
| 2403 | */ | ||
| 2404 | if (written_buffered < 0) { | ||
| 2405 | ret = written_buffered; | ||
| 2406 | goto out_dio; | ||
| 2407 | } | ||
| 2408 | |||
| 2409 | /* We need to ensure that the page cache pages are written to | ||
| 2410 | * disk and invalidated to preserve the expected O_DIRECT | ||
| 2411 | * semantics. | ||
| 2412 | */ | ||
| 2413 | endbyte = *ppos + written_buffered - 1; | ||
| 2414 | ret = filemap_write_and_wait_range(file->f_mapping, *ppos, | ||
| 2415 | endbyte); | ||
| 2416 | if (ret == 0) { | ||
| 2417 | iocb->ki_pos = *ppos + written_buffered; | ||
| 2418 | written += written_buffered; | ||
| 2419 | invalidate_mapping_pages(mapping, | ||
| 2420 | *ppos >> PAGE_CACHE_SHIFT, | ||
| 2421 | endbyte >> PAGE_CACHE_SHIFT); | ||
| 2422 | } else { | ||
| 2423 | /* | ||
| 2424 | * We don't know how much we wrote, so just return | ||
| 2425 | * the number of bytes which were direct-written | ||
| 2426 | */ | ||
| 2427 | } | ||
| 2428 | } else { | ||
| 2429 | current->backing_dev_info = inode_to_bdi(inode); | ||
| 2430 | written = generic_perform_write(file, from, *ppos); | ||
| 2431 | if (likely(written >= 0)) | ||
| 2432 | iocb->ki_pos = *ppos + written; | ||
| 2433 | current->backing_dev_info = NULL; | ||
| 2434 | } | ||
| 2435 | |||
| 2436 | out_dio: | ||
| 2437 | /* buffered aio wouldn't have proper lock coverage today */ | 2381 | /* buffered aio wouldn't have proper lock coverage today */ |
| 2438 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); | 2382 | BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); |
| 2439 | 2383 | ||
| 2440 | if (unlikely(written <= 0)) | 2384 | if (unlikely(written <= 0)) |
| 2441 | goto no_sync; | 2385 | goto no_sync; |
| 2442 | 2386 | ||
| 2443 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2387 | if (((file->f_flags & O_DSYNC) && !direct_io) || |
| 2444 | ((file->f_flags & O_DIRECT) && !direct_io)) { | 2388 | IS_SYNC(inode) || dropped_dio) { |
| 2445 | ret = filemap_fdatawrite_range(file->f_mapping, | 2389 | ret = filemap_fdatawrite_range(file->f_mapping, |
| 2446 | iocb->ki_pos - written, | 2390 | iocb->ki_pos - written, |
| 2447 | iocb->ki_pos - 1); | 2391 | iocb->ki_pos - 1); |
| @@ -2552,7 +2496,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, | |||
| 2552 | * buffered reads protect themselves in ->readpage(). O_DIRECT reads | 2496 | * buffered reads protect themselves in ->readpage(). O_DIRECT reads |
| 2553 | * need locks to protect pending reads from racing with truncate. | 2497 | * need locks to protect pending reads from racing with truncate. |
| 2554 | */ | 2498 | */ |
| 2555 | if (filp->f_flags & O_DIRECT) { | 2499 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 2556 | have_alloc_sem = 1; | 2500 | have_alloc_sem = 1; |
| 2557 | ocfs2_iocb_set_sem_locked(iocb); | 2501 | ocfs2_iocb_set_sem_locked(iocb); |
| 2558 | 2502 | ||
| @@ -2586,7 +2530,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, | |||
| 2586 | trace_generic_file_aio_read_ret(ret); | 2530 | trace_generic_file_aio_read_ret(ret); |
| 2587 | 2531 | ||
| 2588 | /* buffered aio wouldn't have proper lock coverage today */ | 2532 | /* buffered aio wouldn't have proper lock coverage today */ |
| 2589 | BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); | 2533 | BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); |
| 2590 | 2534 | ||
| 2591 | /* see ocfs2_file_write_iter */ | 2535 | /* see ocfs2_file_write_iter */ |
| 2592 | if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { | 2536 | if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { |
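A notable simplification in ocfs2_file_write_iter() above: the open-coded direct-vs-buffered split (generic_file_direct_write() plus a manual buffered fallback with cache flushing) is replaced by a single call to `__generic_file_write_iter()`, and the case where direct I/O cannot be honoured is handled by clearing IOCB_DIRECT and re-expanding the iterator before retrying. The fallback idiom, sketched with a hypothetical `foo_must_fall_back()` condition and locking elided:

```c
/* Sketch of falling back from direct to buffered I/O after checks ran. */
static ssize_t foo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	size_t orig_count = iov_iter_count(from);
	ssize_t ret;

relock:
	ret = generic_write_checks(iocb, from);		/* may shorten the iter */
	if (ret <= 0)
		return ret;

	if ((iocb->ki_flags & IOCB_DIRECT) && foo_must_fall_back(iocb)) {
		iocb->ki_flags &= ~IOCB_DIRECT;		/* retry as buffered I/O */
		iov_iter_reexpand(from, orig_count);	/* undo any truncation */
		goto relock;				/* redo locks and checks */
	}

	return __generic_file_write_iter(iocb, from);
}
```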
diff --git a/fs/read_write.c b/fs/read_write.c index 45d583c33879..819ef3faf1bb 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -477,7 +477,8 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t | |||
| 477 | 477 | ||
| 478 | ret = filp->f_op->write_iter(&kiocb, &iter); | 478 | ret = filp->f_op->write_iter(&kiocb, &iter); |
| 479 | BUG_ON(ret == -EIOCBQUEUED); | 479 | BUG_ON(ret == -EIOCBQUEUED); |
| 480 | *ppos = kiocb.ki_pos; | 480 | if (ret > 0) |
| 481 | *ppos = kiocb.ki_pos; | ||
| 481 | return ret; | 482 | return ret; |
| 482 | } | 483 | } |
| 483 | 484 | ||
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9312b7842e03..742242b60972 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -3278,22 +3278,22 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags) | |||
| 3278 | * We thank Mingming Cao for helping us understand in great detail what | 3278 | * We thank Mingming Cao for helping us understand in great detail what |
| 3279 | * to do in this section of the code. | 3279 | * to do in this section of the code. |
| 3280 | */ | 3280 | */ |
| 3281 | static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb, | 3281 | static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 3282 | struct iov_iter *iter, loff_t offset) | 3282 | loff_t offset) |
| 3283 | { | 3283 | { |
| 3284 | struct file *file = iocb->ki_filp; | 3284 | struct file *file = iocb->ki_filp; |
| 3285 | struct inode *inode = file->f_mapping->host; | 3285 | struct inode *inode = file->f_mapping->host; |
| 3286 | size_t count = iov_iter_count(iter); | 3286 | size_t count = iov_iter_count(iter); |
| 3287 | ssize_t ret; | 3287 | ssize_t ret; |
| 3288 | 3288 | ||
| 3289 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, | 3289 | ret = blockdev_direct_IO(iocb, inode, iter, offset, |
| 3290 | reiserfs_get_blocks_direct_io); | 3290 | reiserfs_get_blocks_direct_io); |
| 3291 | 3291 | ||
| 3292 | /* | 3292 | /* |
| 3293 | * In case of error extending write may have instantiated a few | 3293 | * In case of error extending write may have instantiated a few |
| 3294 | * blocks outside i_size. Trim these off again. | 3294 | * blocks outside i_size. Trim these off again. |
| 3295 | */ | 3295 | */ |
| 3296 | if (unlikely((rw & WRITE) && ret < 0)) { | 3296 | if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { |
| 3297 | loff_t isize = i_size_read(inode); | 3297 | loff_t isize = i_size_read(inode); |
| 3298 | loff_t end = offset + count; | 3298 | loff_t end = offset + count; |
| 3299 | 3299 | ||
diff --git a/fs/udf/file.c b/fs/udf/file.c index f77f7681288f..5dadad9960b9 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
| @@ -99,8 +99,7 @@ static int udf_adinicb_write_begin(struct file *file, | |||
| 99 | return 0; | 99 | return 0; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, | 102 | static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 103 | struct iov_iter *iter, | ||
| 104 | loff_t offset) | 103 | loff_t offset) |
| 105 | { | 104 | { |
| 106 | /* Fallback to buffered I/O. */ | 105 | /* Fallback to buffered I/O. */ |
| @@ -120,21 +119,21 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 120 | ssize_t retval; | 119 | ssize_t retval; |
| 121 | struct file *file = iocb->ki_filp; | 120 | struct file *file = iocb->ki_filp; |
| 122 | struct inode *inode = file_inode(file); | 121 | struct inode *inode = file_inode(file); |
| 123 | int err, pos; | ||
| 124 | size_t count = iov_iter_count(from); | ||
| 125 | struct udf_inode_info *iinfo = UDF_I(inode); | 122 | struct udf_inode_info *iinfo = UDF_I(inode); |
| 123 | int err; | ||
| 126 | 124 | ||
| 127 | mutex_lock(&inode->i_mutex); | 125 | mutex_lock(&inode->i_mutex); |
| 126 | |||
| 127 | retval = generic_write_checks(iocb, from); | ||
| 128 | if (retval <= 0) | ||
| 129 | goto out; | ||
| 130 | |||
| 128 | down_write(&iinfo->i_data_sem); | 131 | down_write(&iinfo->i_data_sem); |
| 129 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { | 132 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
| 130 | if (file->f_flags & O_APPEND) | 133 | loff_t end = iocb->ki_pos + iov_iter_count(from); |
| 131 | pos = inode->i_size; | ||
| 132 | else | ||
| 133 | pos = iocb->ki_pos; | ||
| 134 | 134 | ||
| 135 | if (inode->i_sb->s_blocksize < | 135 | if (inode->i_sb->s_blocksize < |
| 136 | (udf_file_entry_alloc_offset(inode) + | 136 | (udf_file_entry_alloc_offset(inode) + end)) { |
| 137 | pos + count)) { | ||
| 138 | err = udf_expand_file_adinicb(inode); | 137 | err = udf_expand_file_adinicb(inode); |
| 139 | if (err) { | 138 | if (err) { |
| 140 | mutex_unlock(&inode->i_mutex); | 139 | mutex_unlock(&inode->i_mutex); |
| @@ -142,16 +141,14 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 142 | return err; | 141 | return err; |
| 143 | } | 142 | } |
| 144 | } else { | 143 | } else { |
| 145 | if (pos + count > inode->i_size) | 144 | iinfo->i_lenAlloc = max(end, inode->i_size); |
| 146 | iinfo->i_lenAlloc = pos + count; | ||
| 147 | else | ||
| 148 | iinfo->i_lenAlloc = inode->i_size; | ||
| 149 | up_write(&iinfo->i_data_sem); | 145 | up_write(&iinfo->i_data_sem); |
| 150 | } | 146 | } |
| 151 | } else | 147 | } else |
| 152 | up_write(&iinfo->i_data_sem); | 148 | up_write(&iinfo->i_data_sem); |
| 153 | 149 | ||
| 154 | retval = __generic_file_write_iter(iocb, from); | 150 | retval = __generic_file_write_iter(iocb, from); |
| 151 | out: | ||
| 155 | mutex_unlock(&inode->i_mutex); | 152 | mutex_unlock(&inode->i_mutex); |
| 156 | 153 | ||
| 157 | if (retval > 0) { | 154 | if (retval > 0) { |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 9e3d780e5eff..6afac3d561ac 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -214,8 +214,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, | |||
| 214 | return ret; | 214 | return ret; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, | 217 | static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
| 218 | struct iov_iter *iter, | ||
| 219 | loff_t offset) | 218 | loff_t offset) |
| 220 | { | 219 | { |
| 221 | struct file *file = iocb->ki_filp; | 220 | struct file *file = iocb->ki_filp; |
| @@ -224,8 +223,8 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, | |||
| 224 | size_t count = iov_iter_count(iter); | 223 | size_t count = iov_iter_count(iter); |
| 225 | ssize_t ret; | 224 | ssize_t ret; |
| 226 | 225 | ||
| 227 | ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block); | 226 | ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block); |
| 228 | if (unlikely(ret < 0 && (rw & WRITE))) | 227 | if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE)) |
| 229 | udf_write_failed(mapping, offset + count); | 228 | udf_write_failed(mapping, offset + count); |
| 230 | return ret; | 229 | return ret; |
| 231 | } | 230 | } |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 4f8cdc59bc38..1d8eef9cf0f5 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
| @@ -1495,7 +1495,6 @@ xfs_end_io_direct_write( | |||
| 1495 | 1495 | ||
| 1496 | STATIC ssize_t | 1496 | STATIC ssize_t |
| 1497 | xfs_vm_direct_IO( | 1497 | xfs_vm_direct_IO( |
| 1498 | int rw, | ||
| 1499 | struct kiocb *iocb, | 1498 | struct kiocb *iocb, |
| 1500 | struct iov_iter *iter, | 1499 | struct iov_iter *iter, |
| 1501 | loff_t offset) | 1500 | loff_t offset) |
| @@ -1503,15 +1502,14 @@ xfs_vm_direct_IO( | |||
| 1503 | struct inode *inode = iocb->ki_filp->f_mapping->host; | 1502 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
| 1504 | struct block_device *bdev = xfs_find_bdev_for_inode(inode); | 1503 | struct block_device *bdev = xfs_find_bdev_for_inode(inode); |
| 1505 | 1504 | ||
| 1506 | if (rw & WRITE) { | 1505 | if (iov_iter_rw(iter) == WRITE) { |
| 1507 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, | 1506 | return __blockdev_direct_IO(iocb, inode, bdev, iter, offset, |
| 1508 | offset, xfs_get_blocks_direct, | 1507 | xfs_get_blocks_direct, |
| 1509 | xfs_end_io_direct_write, NULL, | 1508 | xfs_end_io_direct_write, NULL, |
| 1510 | DIO_ASYNC_EXTEND); | 1509 | DIO_ASYNC_EXTEND); |
| 1511 | } | 1510 | } |
| 1512 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, | 1511 | return __blockdev_direct_IO(iocb, inode, bdev, iter, offset, |
| 1513 | offset, xfs_get_blocks_direct, | 1512 | xfs_get_blocks_direct, NULL, NULL, 0); |
| 1514 | NULL, NULL, 0); | ||
| 1515 | } | 1513 | } |
| 1516 | 1514 | ||
| 1517 | /* | 1515 | /* |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 44856c3b9617..1f12ad0a8585 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -279,7 +279,7 @@ xfs_file_read_iter( | |||
| 279 | 279 | ||
| 280 | XFS_STATS_INC(xs_read_calls); | 280 | XFS_STATS_INC(xs_read_calls); |
| 281 | 281 | ||
| 282 | if (unlikely(file->f_flags & O_DIRECT)) | 282 | if (unlikely(iocb->ki_flags & IOCB_DIRECT)) |
| 283 | ioflags |= XFS_IO_ISDIRECT; | 283 | ioflags |= XFS_IO_ISDIRECT; |
| 284 | if (file->f_mode & FMODE_NOCMTIME) | 284 | if (file->f_mode & FMODE_NOCMTIME) |
| 285 | ioflags |= XFS_IO_INVIS; | 285 | ioflags |= XFS_IO_INVIS; |
| @@ -544,18 +544,19 @@ xfs_zero_eof( | |||
| 544 | */ | 544 | */ |
| 545 | STATIC ssize_t | 545 | STATIC ssize_t |
| 546 | xfs_file_aio_write_checks( | 546 | xfs_file_aio_write_checks( |
| 547 | struct file *file, | 547 | struct kiocb *iocb, |
| 548 | loff_t *pos, | 548 | struct iov_iter *from, |
| 549 | size_t *count, | ||
| 550 | int *iolock) | 549 | int *iolock) |
| 551 | { | 550 | { |
| 551 | struct file *file = iocb->ki_filp; | ||
| 552 | struct inode *inode = file->f_mapping->host; | 552 | struct inode *inode = file->f_mapping->host; |
| 553 | struct xfs_inode *ip = XFS_I(inode); | 553 | struct xfs_inode *ip = XFS_I(inode); |
| 554 | int error = 0; | 554 | ssize_t error = 0; |
| 555 | size_t count = iov_iter_count(from); | ||
| 555 | 556 | ||
| 556 | restart: | 557 | restart: |
| 557 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); | 558 | error = generic_write_checks(iocb, from); |
| 558 | if (error) | 559 | if (error <= 0) |
| 559 | return error; | 560 | return error; |
| 560 | 561 | ||
| 561 | error = xfs_break_layouts(inode, iolock); | 562 | error = xfs_break_layouts(inode, iolock); |
| @@ -569,16 +570,17 @@ restart: | |||
| 569 | * iolock shared, we need to update it to exclusive which implies | 570 | * iolock shared, we need to update it to exclusive which implies |
| 570 | * having to redo all checks before. | 571 | * having to redo all checks before. |
| 571 | */ | 572 | */ |
| 572 | if (*pos > i_size_read(inode)) { | 573 | if (iocb->ki_pos > i_size_read(inode)) { |
| 573 | bool zero = false; | 574 | bool zero = false; |
| 574 | 575 | ||
| 575 | if (*iolock == XFS_IOLOCK_SHARED) { | 576 | if (*iolock == XFS_IOLOCK_SHARED) { |
| 576 | xfs_rw_iunlock(ip, *iolock); | 577 | xfs_rw_iunlock(ip, *iolock); |
| 577 | *iolock = XFS_IOLOCK_EXCL; | 578 | *iolock = XFS_IOLOCK_EXCL; |
| 578 | xfs_rw_ilock(ip, *iolock); | 579 | xfs_rw_ilock(ip, *iolock); |
| 580 | iov_iter_reexpand(from, count); | ||
| 579 | goto restart; | 581 | goto restart; |
| 580 | } | 582 | } |
| 581 | error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); | 583 | error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero); |
| 582 | if (error) | 584 | if (error) |
| 583 | return error; | 585 | return error; |
| 584 | } | 586 | } |
| @@ -678,10 +680,11 @@ xfs_file_dio_aio_write( | |||
| 678 | xfs_rw_ilock(ip, iolock); | 680 | xfs_rw_ilock(ip, iolock); |
| 679 | } | 681 | } |
| 680 | 682 | ||
| 681 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); | 683 | ret = xfs_file_aio_write_checks(iocb, from, &iolock); |
| 682 | if (ret) | 684 | if (ret) |
| 683 | goto out; | 685 | goto out; |
| 684 | iov_iter_truncate(from, count); | 686 | count = iov_iter_count(from); |
| 687 | pos = iocb->ki_pos; | ||
| 685 | 688 | ||
| 686 | if (mapping->nrpages) { | 689 | if (mapping->nrpages) { |
| 687 | ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 690 | ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
| @@ -734,24 +737,22 @@ xfs_file_buffered_aio_write( | |||
| 734 | ssize_t ret; | 737 | ssize_t ret; |
| 735 | int enospc = 0; | 738 | int enospc = 0; |
| 736 | int iolock = XFS_IOLOCK_EXCL; | 739 | int iolock = XFS_IOLOCK_EXCL; |
| 737 | loff_t pos = iocb->ki_pos; | ||
| 738 | size_t count = iov_iter_count(from); | ||
| 739 | 740 | ||
| 740 | xfs_rw_ilock(ip, iolock); | 741 | xfs_rw_ilock(ip, iolock); |
| 741 | 742 | ||
| 742 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); | 743 | ret = xfs_file_aio_write_checks(iocb, from, &iolock); |
| 743 | if (ret) | 744 | if (ret) |
| 744 | goto out; | 745 | goto out; |
| 745 | 746 | ||
| 746 | iov_iter_truncate(from, count); | ||
| 747 | /* We can write back this queue in page reclaim */ | 747 | /* We can write back this queue in page reclaim */ |
| 748 | current->backing_dev_info = inode_to_bdi(inode); | 748 | current->backing_dev_info = inode_to_bdi(inode); |
| 749 | 749 | ||
| 750 | write_retry: | 750 | write_retry: |
| 751 | trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); | 751 | trace_xfs_file_buffered_write(ip, iov_iter_count(from), |
| 752 | ret = generic_perform_write(file, from, pos); | 752 | iocb->ki_pos, 0); |
| 753 | ret = generic_perform_write(file, from, iocb->ki_pos); | ||
| 753 | if (likely(ret >= 0)) | 754 | if (likely(ret >= 0)) |
| 754 | iocb->ki_pos = pos + ret; | 755 | iocb->ki_pos += ret; |
| 755 | 756 | ||
| 756 | /* | 757 | /* |
| 757 | * If we hit a space limit, try to free up some lingering preallocated | 758 | * If we hit a space limit, try to free up some lingering preallocated |
| @@ -803,7 +804,7 @@ xfs_file_write_iter( | |||
| 803 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 804 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
| 804 | return -EIO; | 805 | return -EIO; |
| 805 | 806 | ||
| 806 | if (unlikely(file->f_flags & O_DIRECT)) | 807 | if (unlikely(iocb->ki_flags & IOCB_DIRECT)) |
| 807 | ret = xfs_file_dio_aio_write(iocb, from); | 808 | ret = xfs_file_dio_aio_write(iocb, from); |
| 808 | else | 809 | else |
| 809 | ret = xfs_file_buffered_aio_write(iocb, from); | 810 | ret = xfs_file_buffered_aio_write(iocb, from); |
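The xfs conversion above shows a subtlety of letting `generic_write_checks()` shorten the iterator: when the iolock has to be upgraded and the checks are redone, the iterator is first restored to its original length with `iov_iter_reexpand()`, so a second pass does not clamp an already-clamped count. A sketch of that restart pattern; the lock helpers and modes are hypothetical placeholders:

```c
/* Sketch: redoing generic_write_checks() after a lock upgrade. */
static ssize_t foo_write_checks(struct kiocb *iocb, struct iov_iter *from,
				int *iolock)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	size_t count = iov_iter_count(from);	/* original length */
	ssize_t error;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_pos > i_size_read(inode)) {
		if (*iolock == FOO_LOCK_SHARED) {	/* hypothetical lock mode */
			foo_unlock(inode, *iolock);
			*iolock = FOO_LOCK_EXCL;
			foo_lock(inode, *iolock);
			iov_iter_reexpand(from, count);	/* undo earlier clamping */
			goto restart;
		}
		/* ...zero the range between the old EOF and ki_pos... */
	}
	return 0;
}
```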
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d8358799c594..df334cbacc6d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry) | |||
| 404 | /* | 404 | /* |
| 405 | * Directory cache entry type accessor functions. | 405 | * Directory cache entry type accessor functions. |
| 406 | */ | 406 | */ |
| 407 | static inline void __d_set_type(struct dentry *dentry, unsigned type) | ||
| 408 | { | ||
| 409 | dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type; | ||
| 410 | } | ||
| 411 | |||
| 412 | static inline void __d_clear_type(struct dentry *dentry) | ||
| 413 | { | ||
| 414 | __d_set_type(dentry, DCACHE_MISS_TYPE); | ||
| 415 | } | ||
| 416 | |||
| 417 | static inline void d_set_type(struct dentry *dentry, unsigned type) | ||
| 418 | { | ||
| 419 | spin_lock(&dentry->d_lock); | ||
| 420 | __d_set_type(dentry, type); | ||
| 421 | spin_unlock(&dentry->d_lock); | ||
| 422 | } | ||
| 423 | |||
| 424 | static inline unsigned __d_entry_type(const struct dentry *dentry) | 407 | static inline unsigned __d_entry_type(const struct dentry *dentry) |
| 425 | { | 408 | { |
| 426 | return dentry->d_flags & DCACHE_ENTRY_TYPE; | 409 | unsigned type = READ_ONCE(dentry->d_flags); |
| 410 | smp_rmb(); | ||
| 411 | return type & DCACHE_ENTRY_TYPE; | ||
| 427 | } | 412 | } |
| 428 | 413 | ||
| 429 | static inline bool d_is_miss(const struct dentry *dentry) | 414 | static inline bool d_is_miss(const struct dentry *dentry) |
| @@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry) | |||
| 482 | return !d_is_negative(dentry); | 467 | return !d_is_negative(dentry); |
| 483 | } | 468 | } |
| 484 | 469 | ||
| 470 | /** | ||
| 471 | * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs) | ||
| 472 | * @dentry: The dentry in question | ||
| 473 | * | ||
| 474 | * Returns true if the dentry represents either an absent name or a name that | ||
| 475 | * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent | ||
| 476 | * a true miss, a whiteout that isn't represented by a 0,0 chardev or a | ||
| 477 | * fallthrough marker in an opaque directory. | ||
| 478 | * | ||
| 479 | * Note! (1) This should be used *only* by a filesystem to examine its own | ||
| 480 | * dentries. It should not be used to look at some other filesystem's | ||
| 481 | * dentries. (2) It should also be used in combination with d_inode() to get | ||
| 482 | * the inode. (3) The dentry may have something attached to ->d_lower and the | ||
| 483 | * type field of the flags may be set to something other than miss or whiteout. | ||
| 484 | */ | ||
| 485 | static inline bool d_really_is_negative(const struct dentry *dentry) | ||
| 486 | { | ||
| 487 | return dentry->d_inode == NULL; | ||
| 488 | } | ||
| 489 | |||
| 490 | /** | ||
| 491 | * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs) | ||
| 492 | * @dentry: The dentry in question | ||
| 493 | * | ||
| 494 | * Returns true if the dentry represents a name that maps to an inode | ||
| 495 | * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if | ||
| 496 | * that is represented on medium as a 0,0 chardev. | ||
| 497 | * | ||
| 498 | * Note! (1) This should be used *only* by a filesystem to examine its own | ||
| 499 | * dentries. It should not be used to look at some other filesystem's | ||
| 500 | * dentries. (2) It should also be used in combination with d_inode() to get | ||
| 501 | * the inode. | ||
| 502 | */ | ||
| 503 | static inline bool d_really_is_positive(const struct dentry *dentry) | ||
| 504 | { | ||
| 505 | return dentry->d_inode != NULL; | ||
| 506 | } | ||
| 507 | |||
| 485 | extern void d_set_fallthru(struct dentry *dentry); | 508 | extern void d_set_fallthru(struct dentry *dentry); |
| 486 | 509 | ||
| 487 | static inline bool d_is_fallthru(const struct dentry *dentry) | 510 | static inline bool d_is_fallthru(const struct dentry *dentry) |
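The kernel-doc added above stresses that `d_really_is_negative()`/`d_really_is_positive()` are for a filesystem looking at its *own* dentries and should be paired with `d_inode()`. A minimal sketch, assuming a hypothetical "myfs" helper (not from this patch set):

```c
#include <linux/dcache.h>
#include <linux/fs.h>

/* Hypothetical: myfs validating one of its own dentries before use. */
static int myfs_validate_own_dentry(struct dentry *dentry)
{
	struct inode *inode;

	/* True miss, whiteout or fallthrough: nothing attached to ->d_inode. */
	if (d_really_is_negative(dentry))
		return -ENOENT;

	/* Pair the check with d_inode(), as the comment above prescribes. */
	inode = d_inode(dentry);
	return inode->i_nlink ? 0 : -ESTALE;
}
```

Other filesystems' dentries should still be probed with `d_is_positive()`/`d_is_negative()`, which respect the fallthrough/whiteout typing kept in `d_flags`.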
diff --git a/include/linux/fs.h b/include/linux/fs.h index f4fc60727b8d..f4d63544a791 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -315,6 +315,8 @@ struct address_space; | |||
| 315 | struct writeback_control; | 315 | struct writeback_control; |
| 316 | 316 | ||
| 317 | #define IOCB_EVENTFD (1 << 0) | 317 | #define IOCB_EVENTFD (1 << 0) |
| 318 | #define IOCB_APPEND (1 << 1) | ||
| 319 | #define IOCB_DIRECT (1 << 2) | ||
| 318 | 320 | ||
| 319 | struct kiocb { | 321 | struct kiocb { |
| 320 | struct file *ki_filp; | 322 | struct file *ki_filp; |
| @@ -329,10 +331,13 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) | |||
| 329 | return kiocb->ki_complete == NULL; | 331 | return kiocb->ki_complete == NULL; |
| 330 | } | 332 | } |
| 331 | 333 | ||
| 334 | static inline int iocb_flags(struct file *file); | ||
| 335 | |||
| 332 | static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) | 336 | static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) |
| 333 | { | 337 | { |
| 334 | *kiocb = (struct kiocb) { | 338 | *kiocb = (struct kiocb) { |
| 335 | .ki_filp = filp, | 339 | .ki_filp = filp, |
| 340 | .ki_flags = iocb_flags(filp), | ||
| 336 | }; | 341 | }; |
| 337 | } | 342 | } |
| 338 | 343 | ||
| @@ -383,7 +388,7 @@ struct address_space_operations { | |||
| 383 | void (*invalidatepage) (struct page *, unsigned int, unsigned int); | 388 | void (*invalidatepage) (struct page *, unsigned int, unsigned int); |
| 384 | int (*releasepage) (struct page *, gfp_t); | 389 | int (*releasepage) (struct page *, gfp_t); |
| 385 | void (*freepage)(struct page *); | 390 | void (*freepage)(struct page *); |
| 386 | ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); | 391 | ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset); |
| 387 | /* | 392 | /* |
| 388 | * migrate the contents of a page to the specified target. If | 393 | * migrate the contents of a page to the specified target. If |
| 389 | * migrate_mode is MIGRATE_ASYNC, it must not block. | 394 | * migrate_mode is MIGRATE_ASYNC, it must not block. |
| @@ -2566,7 +2571,7 @@ extern int sb_min_blocksize(struct super_block *, int); | |||
| 2566 | 2571 | ||
| 2567 | extern int generic_file_mmap(struct file *, struct vm_area_struct *); | 2572 | extern int generic_file_mmap(struct file *, struct vm_area_struct *); |
| 2568 | extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); | 2573 | extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); |
| 2569 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); | 2574 | extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); |
| 2570 | extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); | 2575 | extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); |
| 2571 | extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); | 2576 | extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); |
| 2572 | extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); | 2577 | extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); |
| @@ -2609,8 +2614,8 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, | |||
| 2609 | extern int generic_file_open(struct inode * inode, struct file * filp); | 2614 | extern int generic_file_open(struct inode * inode, struct file * filp); |
| 2610 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 2615 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
| 2611 | 2616 | ||
| 2612 | ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, | 2617 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, |
| 2613 | loff_t, get_block_t, dio_iodone_t, int flags); | 2618 | get_block_t, dio_iodone_t, int flags); |
| 2614 | int dax_clear_blocks(struct inode *, sector_t block, long size); | 2619 | int dax_clear_blocks(struct inode *, sector_t block, long size); |
| 2615 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | 2620 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); |
| 2616 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | 2621 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); |
| @@ -2635,16 +2640,18 @@ enum { | |||
| 2635 | 2640 | ||
| 2636 | void dio_end_io(struct bio *bio, int error); | 2641 | void dio_end_io(struct bio *bio, int error); |
| 2637 | 2642 | ||
| 2638 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 2643 | ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
| 2639 | struct block_device *bdev, struct iov_iter *iter, loff_t offset, | 2644 | struct block_device *bdev, struct iov_iter *iter, |
| 2640 | get_block_t get_block, dio_iodone_t end_io, | 2645 | loff_t offset, get_block_t get_block, |
| 2641 | dio_submit_t submit_io, int flags); | 2646 | dio_iodone_t end_io, dio_submit_t submit_io, |
| 2647 | int flags); | ||
| 2642 | 2648 | ||
| 2643 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 2649 | static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, |
| 2644 | struct inode *inode, struct iov_iter *iter, loff_t offset, | 2650 | struct inode *inode, |
| 2645 | get_block_t get_block) | 2651 | struct iov_iter *iter, loff_t offset, |
| 2652 | get_block_t get_block) | ||
| 2646 | { | 2653 | { |
| 2647 | return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter, | 2654 | return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, |
| 2648 | offset, get_block, NULL, NULL, | 2655 | offset, get_block, NULL, NULL, |
| 2649 | DIO_LOCKING | DIO_SKIP_HOLES); | 2656 | DIO_LOCKING | DIO_SKIP_HOLES); |
| 2650 | } | 2657 | } |
| @@ -2777,6 +2784,16 @@ static inline bool io_is_direct(struct file *filp) | |||
| 2777 | return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp)); | 2784 | return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp)); |
| 2778 | } | 2785 | } |
| 2779 | 2786 | ||
| 2787 | static inline int iocb_flags(struct file *file) | ||
| 2788 | { | ||
| 2789 | int res = 0; | ||
| 2790 | if (file->f_flags & O_APPEND) | ||
| 2791 | res |= IOCB_APPEND; | ||
| 2792 | if (io_is_direct(file)) | ||
| 2793 | res |= IOCB_DIRECT; | ||
| 2794 | return res; | ||
| 2795 | } | ||
| 2796 | |||
| 2780 | static inline ino_t parent_ino(struct dentry *dentry) | 2797 | static inline ino_t parent_ino(struct dentry *dentry) |
| 2781 | { | 2798 | { |
| 2782 | ino_t res; | 2799 | ino_t res; |
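With `IOCB_APPEND`/`IOCB_DIRECT` defined and `init_sync_kiocb()` filling `ki_flags` from `iocb_flags(file)`, a synchronous writer captures the file's O_APPEND/O_DIRECT (and DAX) state once, up front. A minimal sketch of such a caller, assuming a hypothetical wrapper name:

```c
#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical wrapper: issue a synchronous write through the generic path. */
static ssize_t myfs_sync_write(struct file *file, struct iov_iter *from,
			       loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);	/* ki_flags = iocb_flags(file) */
	kiocb.ki_pos = *ppos;

	/*
	 * Everything below the VFS now keys off the ki_flags snapshot; a
	 * concurrent fcntl(F_SETFL) on the fd no longer changes the
	 * semantics of a write already in flight.
	 */
	ret = generic_file_write_iter(&kiocb, from);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}
```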
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b01ccf371fdc..410abd172feb 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -447,13 +447,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) | |||
| 447 | /* | 447 | /* |
| 448 | * linux/fs/nfs/direct.c | 448 | * linux/fs/nfs/direct.c |
| 449 | */ | 449 | */ |
| 450 | extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); | 450 | extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t); |
| 451 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, | 451 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, |
| 452 | struct iov_iter *iter, | 452 | struct iov_iter *iter, |
| 453 | loff_t pos); | 453 | loff_t pos); |
| 454 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, | 454 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, |
| 455 | struct iov_iter *iter, | 455 | struct iov_iter *iter); |
| 456 | loff_t pos); | ||
| 457 | 456 | ||
| 458 | /* | 457 | /* |
| 459 | * linux/fs/nfs/dir.c | 458 | * linux/fs/nfs/dir.c |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 15f11fb9fff6..8b01e1c3c614 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -112,6 +112,14 @@ static inline bool iter_is_iovec(struct iov_iter *i) | |||
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | /* | 114 | /* |
| 115 | * Get one of READ or WRITE out of iter->type without any other flags OR'd in | ||
| 116 | * with it. | ||
| 117 | * | ||
| 118 | * The ?: is just for type safety. | ||
| 119 | */ | ||
| 120 | #define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK) | ||
| 121 | |||
| 122 | /* | ||
| 115 | * Cap the iov_iter by given limit; note that the second argument is | 123 | * Cap the iov_iter by given limit; note that the second argument is |
| 116 | * *not* the new size - it's upper limit for such. Passing it a value | 124 | * *not* the new size - it's upper limit for such. Passing it a value |
| 117 | * greater than the amount of data in iov_iter is fine - it'll just do | 125 | * greater than the amount of data in iov_iter is fine - it'll just do |
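Because ->direct_IO() (and __blockdev_direct_IO()) no longer take an `rw` argument, implementations that still need the direction recover it from the iterator with the new `iov_iter_rw()` macro. A minimal sketch of a ->direct_IO method under the new prototype; "myfs_get_block" and "myfs_write_failed" are hypothetical helpers in the ext2/ext4 style, not code from this series:

```c
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical helpers, declared only so the sketch stands alone. */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);
extern void myfs_write_failed(struct inode *inode, loff_t to);

static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, offset, myfs_get_block);

	/* The direction now lives in the iterator, not in an rw parameter. */
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		myfs_write_failed(inode, offset + iov_iter_count(iter));

	return ret;
}
```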
diff --git a/mm/filemap.c b/mm/filemap.c index 12548d03c11d..6bf5e42d560a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -1693,7 +1693,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) | |||
| 1693 | loff_t *ppos = &iocb->ki_pos; | 1693 | loff_t *ppos = &iocb->ki_pos; |
| 1694 | loff_t pos = *ppos; | 1694 | loff_t pos = *ppos; |
| 1695 | 1695 | ||
| 1696 | if (io_is_direct(file)) { | 1696 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 1697 | struct address_space *mapping = file->f_mapping; | 1697 | struct address_space *mapping = file->f_mapping; |
| 1698 | struct inode *inode = mapping->host; | 1698 | struct inode *inode = mapping->host; |
| 1699 | size_t count = iov_iter_count(iter); | 1699 | size_t count = iov_iter_count(iter); |
| @@ -1706,7 +1706,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) | |||
| 1706 | pos + count - 1); | 1706 | pos + count - 1); |
| 1707 | if (!retval) { | 1707 | if (!retval) { |
| 1708 | struct iov_iter data = *iter; | 1708 | struct iov_iter data = *iter; |
| 1709 | retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos); | 1709 | retval = mapping->a_ops->direct_IO(iocb, &data, pos); |
| 1710 | } | 1710 | } |
| 1711 | 1711 | ||
| 1712 | if (retval > 0) { | 1712 | if (retval > 0) { |
| @@ -2259,41 +2259,38 @@ EXPORT_SYMBOL(read_cache_page_gfp); | |||
| 2259 | * Returns appropriate error code that caller should return or | 2259 | * Returns appropriate error code that caller should return or |
| 2260 | * zero in case that write should be allowed. | 2260 | * zero in case that write should be allowed. |
| 2261 | */ | 2261 | */ |
| 2262 | inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) | 2262 | inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) |
| 2263 | { | 2263 | { |
| 2264 | struct file *file = iocb->ki_filp; | ||
| 2264 | struct inode *inode = file->f_mapping->host; | 2265 | struct inode *inode = file->f_mapping->host; |
| 2265 | unsigned long limit = rlimit(RLIMIT_FSIZE); | 2266 | unsigned long limit = rlimit(RLIMIT_FSIZE); |
| 2267 | loff_t pos; | ||
| 2266 | 2268 | ||
| 2267 | if (unlikely(*pos < 0)) | 2269 | if (!iov_iter_count(from)) |
| 2268 | return -EINVAL; | 2270 | return 0; |
| 2269 | 2271 | ||
| 2270 | if (!isblk) { | 2272 | /* FIXME: this is for backwards compatibility with 2.4 */ |
| 2271 | /* FIXME: this is for backwards compatibility with 2.4 */ | 2273 | if (iocb->ki_flags & IOCB_APPEND) |
| 2272 | if (file->f_flags & O_APPEND) | 2274 | iocb->ki_pos = i_size_read(inode); |
| 2273 | *pos = i_size_read(inode); | ||
| 2274 | 2275 | ||
| 2275 | if (limit != RLIM_INFINITY) { | 2276 | pos = iocb->ki_pos; |
| 2276 | if (*pos >= limit) { | 2277 | |
| 2277 | send_sig(SIGXFSZ, current, 0); | 2278 | if (limit != RLIM_INFINITY) { |
| 2278 | return -EFBIG; | 2279 | if (iocb->ki_pos >= limit) { |
| 2279 | } | 2280 | send_sig(SIGXFSZ, current, 0); |
| 2280 | if (*count > limit - (typeof(limit))*pos) { | 2281 | return -EFBIG; |
| 2281 | *count = limit - (typeof(limit))*pos; | ||
| 2282 | } | ||
| 2283 | } | 2282 | } |
| 2283 | iov_iter_truncate(from, limit - (unsigned long)pos); | ||
| 2284 | } | 2284 | } |
| 2285 | 2285 | ||
| 2286 | /* | 2286 | /* |
| 2287 | * LFS rule | 2287 | * LFS rule |
| 2288 | */ | 2288 | */ |
| 2289 | if (unlikely(*pos + *count > MAX_NON_LFS && | 2289 | if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && |
| 2290 | !(file->f_flags & O_LARGEFILE))) { | 2290 | !(file->f_flags & O_LARGEFILE))) { |
| 2291 | if (*pos >= MAX_NON_LFS) { | 2291 | if (pos >= MAX_NON_LFS) |
| 2292 | return -EFBIG; | 2292 | return -EFBIG; |
| 2293 | } | 2293 | iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); |
| 2294 | if (*count > MAX_NON_LFS - (unsigned long)*pos) { | ||
| 2295 | *count = MAX_NON_LFS - (unsigned long)*pos; | ||
| 2296 | } | ||
| 2297 | } | 2294 | } |
| 2298 | 2295 | ||
| 2299 | /* | 2296 | /* |
| @@ -2303,34 +2300,11 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i | |||
| 2303 | * exceeded without writing data we send a signal and return EFBIG. | 2300 | * exceeded without writing data we send a signal and return EFBIG. |
| 2304 | * Linus frestrict idea will clean these up nicely.. | 2301 | * Linus frestrict idea will clean these up nicely.. |
| 2305 | */ | 2302 | */ |
| 2306 | if (likely(!isblk)) { | 2303 | if (unlikely(pos >= inode->i_sb->s_maxbytes)) |
| 2307 | if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { | 2304 | return -EFBIG; |
| 2308 | if (*count || *pos > inode->i_sb->s_maxbytes) { | ||
| 2309 | return -EFBIG; | ||
| 2310 | } | ||
| 2311 | /* zero-length writes at ->s_maxbytes are OK */ | ||
| 2312 | } | ||
| 2313 | 2305 | ||
| 2314 | if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) | 2306 | iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); |
| 2315 | *count = inode->i_sb->s_maxbytes - *pos; | 2307 | return iov_iter_count(from); |
| 2316 | } else { | ||
| 2317 | #ifdef CONFIG_BLOCK | ||
| 2318 | loff_t isize; | ||
| 2319 | if (bdev_read_only(I_BDEV(inode))) | ||
| 2320 | return -EPERM; | ||
| 2321 | isize = i_size_read(inode); | ||
| 2322 | if (*pos >= isize) { | ||
| 2323 | if (*count || *pos > isize) | ||
| 2324 | return -ENOSPC; | ||
| 2325 | } | ||
| 2326 | |||
| 2327 | if (*pos + *count > isize) | ||
| 2328 | *count = isize - *pos; | ||
| 2329 | #else | ||
| 2330 | return -EPERM; | ||
| 2331 | #endif | ||
| 2332 | } | ||
| 2333 | return 0; | ||
| 2334 | } | 2308 | } |
| 2335 | EXPORT_SYMBOL(generic_write_checks); | 2309 | EXPORT_SYMBOL(generic_write_checks); |
| 2336 | 2310 | ||
| @@ -2394,7 +2368,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) | |||
| 2394 | } | 2368 | } |
| 2395 | 2369 | ||
| 2396 | data = *from; | 2370 | data = *from; |
| 2397 | written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos); | 2371 | written = mapping->a_ops->direct_IO(iocb, &data, pos); |
| 2398 | 2372 | ||
| 2399 | /* | 2373 | /* |
| 2400 | * Finally, try again to invalidate clean pages which might have been | 2374 | * Finally, try again to invalidate clean pages which might have been |
| @@ -2556,23 +2530,12 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2556 | struct file *file = iocb->ki_filp; | 2530 | struct file *file = iocb->ki_filp; |
| 2557 | struct address_space * mapping = file->f_mapping; | 2531 | struct address_space * mapping = file->f_mapping; |
| 2558 | struct inode *inode = mapping->host; | 2532 | struct inode *inode = mapping->host; |
| 2559 | loff_t pos = iocb->ki_pos; | ||
| 2560 | ssize_t written = 0; | 2533 | ssize_t written = 0; |
| 2561 | ssize_t err; | 2534 | ssize_t err; |
| 2562 | ssize_t status; | 2535 | ssize_t status; |
| 2563 | size_t count = iov_iter_count(from); | ||
| 2564 | 2536 | ||
| 2565 | /* We can write back this queue in page reclaim */ | 2537 | /* We can write back this queue in page reclaim */ |
| 2566 | current->backing_dev_info = inode_to_bdi(inode); | 2538 | current->backing_dev_info = inode_to_bdi(inode); |
| 2567 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | ||
| 2568 | if (err) | ||
| 2569 | goto out; | ||
| 2570 | |||
| 2571 | if (count == 0) | ||
| 2572 | goto out; | ||
| 2573 | |||
| 2574 | iov_iter_truncate(from, count); | ||
| 2575 | |||
| 2576 | err = file_remove_suid(file); | 2539 | err = file_remove_suid(file); |
| 2577 | if (err) | 2540 | if (err) |
| 2578 | goto out; | 2541 | goto out; |
| @@ -2581,10 +2544,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2581 | if (err) | 2544 | if (err) |
| 2582 | goto out; | 2545 | goto out; |
| 2583 | 2546 | ||
| 2584 | if (io_is_direct(file)) { | 2547 | if (iocb->ki_flags & IOCB_DIRECT) { |
| 2585 | loff_t endbyte; | 2548 | loff_t pos, endbyte; |
| 2586 | 2549 | ||
| 2587 | written = generic_file_direct_write(iocb, from, pos); | 2550 | written = generic_file_direct_write(iocb, from, iocb->ki_pos); |
| 2588 | /* | 2551 | /* |
| 2589 | * If the write stopped short of completing, fall back to | 2552 | * If the write stopped short of completing, fall back to |
| 2590 | * buffered writes. Some filesystems do this for writes to | 2553 | * buffered writes. Some filesystems do this for writes to |
| @@ -2592,13 +2555,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2592 | * not succeed (even if it did, DAX does not handle dirty | 2555 | * not succeed (even if it did, DAX does not handle dirty |
| 2593 | * page-cache pages correctly). | 2556 | * page-cache pages correctly). |
| 2594 | */ | 2557 | */ |
| 2595 | if (written < 0 || written == count || IS_DAX(inode)) | 2558 | if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) |
| 2596 | goto out; | 2559 | goto out; |
| 2597 | 2560 | ||
| 2598 | pos += written; | 2561 | status = generic_perform_write(file, from, pos = iocb->ki_pos); |
| 2599 | count -= written; | ||
| 2600 | |||
| 2601 | status = generic_perform_write(file, from, pos); | ||
| 2602 | /* | 2562 | /* |
| 2603 | * If generic_perform_write() returned a synchronous error | 2563 | * If generic_perform_write() returned a synchronous error |
| 2604 | * then we want to return the number of bytes which were | 2564 | * then we want to return the number of bytes which were |
| @@ -2610,15 +2570,15 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2610 | err = status; | 2570 | err = status; |
| 2611 | goto out; | 2571 | goto out; |
| 2612 | } | 2572 | } |
| 2613 | iocb->ki_pos = pos + status; | ||
| 2614 | /* | 2573 | /* |
| 2615 | * We need to ensure that the page cache pages are written to | 2574 | * We need to ensure that the page cache pages are written to |
| 2616 | * disk and invalidated to preserve the expected O_DIRECT | 2575 | * disk and invalidated to preserve the expected O_DIRECT |
| 2617 | * semantics. | 2576 | * semantics. |
| 2618 | */ | 2577 | */ |
| 2619 | endbyte = pos + status - 1; | 2578 | endbyte = pos + status - 1; |
| 2620 | err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); | 2579 | err = filemap_write_and_wait_range(mapping, pos, endbyte); |
| 2621 | if (err == 0) { | 2580 | if (err == 0) { |
| 2581 | iocb->ki_pos = endbyte + 1; | ||
| 2622 | written += status; | 2582 | written += status; |
| 2623 | invalidate_mapping_pages(mapping, | 2583 | invalidate_mapping_pages(mapping, |
| 2624 | pos >> PAGE_CACHE_SHIFT, | 2584 | pos >> PAGE_CACHE_SHIFT, |
| @@ -2630,9 +2590,9 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2630 | */ | 2590 | */ |
| 2631 | } | 2591 | } |
| 2632 | } else { | 2592 | } else { |
| 2633 | written = generic_perform_write(file, from, pos); | 2593 | written = generic_perform_write(file, from, iocb->ki_pos); |
| 2634 | if (likely(written >= 0)) | 2594 | if (likely(written > 0)) |
| 2635 | iocb->ki_pos = pos + written; | 2595 | iocb->ki_pos += written; |
| 2636 | } | 2596 | } |
| 2637 | out: | 2597 | out: |
| 2638 | current->backing_dev_info = NULL; | 2598 | current->backing_dev_info = NULL; |
| @@ -2656,7 +2616,9 @@ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 2656 | ssize_t ret; | 2616 | ssize_t ret; |
| 2657 | 2617 | ||
| 2658 | mutex_lock(&inode->i_mutex); | 2618 | mutex_lock(&inode->i_mutex); |
| 2659 | ret = __generic_file_write_iter(iocb, from); | 2619 | ret = generic_write_checks(iocb, from); |
| 2620 | if (ret > 0) | ||
| 2621 | ret = __generic_file_write_iter(iocb, from); | ||
| 2660 | mutex_unlock(&inode->i_mutex); | 2622 | mutex_unlock(&inode->i_mutex); |
| 2661 | 2623 | ||
| 2662 | if (ret > 0) { | 2624 | if (ret > 0) { |
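The reworked `generic_write_checks()` takes the iocb and the iov_iter, moves `ki_pos` to EOF for IOCB_APPEND, truncates the iterator to what the rlimit/LFS/s_maxbytes rules allow, and returns the remaining byte count (zero means nothing to do, negative means error). `generic_file_write_iter()` above shows the canonical calling order: take `i_mutex`, run the checks, then do the write. A minimal sketch of a filesystem's own ->write_iter following the same convention (hypothetical names; the sync tail mirrors what the generic routine does after dropping the lock):

```c
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t otherfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);	/* truncates "from", fixes ki_pos */
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		/* Flush the range just written if O_SYNC/O_DSYNC demand it. */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
```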
diff --git a/mm/page_io.c b/mm/page_io.c index a96c8562d835..6424869e275e 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
| @@ -277,9 +277,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, | |||
| 277 | 277 | ||
| 278 | set_page_writeback(page); | 278 | set_page_writeback(page); |
| 279 | unlock_page(page); | 279 | unlock_page(page); |
| 280 | ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE, | 280 | ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos); |
| 281 | &kiocb, &from, | ||
| 282 | kiocb.ki_pos); | ||
| 283 | if (ret == PAGE_SIZE) { | 281 | if (ret == PAGE_SIZE) { |
| 284 | count_vm_event(PSWPOUT); | 282 | count_vm_event(PSWPOUT); |
| 285 | ret = 0; | 283 | ret = 0; |
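In the swap-out hunk above, the `ITER_BVEC | WRITE` that used to be passed as ->direct_IO()'s first argument does not disappear; it is already encoded in the iterator that `__swap_writepage()` builds, so the callee can ask `iov_iter_rw()` instead. A minimal sketch of that setup, reconstructed from the surrounding API of this kernel (helper name and arguments are illustrative, not verbatim from the patch):

```c
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Hypothetical: write one page through the mapping's ->direct_IO(). */
static ssize_t write_one_page_direct(struct file *file, struct page *page,
				     loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	struct bio_vec bv = {
		.bv_page   = page,
		.bv_len    = PAGE_SIZE,
		.bv_offset = 0,
	};
	struct iov_iter from;
	struct kiocb kiocb;

	/* Direction (WRITE) travels inside the iterator itself. */
	iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);

	init_sync_kiocb(&kiocb, file);	/* picks up IOCB_* flags from the file */
	kiocb.ki_pos = pos;

	/* No READ/WRITE argument any more, matching the hunk above. */
	return mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
}
```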
