| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-16 23:27:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-16 23:27:56 -0400 |
| commit | 4fc8adcfec3da639da76e8314c9ccefe5bf9a045 | |
| tree | e07a2dea8acf04d8bbbecd4fd3a571653ecdd953 /drivers | |
| parent | 84588e7a5d8220446d677d7b909a20ee7a4496b9 | |
| parent | aa4d86163e4e91a1ac560954a554bab417e338f4 | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull third hunk of vfs changes from Al Viro:
"This contains the ->direct_IO() changes from Omar + saner
generic_write_checks() + dealing with fcntl()/{read,write}() races
(mirroring O_APPEND/O_DIRECT into iocb->ki_flags and instead of
repeatedly looking at ->f_flags, which can be changed by fcntl(2),
check ->ki_flags - which cannot) + infrastructure bits for dhowells'
d_inode annotations + Christoph's switch of /dev/loop to
vfs_iter_write()"
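
The fcntl()/read()/write() race mentioned above is closed by sampling the relevant open flags once, when the kiocb is set up, so a concurrent fcntl(F_SETFL) cannot change the semantics of an I/O that is already in flight. A minimal sketch of that mirroring, assuming the IOCB_APPEND/IOCB_DIRECT flag names this series introduces (simplified, not the verbatim kernel helper):

```c
/* Sketch: snapshot O_APPEND/O_DIRECT into the kiocb at setup time.
 * Assumes the IOCB_APPEND/IOCB_DIRECT bits and kiocb->ki_flags added
 * by this series; simplified from the fs.h helper. */
static int iocb_flags(struct file *file)
{
        int res = 0;

        if (file->f_flags & O_APPEND)
                res |= IOCB_APPEND;
        if (file->f_flags & O_DIRECT)
                res |= IOCB_DIRECT;
        return res;
}

/* Illustrative helper: I/O paths test the stable snapshot in ki_flags,
 * not the racy file->f_flags. */
static inline bool iocb_is_append(const struct kiocb *iocb)
{
        return iocb->ki_flags & IOCB_APPEND;
}
```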
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (30 commits)
block: loop: switch to VFS ITER_BVEC
configfs: Fix inconsistent use of file_inode() vs file->f_path.dentry->d_inode
VFS: Make pathwalk use d_is_reg() rather than S_ISREG()
VFS: Fix up debugfs to use d_is_dir() in place of S_ISDIR()
VFS: Combine inode checks with d_is_negative() and d_is_positive() in pathwalk
NFS: Don't use d_inode as a variable name
VFS: Impose ordering on accesses of d_inode and d_flags
VFS: Add owner-filesystem positive/negative dentry checks
nfs: generic_write_checks() shouldn't be done on swapout...
ocfs2: use __generic_file_write_iter()
mirror O_APPEND and O_DIRECT into iocb->ki_flags
switch generic_write_checks() to iocb and iter
ocfs2: move generic_write_checks() before the alignment checks
ocfs2_file_write_iter: stop messing with ppos
udf_file_write_iter: reorder and simplify
fuse: ->direct_IO() doesn't need generic_write_checks()
ext4_file_write_iter: move generic_write_checks() up
xfs_file_aio_write_checks: switch to iocb/iov_iter
generic_write_checks(): drop isblk argument
blkdev_write_iter: expand generic_file_checks() call in there
...
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/block/loop.c | 294 |
| -rw-r--r-- | drivers/staging/lustre/lustre/llite/rw26.c | 22 |
2 files changed, 131 insertions(+), 185 deletions(-)
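
The bulk of the loop.c diff below replaces the old kmap()-plus-splice read/write helpers with bvec-backed iov_iters handed directly to vfs_iter_read()/vfs_iter_write(). The core write pattern, condensed from the new lo_write_bvec() in the diff (the function name here is only a stand-in):

```c
/* Condensed sketch of the new write path: wrap one bio_vec in an
 * ITER_BVEC iov_iter and let the VFS write it to the backing file. */
static int write_one_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
        struct iov_iter i;
        ssize_t bw;

        iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

        file_start_write(file);
        bw = vfs_iter_write(file, &i, ppos);    /* advances *ppos on success */
        file_end_write(file);

        if (bw == bvec->bv_len)
                return 0;
        return bw < 0 ? bw : -EIO;              /* short write -> I/O error */
}
```

The read side is symmetric: lo_read_simple() builds the same single-segment ITER_BVEC iterator and calls vfs_iter_read(), zero-filling the request if the backing file comes up short.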
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c4fd1e45ce1e..ae3fcb4199e9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -88,28 +88,6 @@ static int part_shift;
| 88 | 88 | ||
| 89 | static struct workqueue_struct *loop_wq; | 89 | static struct workqueue_struct *loop_wq; |
| 90 | 90 | ||
| 91 | /* | ||
| 92 | * Transfer functions | ||
| 93 | */ | ||
| 94 | static int transfer_none(struct loop_device *lo, int cmd, | ||
| 95 | struct page *raw_page, unsigned raw_off, | ||
| 96 | struct page *loop_page, unsigned loop_off, | ||
| 97 | int size, sector_t real_block) | ||
| 98 | { | ||
| 99 | char *raw_buf = kmap_atomic(raw_page) + raw_off; | ||
| 100 | char *loop_buf = kmap_atomic(loop_page) + loop_off; | ||
| 101 | |||
| 102 | if (cmd == READ) | ||
| 103 | memcpy(loop_buf, raw_buf, size); | ||
| 104 | else | ||
| 105 | memcpy(raw_buf, loop_buf, size); | ||
| 106 | |||
| 107 | kunmap_atomic(loop_buf); | ||
| 108 | kunmap_atomic(raw_buf); | ||
| 109 | cond_resched(); | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static int transfer_xor(struct loop_device *lo, int cmd, | 91 | static int transfer_xor(struct loop_device *lo, int cmd, |
| 114 | struct page *raw_page, unsigned raw_off, | 92 | struct page *raw_page, unsigned raw_off, |
| 115 | struct page *loop_page, unsigned loop_off, | 93 | struct page *loop_page, unsigned loop_off, |
@@ -148,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
| 148 | 126 | ||
| 149 | static struct loop_func_table none_funcs = { | 127 | static struct loop_func_table none_funcs = { |
| 150 | .number = LO_CRYPT_NONE, | 128 | .number = LO_CRYPT_NONE, |
| 151 | .transfer = transfer_none, | 129 | }; |
| 152 | }; | ||
| 153 | 130 | ||
| 154 | static struct loop_func_table xor_funcs = { | 131 | static struct loop_func_table xor_funcs = { |
| 155 | .number = LO_CRYPT_XOR, | 132 | .number = LO_CRYPT_XOR, |
| 156 | .transfer = transfer_xor, | 133 | .transfer = transfer_xor, |
| 157 | .init = xor_init | 134 | .init = xor_init |
| 158 | }; | 135 | }; |
| 159 | 136 | ||
| 160 | /* xfer_funcs[0] is special - its release function is never called */ | 137 | /* xfer_funcs[0] is special - its release function is never called */ |
| 161 | static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { | 138 | static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { |
@@ -215,207 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
| 215 | struct page *lpage, unsigned loffs, | 192 | struct page *lpage, unsigned loffs, |
| 216 | int size, sector_t rblock) | 193 | int size, sector_t rblock) |
| 217 | { | 194 | { |
| 218 | if (unlikely(!lo->transfer)) | 195 | int ret; |
| 196 | |||
| 197 | ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); | ||
| 198 | if (likely(!ret)) | ||
| 219 | return 0; | 199 | return 0; |
| 220 | 200 | ||
| 221 | return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); | 201 | printk_ratelimited(KERN_ERR |
| 202 | "loop: Transfer error at byte offset %llu, length %i.\n", | ||
| 203 | (unsigned long long)rblock << 9, size); | ||
| 204 | return ret; | ||
| 222 | } | 205 | } |
| 223 | 206 | ||
| 224 | /** | 207 | static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) |
| 225 | * __do_lo_send_write - helper for writing data to a loop device | ||
| 226 | * | ||
| 227 | * This helper just factors out common code between do_lo_send_direct_write() | ||
| 228 | * and do_lo_send_write(). | ||
| 229 | */ | ||
| 230 | static int __do_lo_send_write(struct file *file, | ||
| 231 | u8 *buf, const int len, loff_t pos) | ||
| 232 | { | 208 | { |
| 233 | struct kvec kvec = {.iov_base = buf, .iov_len = len}; | 209 | struct iov_iter i; |
| 234 | struct iov_iter from; | ||
| 235 | ssize_t bw; | 210 | ssize_t bw; |
| 236 | 211 | ||
| 237 | iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len); | 212 | iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); |
| 238 | 213 | ||
| 239 | file_start_write(file); | 214 | file_start_write(file); |
| 240 | bw = vfs_iter_write(file, &from, &pos); | 215 | bw = vfs_iter_write(file, &i, ppos); |
| 241 | file_end_write(file); | 216 | file_end_write(file); |
| 242 | if (likely(bw == len)) | 217 | |
| 218 | if (likely(bw == bvec->bv_len)) | ||
| 243 | return 0; | 219 | return 0; |
| 244 | printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", | 220 | |
| 245 | (unsigned long long)pos, len); | 221 | printk_ratelimited(KERN_ERR |
| 222 | "loop: Write error at byte offset %llu, length %i.\n", | ||
| 223 | (unsigned long long)*ppos, bvec->bv_len); | ||
| 246 | if (bw >= 0) | 224 | if (bw >= 0) |
| 247 | bw = -EIO; | 225 | bw = -EIO; |
| 248 | return bw; | 226 | return bw; |
| 249 | } | 227 | } |
| 250 | 228 | ||
| 251 | /** | 229 | static int lo_write_simple(struct loop_device *lo, struct request *rq, |
| 252 | * do_lo_send_direct_write - helper for writing data to a loop device | 230 | loff_t pos) |
| 253 | * | ||
| 254 | * This is the fast, non-transforming version that does not need double | ||
| 255 | * buffering. | ||
| 256 | */ | ||
| 257 | static int do_lo_send_direct_write(struct loop_device *lo, | ||
| 258 | struct bio_vec *bvec, loff_t pos, struct page *page) | ||
| 259 | { | 231 | { |
| 260 | ssize_t bw = __do_lo_send_write(lo->lo_backing_file, | 232 | struct bio_vec bvec; |
| 261 | kmap(bvec->bv_page) + bvec->bv_offset, | 233 | struct req_iterator iter; |
| 262 | bvec->bv_len, pos); | 234 | int ret = 0; |
| 263 | kunmap(bvec->bv_page); | 235 | |
| 264 | cond_resched(); | 236 | rq_for_each_segment(bvec, rq, iter) { |
| 265 | return bw; | 237 | ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); |
| 238 | if (ret < 0) | ||
| 239 | break; | ||
| 240 | cond_resched(); | ||
| 241 | } | ||
| 242 | |||
| 243 | return ret; | ||
| 266 | } | 244 | } |
| 267 | 245 | ||
| 268 | /** | 246 | /* |
| 269 | * do_lo_send_write - helper for writing data to a loop device | ||
| 270 | * | ||
| 271 | * This is the slow, transforming version that needs to double buffer the | 247 | * This is the slow, transforming version that needs to double buffer the |
| 272 | * data as it cannot do the transformations in place without having direct | 248 | * data as it cannot do the transformations in place without having direct |
| 273 | * access to the destination pages of the backing file. | 249 | * access to the destination pages of the backing file. |
| 274 | */ | 250 | */ |
| 275 | static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, | 251 | static int lo_write_transfer(struct loop_device *lo, struct request *rq, |
| 276 | loff_t pos, struct page *page) | 252 | loff_t pos) |
| 277 | { | 253 | { |
| 278 | int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, | 254 | struct bio_vec bvec, b; |
| 279 | bvec->bv_offset, bvec->bv_len, pos >> 9); | ||
| 280 | if (likely(!ret)) | ||
| 281 | return __do_lo_send_write(lo->lo_backing_file, | ||
| 282 | page_address(page), bvec->bv_len, | ||
| 283 | pos); | ||
| 284 | printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, " | ||
| 285 | "length %i.\n", (unsigned long long)pos, bvec->bv_len); | ||
| 286 | if (ret > 0) | ||
| 287 | ret = -EIO; | ||
| 288 | return ret; | ||
| 289 | } | ||
| 290 | |||
| 291 | static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos) | ||
| 292 | { | ||
| 293 | int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, | ||
| 294 | struct page *page); | ||
| 295 | struct bio_vec bvec; | ||
| 296 | struct req_iterator iter; | 255 | struct req_iterator iter; |
| 297 | struct page *page = NULL; | 256 | struct page *page; |
| 298 | int ret = 0; | 257 | int ret = 0; |
| 299 | 258 | ||
| 300 | if (lo->transfer != transfer_none) { | 259 | page = alloc_page(GFP_NOIO); |
| 301 | page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); | 260 | if (unlikely(!page)) |
| 302 | if (unlikely(!page)) | 261 | return -ENOMEM; |
| 303 | goto fail; | ||
| 304 | kmap(page); | ||
| 305 | do_lo_send = do_lo_send_write; | ||
| 306 | } else { | ||
| 307 | do_lo_send = do_lo_send_direct_write; | ||
| 308 | } | ||
| 309 | 262 | ||
| 310 | rq_for_each_segment(bvec, rq, iter) { | 263 | rq_for_each_segment(bvec, rq, iter) { |
| 311 | ret = do_lo_send(lo, &bvec, pos, page); | 264 | ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, |
| 265 | bvec.bv_offset, bvec.bv_len, pos >> 9); | ||
| 266 | if (unlikely(ret)) | ||
| 267 | break; | ||
| 268 | |||
| 269 | b.bv_page = page; | ||
| 270 | b.bv_offset = 0; | ||
| 271 | b.bv_len = bvec.bv_len; | ||
| 272 | ret = lo_write_bvec(lo->lo_backing_file, &b, &pos); | ||
| 312 | if (ret < 0) | 273 | if (ret < 0) |
| 313 | break; | 274 | break; |
| 314 | pos += bvec.bv_len; | ||
| 315 | } | 275 | } |
| 316 | if (page) { | 276 | |
| 317 | kunmap(page); | 277 | __free_page(page); |
| 318 | __free_page(page); | ||
| 319 | } | ||
| 320 | out: | ||
| 321 | return ret; | 278 | return ret; |
| 322 | fail: | ||
| 323 | printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n"); | ||
| 324 | ret = -ENOMEM; | ||
| 325 | goto out; | ||
| 326 | } | 279 | } |
| 327 | 280 | ||
| 328 | struct lo_read_data { | 281 | static int lo_read_simple(struct loop_device *lo, struct request *rq, |
| 329 | struct loop_device *lo; | 282 | loff_t pos) |
| 330 | struct page *page; | 283 | { |
| 331 | unsigned offset; | 284 | struct bio_vec bvec; |
| 332 | int bsize; | 285 | struct req_iterator iter; |
| 333 | }; | 286 | struct iov_iter i; |
| 287 | ssize_t len; | ||
| 334 | 288 | ||
| 335 | static int | 289 | rq_for_each_segment(bvec, rq, iter) { |
| 336 | lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | 290 | iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); |
| 337 | struct splice_desc *sd) | 291 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos); |
| 338 | { | 292 | if (len < 0) |
| 339 | struct lo_read_data *p = sd->u.data; | 293 | return len; |
| 340 | struct loop_device *lo = p->lo; | ||
| 341 | struct page *page = buf->page; | ||
| 342 | sector_t IV; | ||
| 343 | int size; | ||
| 344 | |||
| 345 | IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + | ||
| 346 | (buf->offset >> 9); | ||
| 347 | size = sd->len; | ||
| 348 | if (size > p->bsize) | ||
| 349 | size = p->bsize; | ||
| 350 | |||
| 351 | if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) { | ||
| 352 | printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n", | ||
| 353 | page->index); | ||
| 354 | size = -EINVAL; | ||
| 355 | } | ||
| 356 | 294 | ||
| 357 | flush_dcache_page(p->page); | 295 | flush_dcache_page(bvec.bv_page); |
| 358 | 296 | ||
| 359 | if (size > 0) | 297 | if (len != bvec.bv_len) { |
| 360 | p->offset += size; | 298 | struct bio *bio; |
| 361 | 299 | ||
| 362 | return size; | 300 | __rq_for_each_bio(bio, rq) |
| 363 | } | 301 | zero_fill_bio(bio); |
| 302 | break; | ||
| 303 | } | ||
| 304 | cond_resched(); | ||
| 305 | } | ||
| 364 | 306 | ||
| 365 | static int | 307 | return 0; |
| 366 | lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) | ||
| 367 | { | ||
| 368 | return __splice_from_pipe(pipe, sd, lo_splice_actor); | ||
| 369 | } | 308 | } |
| 370 | 309 | ||
| 371 | static ssize_t | 310 | static int lo_read_transfer(struct loop_device *lo, struct request *rq, |
| 372 | do_lo_receive(struct loop_device *lo, | 311 | loff_t pos) |
| 373 | struct bio_vec *bvec, int bsize, loff_t pos) | ||
| 374 | { | 312 | { |
| 375 | struct lo_read_data cookie; | 313 | struct bio_vec bvec, b; |
| 376 | struct splice_desc sd; | 314 | struct req_iterator iter; |
| 377 | struct file *file; | 315 | struct iov_iter i; |
| 378 | ssize_t retval; | 316 | struct page *page; |
| 317 | ssize_t len; | ||
| 318 | int ret = 0; | ||
| 379 | 319 | ||
| 380 | cookie.lo = lo; | 320 | page = alloc_page(GFP_NOIO); |
| 381 | cookie.page = bvec->bv_page; | 321 | if (unlikely(!page)) |
| 382 | cookie.offset = bvec->bv_offset; | 322 | return -ENOMEM; |
| 383 | cookie.bsize = bsize; | ||
| 384 | 323 | ||
| 385 | sd.len = 0; | 324 | rq_for_each_segment(bvec, rq, iter) { |
| 386 | sd.total_len = bvec->bv_len; | 325 | loff_t offset = pos; |
| 387 | sd.flags = 0; | ||
| 388 | sd.pos = pos; | ||
| 389 | sd.u.data = &cookie; | ||
| 390 | 326 | ||
| 391 | file = lo->lo_backing_file; | 327 | b.bv_page = page; |
| 392 | retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); | 328 | b.bv_offset = 0; |
| 329 | b.bv_len = bvec.bv_len; | ||
| 393 | 330 | ||
| 394 | return retval; | 331 | iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); |
| 395 | } | 332 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos); |
| 333 | if (len < 0) { | ||
| 334 | ret = len; | ||
| 335 | goto out_free_page; | ||
| 336 | } | ||
| 396 | 337 | ||
| 397 | static int | 338 | ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, |
| 398 | lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos) | 339 | bvec.bv_offset, len, offset >> 9); |
| 399 | { | 340 | if (ret) |
| 400 | struct bio_vec bvec; | 341 | goto out_free_page; |
| 401 | struct req_iterator iter; | ||
| 402 | ssize_t s; | ||
| 403 | 342 | ||
| 404 | rq_for_each_segment(bvec, rq, iter) { | 343 | flush_dcache_page(bvec.bv_page); |
| 405 | s = do_lo_receive(lo, &bvec, bsize, pos); | ||
| 406 | if (s < 0) | ||
| 407 | return s; | ||
| 408 | 344 | ||
| 409 | if (s != bvec.bv_len) { | 345 | if (len != bvec.bv_len) { |
| 410 | struct bio *bio; | 346 | struct bio *bio; |
| 411 | 347 | ||
| 412 | __rq_for_each_bio(bio, rq) | 348 | __rq_for_each_bio(bio, rq) |
| 413 | zero_fill_bio(bio); | 349 | zero_fill_bio(bio); |
| 414 | break; | 350 | break; |
| 415 | } | 351 | } |
| 416 | pos += bvec.bv_len; | ||
| 417 | } | 352 | } |
| 418 | return 0; | 353 | |
| 354 | ret = 0; | ||
| 355 | out_free_page: | ||
| 356 | __free_page(page); | ||
| 357 | return ret; | ||
| 419 | } | 358 | } |
| 420 | 359 | ||
| 421 | static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) | 360 | static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) |
@@ -464,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
| 464 | ret = lo_req_flush(lo, rq); | 403 | ret = lo_req_flush(lo, rq); |
| 465 | else if (rq->cmd_flags & REQ_DISCARD) | 404 | else if (rq->cmd_flags & REQ_DISCARD) |
| 466 | ret = lo_discard(lo, rq, pos); | 405 | ret = lo_discard(lo, rq, pos); |
| 406 | else if (lo->transfer) | ||
| 407 | ret = lo_write_transfer(lo, rq, pos); | ||
| 467 | else | 408 | else |
| 468 | ret = lo_send(lo, rq, pos); | 409 | ret = lo_write_simple(lo, rq, pos); |
| 469 | } else | 410 | |
| 470 | ret = lo_receive(lo, rq, lo->lo_blocksize, pos); | 411 | } else { |
| 412 | if (lo->transfer) | ||
| 413 | ret = lo_read_transfer(lo, rq, pos); | ||
| 414 | else | ||
| 415 | ret = lo_read_simple(lo, rq, pos); | ||
| 416 | } | ||
| 471 | 417 | ||
| 472 | return ret; | 418 | return ret; |
| 473 | } | 419 | } |
@@ -788,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
| 788 | lo->lo_device = bdev; | 734 | lo->lo_device = bdev; |
| 789 | lo->lo_flags = lo_flags; | 735 | lo->lo_flags = lo_flags; |
| 790 | lo->lo_backing_file = file; | 736 | lo->lo_backing_file = file; |
| 791 | lo->transfer = transfer_none; | 737 | lo->transfer = NULL; |
| 792 | lo->ioctl = NULL; | 738 | lo->ioctl = NULL; |
| 793 | lo->lo_sizelimit = 0; | 739 | lo->lo_sizelimit = 0; |
| 794 | lo->old_gfp_mask = mapping_gfp_mask(mapping); | 740 | lo->old_gfp_mask = mapping_gfp_mask(mapping); |
@@ -1007,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
| 1007 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, | 953 | memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, |
| 1008 | info->lo_encrypt_key_size); | 954 | info->lo_encrypt_key_size); |
| 1009 | lo->lo_key_owner = uid; | 955 | lo->lo_key_owner = uid; |
| 1010 | } | 956 | } |
| 1011 | 957 | ||
| 1012 | return 0; | 958 | return 0; |
| 1013 | } | 959 | } |
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 91442fab5725..c6c824356464 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
| 359 | * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ | 359 | * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ |
| 360 | #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ | 360 | #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ |
| 361 | ~(DT_MAX_BRW_SIZE - 1)) | 361 | ~(DT_MAX_BRW_SIZE - 1)) |
| 362 | static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, | 362 | static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, |
| 363 | struct iov_iter *iter, loff_t file_offset) | 363 | loff_t file_offset) |
| 364 | { | 364 | { |
| 365 | struct lu_env *env; | 365 | struct lu_env *env; |
| 366 | struct cl_io *io; | 366 | struct cl_io *io; |
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 399 | * size changing by concurrent truncates and writes. | 399 | * size changing by concurrent truncates and writes. |
| 400 | * 1. Need inode mutex to operate transient pages. | 400 | * 1. Need inode mutex to operate transient pages. |
| 401 | */ | 401 | */ |
| 402 | if (rw == READ) | 402 | if (iov_iter_rw(iter) == READ) |
| 403 | mutex_lock(&inode->i_mutex); | 403 | mutex_lock(&inode->i_mutex); |
| 404 | 404 | ||
| 405 | LASSERT(obj->cob_transient_pages == 0); | 405 | LASSERT(obj->cob_transient_pages == 0); |
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 408 | size_t offs; | 408 | size_t offs; |
| 409 | 409 | ||
| 410 | count = min_t(size_t, iov_iter_count(iter), size); | 410 | count = min_t(size_t, iov_iter_count(iter), size); |
| 411 | if (rw == READ) { | 411 | if (iov_iter_rw(iter) == READ) { |
| 412 | if (file_offset >= i_size_read(inode)) | 412 | if (file_offset >= i_size_read(inode)) |
| 413 | break; | 413 | break; |
| 414 | if (file_offset + count > i_size_read(inode)) | 414 | if (file_offset + count > i_size_read(inode)) |
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 418 | result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); | 418 | result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); |
| 419 | if (likely(result > 0)) { | 419 | if (likely(result > 0)) { |
| 420 | int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); | 420 | int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); |
| 421 | result = ll_direct_IO_26_seg(env, io, rw, inode, | 421 | result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), |
| 422 | file->f_mapping, | 422 | inode, file->f_mapping, |
| 423 | result, file_offset, | 423 | result, file_offset, pages, |
| 424 | pages, n); | 424 | n); |
| 425 | ll_free_user_pages(pages, n, rw==READ); | 425 | ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); |
| 426 | } | 426 | } |
| 427 | if (unlikely(result <= 0)) { | 427 | if (unlikely(result <= 0)) { |
| 428 | /* If we can't allocate a large enough buffer | 428 | /* If we can't allocate a large enough buffer |
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
| 449 | } | 449 | } |
| 450 | out: | 450 | out: |
| 451 | LASSERT(obj->cob_transient_pages == 0); | 451 | LASSERT(obj->cob_transient_pages == 0); |
| 452 | if (rw == READ) | 452 | if (iov_iter_rw(iter) == READ) |
| 453 | mutex_unlock(&inode->i_mutex); | 453 | mutex_unlock(&inode->i_mutex); |
| 454 | 454 | ||
| 455 | if (tot_bytes > 0) { | 455 | if (tot_bytes > 0) { |
| 456 | if (rw == WRITE) { | 456 | if (iov_iter_rw(iter) == WRITE) { |
| 457 | struct lov_stripe_md *lsm; | 457 | struct lov_stripe_md *lsm; |
| 458 | 458 | ||
| 459 | lsm = ccc_inode_lsm_get(inode); | 459 | lsm = ccc_inode_lsm_get(inode); |
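
The rw26.c hunks above track the ->direct_IO() prototype change from this series: the explicit rw argument is gone and the transfer direction is taken from the iov_iter itself via iov_iter_rw(). A filesystem-neutral sketch of the new calling convention (example_direct_IO and example_dio_rw are illustrative stand-ins, not functions from this patch):

```c
/* Illustrative shape of a ->direct_IO() method after the prototype change:
 * direction comes from the iterator, the file comes from the kiocb.
 * example_dio_rw() stands in for the filesystem's real transfer engine. */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                 loff_t offset)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);

        if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
                return 0;       /* nothing to read past EOF */

        return example_dio_rw(inode, iter, offset, count,
                              iov_iter_rw(iter) == WRITE);
}
```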
