author	David S. Miller <davem@davemloft.net>	2017-10-22 08:36:53 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-22 08:39:14 -0400
commit	f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2
tree	0a6432aba336bae42313613f4c891bcfce02bd4e /fs/direct-io.c
parent	bdd091bab8c631bd2801af838e344fad34566410
parent	b5ac3beb5a9f0ef0ea64cd85faf94c0dc4de0e42
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.
Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms,
collided with the metadata additions.
Along with those three changes came verifier test cases, which in
their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the
meta tests unnecessarily.
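
As an aside, for readers unfamiliar with the "data_end > ptr + x" form
mentioned above, here is a plain-C sketch of that bounds-check shape. This
is an illustrative sketch only, not a loadable BPF object (real programs
are restricted C built for the bpf target), and the struct and function
names here are hypothetical:

	#include <stdint.h>

	struct hdr {				/* hypothetical packet header */
		uint8_t  dst[6], src[6];
		uint16_t proto;
	};

	static uint16_t parse_proto(void *data, void *data_end)
	{
		struct hdr *h = data;

		/* Classic form the verifier has long accepted:
		 *	if ((void *)(h + 1) > data_end) return 0;
		 * Mirrored form, proving the same bound with the
		 * operands swapped: */
		if (data_end > (void *)(h + 1))
			return h->proto;	/* whole header is in bounds */
		return 0;
	}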
In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
Changes were made to the mv88e6060.c driver's set_addr method,
which got removed in net-next.
The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'fs/direct-io.c')
-rw-r--r--	fs/direct-io.c	42
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
 #define DIO_PAGES	64
 
 /*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
+#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */
+
+/*
  * This code generally works in units of "dio_blocks".  A dio_block is
  * somewhere between the hard sector size and the filesystem block size.  it
  * is determined on a per-invocation basis.   When talking to the filesystem
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
 	loff_t offset = dio->iocb->ki_pos;
 	ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 	if (ret == 0)
 		ret = transferred;
 
+	if (dio->end_io) {
+		// XXX: ki_pos??
+		err = dio->end_io(dio->iocb, offset, ret, dio->private);
+		if (err)
+			ret = err;
+	}
+
 	/*
 	 * Try again to invalidate clean pages which might have been cached by
 	 * non-direct readahead, or faulted in by get_user_pages() if the source
 	 * of the write was an mmap'ed region of the file we're writing.  Either
 	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
 	 * this invalidation fails, tough, the write still worked...
+	 *
+	 * And this page cache invalidation has to be after dio->end_io(), as
+	 * some filesystems convert unwritten extents to real allocations in
+	 * end_io() when necessary, otherwise a racing buffer read would cache
+	 * zeros from unwritten extents.
 	 */
-	if (ret > 0 && dio->op == REQ_OP_WRITE &&
+	if (flags & DIO_COMPLETE_INVALIDATE &&
+	    ret > 0 && dio->op == REQ_OP_WRITE &&
 	    dio->inode->i_mapping->nrpages) {
 		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
 					offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 		WARN_ON_ONCE(err);
 	}
 
-	if (dio->end_io) {
-
-		// XXX: ki_pos??
-		err = dio->end_io(dio->iocb, offset, ret, dio->private);
-		if (err)
-			ret = err;
-	}
-
 	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
 		inode_dio_end(dio->inode);
 
-	if (is_async) {
+	if (flags & DIO_COMPLETE_ASYNC) {
 		/*
 		 * generic_write_sync expects ki_pos to have been updated
 		 * already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
 	struct dio *dio = container_of(work, struct dio, complete_work);
 
-	dio_complete(dio, 0, true);
+	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
 }
 
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
 			queue_work(dio->inode->i_sb->s_dio_done_wq,
 				   &dio->complete_work);
 		} else {
-			dio_complete(dio, 0, true);
+			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
 		}
 	}
 }
@@ -866,7 +877,8 @@ out:
 	 */
 	if (sdio->boundary) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
-		dio_bio_submit(dio, sdio);
+		if (sdio->bio)
+			dio_bio_submit(dio, sdio);
 		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 	}
@@ -1359,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
-		retval = dio_complete(dio, retval, false);
+		retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
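
Taken together, the hunks above replace dio_complete()'s bool is_async
parameter with a flags word and move the dio->end_io() callback ahead of
the page cache invalidation. The sketch below restates the resulting call
contract as a small stand-alone C program: the two flag values and the
completion ordering mirror the patch, while the stub bodies and the main()
harness are hypothetical, for illustration only.

	#include <stdio.h>

	#define DIO_COMPLETE_ASYNC	0x01	/* This is async IO */
	#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */

	/* Mirrors the ordering dio_complete() now enforces. */
	static void dio_complete_sketch(unsigned int flags)
	{
		/* 1. end_io() runs first, so filesystems can convert
		 *    unwritten extents to real allocations ... */
		puts("end_io(): convert unwritten extents");

		/* 2. ... and only then is the page cache invalidated, and
		 *    only when the caller's context allows it (the bio
		 *    end_io path may run in interrupt context, where
		 *    invalidate_inode_pages2_range() must not be called). */
		if (flags & DIO_COMPLETE_INVALIDATE)
			puts("invalidate clean cached pages");

		if (flags & DIO_COMPLETE_ASYNC)
			puts("complete the aio iocb");
	}

	int main(void)
	{
		/* deferred completion via workqueue (process context) */
		dio_complete_sketch(DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);

		/* bio completion without deferral (interrupt context) */
		dio_complete_sketch(DIO_COMPLETE_ASYNC);

		/* synchronous submitter path */
		dio_complete_sketch(DIO_COMPLETE_INVALIDATE);
		return 0;
	}

The three calls in main() correspond to the three dio_complete() call sites
in the diff; splitting the old is_async bool into two flags is what lets the
interrupt-context completion path skip the (possibly blocking) invalidation
without also losing it on the deferred and synchronous paths.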