Diffstat (limited to 'fs/direct-io.c')
 fs/direct-io.c | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 6fb00e3f1059..c3b560b24a46 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
  * for the whole file.
  */
 static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, struct iov_iter *iter,
+	loff_t offset, get_block_t get_block, dio_iodone_t end_io,
 	dio_submit_t submit_io, int flags)
 {
 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
 	unsigned blkbits = i_blkbits;
@@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct blk_plug plug;
 	unsigned long align = offset | iov_iter_alignment(iter);
 
-	if (rw & WRITE)
-		rw = WRITE_ODIRECT;
-
 	/*
 	 * Avoid references to bdev if not absolutely needed to give
 	 * the early prefetch in the caller enough time.
@@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	}
 
 	/* watch out for a 0 len io from a tricksy fs */
-	if (rw == READ && !iov_iter_count(iter))
+	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
 	dio->flags = flags;
 	if (dio->flags & DIO_LOCKING) {
-		if (rw == READ) {
+		if (iov_iter_rw(iter) == READ) {
 			struct address_space *mapping =
 					iocb->ki_filp->f_mapping;
 
@@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	if (is_sync_kiocb(iocb))
 		dio->is_async = false;
 	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-	    (rw & WRITE) && end > i_size_read(inode))
+	    iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
 		dio->is_async = false;
 	else
 		dio->is_async = true;
 
 	dio->inode = inode;
-	dio->rw = rw;
+	dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
 
 	/*
 	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
 	 * so that we can call ->fsync.
 	 */
-	if (dio->is_async && (rw & WRITE) &&
+	if (dio->is_async && iov_iter_rw(iter) == WRITE &&
 	    ((iocb->ki_filp->f_flags & O_DSYNC) ||
 	     IS_SYNC(iocb->ki_filp->f_mapping->host))) {
 		retval = dio_set_defer_completion(dio);
@@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 * we can let i_mutex go now that its achieved its purpose
 	 * of protecting us from looking up uninitialized blocks.
 	 */
-	if (rw == READ && (dio->flags & DIO_LOCKING))
+	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
 		mutex_unlock(&dio->inode->i_mutex);
 
 	/*
@@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 */
 	BUG_ON(retval == -EIOCBQUEUED);
 	if (dio->is_async && retval == 0 && dio->result &&
-	    (rw == READ || dio->result == count))
+	    (iov_iter_rw(iter) == READ || dio->result == count))
 		retval = -EIOCBQUEUED;
 	else
 		dio_await_completion(dio);
@@ -1300,11 +1297,11 @@ out:
 	return retval;
 }
 
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+		struct block_device *bdev, struct iov_iter *iter,
+		loff_t offset, get_block_t get_block,
+		dio_iodone_t end_io, dio_submit_t submit_io,
+		int flags)
 {
 	/*
 	 * The block device state is needed in the end to finally
@@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	prefetch(bdev->bd_queue);
 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-	return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-				     get_block, end_io, submit_io, flags);
+	return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+				     end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
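
Since the patch drops the explicit 'rw' argument, callers of the exported __blockdev_direct_IO() now derive the I/O direction from the iov_iter itself. Below is a minimal sketch (not part of this patch) of how a filesystem's ->direct_IO method might invoke the updated interface; myfs_get_block() and the DIO_* flag choice are illustrative assumptions, not taken from fs/direct-io.c.

/*
 * Sketch of a caller adapted to the new signature shown in the hunk
 * at lines 1300-1304 above.  The read/write direction is no longer a
 * separate parameter; it is taken from the iov_iter via iov_iter_rw().
 */
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/printk.h>

/* Assumed filesystem-specific block-mapping callback, defined elsewhere. */
int myfs_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);

static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* No 'rw' argument any more; the iterator carries the direction. */
	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				   offset, myfs_get_block, NULL, NULL,
				   DIO_LOCKING | DIO_SKIP_HOLES);

	/* Direction-specific handling keys off iov_iter_rw(), as in the patch. */
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		pr_debug("myfs: O_DIRECT write failed: %zd\n", ret);

	return ret;
}

The essential change for callers is that every test that previously inspected the removed 'rw' parameter now uses iov_iter_rw(iter); many in-tree filesystems reach this code through the blockdev_direct_IO() convenience wrapper in linux/fs.h rather than calling the exported function directly.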
