author     Jens Axboe <axboe@kernel.dk>  2019-09-23 13:05:34 -0400
committer  Jens Axboe <axboe@kernel.dk>  2019-09-23 13:05:34 -0400
commit     32960613b7c3352ddf38c42596e28a16ae36335e
tree       60748bfdf64056a50293f2b4c0641c3e728e6813
parent     5262f567987d3c30052b22e78c35c2313d07b230
io_uring: correctly handle non ->{read,write}_iter() file_operations
Currently we just -EINVAL a read or write to an fd that isn't backed
by ->read_iter() or ->write_iter(). But we can handle them just fine,
as long as we punt to async context first.
Implement a simple loop function for doing ->read() or ->write()
instead, and ensure we call it appropriately.
Reported-by: 李通洲 <carter.li@eoitek.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
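
[Context, not part of the commit: the user-visible case this fixes. Submitting a vectored read against a file whose file_operations provide only ->read() used to complete the CQE with -EINVAL. A minimal liburing sketch of that case follows; it is illustrative only and assumes a file lacking ->read_iter(), which /dev/urandom was on kernels of this era. Error handling is omitted for brevity.]

/* Illustrative sketch: readv via io_uring against an fd with no
 * ->read_iter(). Pre-patch the CQE carries -EINVAL; with this commit
 * the request is punted to async context and serviced by the new
 * loop_rw_iter() helper. Build with -luring.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	/* assumption: /dev/urandom had ->read() but no ->read_iter() here */
	int fd = open("/dev/urandom", O_RDONLY);

	io_uring_queue_init(4, &ring, 0);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	/* pre-patch: cqe->res == -EINVAL (-22); post-patch: bytes read */
	printf("res = %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}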
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  60
1 file changed, 54 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9d8e703bc851..ca7570aca430 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1298,6 +1298,51 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 	}
 }
 
+/*
+ * For files that don't have ->read_iter() and ->write_iter(), handle them
+ * by looping over ->read() or ->write() manually.
+ */
+static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+			   struct iov_iter *iter)
+{
+	ssize_t ret = 0;
+
+	/*
+	 * Don't support polled IO through this interface, and we can't
+	 * support non-blocking either. For the latter, this just causes
+	 * the kiocb to be handled from an async context.
+	 */
+	if (kiocb->ki_flags & IOCB_HIPRI)
+		return -EOPNOTSUPP;
+	if (kiocb->ki_flags & IOCB_NOWAIT)
+		return -EAGAIN;
+
+	while (iov_iter_count(iter)) {
+		struct iovec iovec = iov_iter_iovec(iter);
+		ssize_t nr;
+
+		if (rw == READ) {
+			nr = file->f_op->read(file, iovec.iov_base,
+					      iovec.iov_len, &kiocb->ki_pos);
+		} else {
+			nr = file->f_op->write(file, iovec.iov_base,
+					       iovec.iov_len, &kiocb->ki_pos);
+		}
+
+		if (nr < 0) {
+			if (!ret)
+				ret = nr;
+			break;
+		}
+		ret += nr;
+		if (nr != iovec.iov_len)
+			break;
+		iov_iter_advance(iter, nr);
+	}
+
+	return ret;
+}
+
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 		   bool force_nonblock)
 {
@@ -1315,8 +1360,6 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 
 	if (unlikely(!(file->f_mode & FMODE_READ)))
 		return -EBADF;
-	if (unlikely(!file->f_op->read_iter))
-		return -EINVAL;
 
 	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
 	if (ret < 0)
@@ -1331,7 +1374,11 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	if (!ret) {
 		ssize_t ret2;
 
-		ret2 = call_read_iter(file, kiocb, &iter);
+		if (file->f_op->read_iter)
+			ret2 = call_read_iter(file, kiocb, &iter);
+		else
+			ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+
 		/*
 		 * In case of a short read, punt to async. This can happen
 		 * if we have data partially cached. Alternatively we can
@@ -1376,8 +1423,6 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	file = kiocb->ki_filp;
 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
 		return -EBADF;
-	if (unlikely(!file->f_op->write_iter))
-		return -EINVAL;
 
 	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
 	if (ret < 0)
@@ -1415,7 +1460,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	}
 	kiocb->ki_flags |= IOCB_WRITE;
 
-	ret2 = call_write_iter(file, kiocb, &iter);
+	if (file->f_op->write_iter)
+		ret2 = call_write_iter(file, kiocb, &iter);
+	else
+		ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		io_rw_done(kiocb, ret2);
 	} else {
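
[Note on loop_rw_iter()'s semantics, not part of the commit: the helper walks the remaining iovecs one at a time, accumulates the bytes transferred, stops at the first error or short transfer, and surfaces an error code only when nothing has been transferred yet. Because ->read() and ->write() may block, it answers IOCB_NOWAIT with -EAGAIN, which io_read()/io_write() already treat as the cue to punt the request to async context, where it can block safely. A hypothetical userspace analogue of the loop, expressed with read(2):]

/* Userspace analogue of loop_rw_iter()'s loop: emulate readv(2) with
 * repeated read(2) calls. Returns bytes transferred, or a negative
 * errno only if the very first transfer failed.
 */
#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t readv_by_loop(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t ret = 0;

	for (int i = 0; i < iovcnt; i++) {
		ssize_t nr = read(fd, iov[i].iov_base, iov[i].iov_len);

		if (nr < 0) {
			/* report an error only if nothing was transferred */
			if (!ret)
				ret = -errno;
			break;
		}
		ret += nr;
		/* a short read ends the loop, like nr != iovec.iov_len above */
		if ((size_t)nr != iov[i].iov_len)
			break;
	}
	return ret;
}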