Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	171
1 file changed, 117 insertions, 54 deletions
@@ -415,6 +415,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	req->ki_retry = NULL;
 	req->ki_dtor = NULL;
 	req->private = NULL;
+	req->ki_iovec = NULL;
 	INIT_LIST_HEAD(&req->ki_run_list);
 
 	/* Check if the completion queue has enough free space to
@@ -460,6 +461,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 
 	if (req->ki_dtor)
 		req->ki_dtor(req);
+	if (req->ki_iovec != &req->ki_inline_vec)
+		kfree(req->ki_iovec);
 	kmem_cache_free(kiocb_cachep, req);
 	ctx->reqs_active--;
 
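The init and teardown changes above follow one ownership rule: ki_iovec either aliases the kiocb's embedded ki_inline_vec or points at a kmalloc'd array, and only the latter is freed. A minimal userspace analogue of that rule (illustrative names, not the kernel's; malloc/free stand in for kmalloc/kfree), which compiles and runs as-is:

/* Userspace sketch of the ki_iovec ownership rule used by really_put_req(). */
#include <stdlib.h>
#include <sys/uio.h>

struct req_state {
	struct iovec	inline_vec;	/* mirrors kiocb->ki_inline_vec */
	struct iovec	*iovec;		/* mirrors kiocb->ki_iovec */
	unsigned long	nr_segs;
	unsigned long	cur_seg;
};

static int setup_vec(struct req_state *r, unsigned long nr_segs)
{
	if (nr_segs <= 1) {
		/* single-segment fast path: no allocation */
		r->iovec = &r->inline_vec;
	} else {
		r->iovec = calloc(nr_segs, sizeof(*r->iovec));
		if (!r->iovec)
			return -1;	/* the kernel setup would fail the request here */
	}
	r->nr_segs = nr_segs;
	r->cur_seg = 0;
	return 0;
}

static void put_req(struct req_state *r)
{
	/* mirrors really_put_req(): only free what was actually allocated */
	if (r->iovec != &r->inline_vec)
		free(r->iovec);
}

int main(void)
{
	struct req_state single = { 0 }, vectored = { 0 };

	if (setup_vec(&single, 1) || setup_vec(&vectored, 4))
		return 1;
	put_req(&single);	/* no free: points at inline_vec */
	put_req(&vectored);	/* frees the heap array */
	return 0;
}

Because freshly allocated kiocbs now start with ki_iovec = NULL, the kfree() in really_put_req() is also safe for requests torn down before a vector was ever set up.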
@@ -1301,69 +1304,63 @@ asmlinkage long sys_io_destroy(aio_context_t ctx)
 	return -EINVAL;
 }
 
-/*
- * aio_p{read,write} are the default ki_retry methods for
- * IO_CMD_P{READ,WRITE}. They maintains kiocb retry state around potentially
- * multiple calls to f_op->aio_read(). They loop around partial progress
- * instead of returning -EIOCBRETRY because they don't have the means to call
- * kick_iocb().
- */
-static ssize_t aio_pread(struct kiocb *iocb)
+static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret = 0;
-
-	do {
-		iocb->ki_inline_vec.iov_base = iocb->ki_buf;
-		iocb->ki_inline_vec.iov_len = iocb->ki_left;
-
-		ret = file->f_op->aio_read(iocb, &iocb->ki_inline_vec,
-					   1, iocb->ki_pos);
-		/*
-		 * Can't just depend on iocb->ki_left to determine
-		 * whether we are done. This may have been a short read.
-		 */
-		if (ret > 0) {
-			iocb->ki_buf += ret;
-			iocb->ki_left -= ret;
+	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
+
+	BUG_ON(ret <= 0);
+
+	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
+		ssize_t this = min((ssize_t)iov->iov_len, ret);
+		iov->iov_base += this;
+		iov->iov_len -= this;
+		iocb->ki_left -= this;
+		ret -= this;
+		if (iov->iov_len == 0) {
+			iocb->ki_cur_seg++;
+			iov++;
 		}
+	}
 
-		/*
-		 * For pipes and sockets we return once we have some data; for
-		 * regular files we retry till we complete the entire read or
-		 * find that we can't read any more data (e.g short reads).
-		 */
-	} while (ret > 0 && iocb->ki_left > 0 &&
-		 !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode));
-
-	/* This means we must have transferred all that we could */
-	/* No need to retry anymore */
-	if ((ret == 0) || (iocb->ki_left == 0))
-		ret = iocb->ki_nbytes - iocb->ki_left;
-
-	return ret;
+	/* the caller should not have done more io than what fit in
+	 * the remaining iovecs */
+	BUG_ON(ret > 0 && iocb->ki_left == 0);
 }
 
-/* see aio_pread() */
-static ssize_t aio_pwrite(struct kiocb *iocb)
+static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 {
 	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
+			 unsigned long, loff_t);
 	ssize_t ret = 0;
+	unsigned short opcode;
+
+	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
+		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
+		rw_op = file->f_op->aio_read;
+		opcode = IOCB_CMD_PREADV;
+	} else {
+		rw_op = file->f_op->aio_write;
+		opcode = IOCB_CMD_PWRITEV;
+	}
 
 	do {
-		iocb->ki_inline_vec.iov_base = iocb->ki_buf;
-		iocb->ki_inline_vec.iov_len = iocb->ki_left;
-
-		ret = file->f_op->aio_write(iocb, &iocb->ki_inline_vec,
-					    1, iocb->ki_pos);
-		if (ret > 0) {
-			iocb->ki_buf += ret;
-			iocb->ki_left -= ret;
-		}
-	} while (ret > 0 && iocb->ki_left > 0);
+		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
+			    iocb->ki_nr_segs - iocb->ki_cur_seg,
+			    iocb->ki_pos);
+		if (ret > 0)
+			aio_advance_iovec(iocb, ret);
 
+	/* retry all partial writes. retry partial reads as long as its a
+	 * regular file. */
+	} while (ret > 0 && iocb->ki_left > 0 &&
+		(opcode == IOCB_CMD_PWRITEV ||
+		 (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
+
+	/* This means we must have transferred all that we could */
+	/* No need to retry anymore */
 	if ((ret == 0) || (iocb->ki_left == 0))
 		ret = iocb->ki_nbytes - iocb->ki_left;
 
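The per-segment bookkeeping in aio_advance_iovec() is easier to follow outside the kiocb. A minimal userspace analogue (hypothetical names, plain size_t counters in place of the kiocb fields) that compiles and runs as-is:

/* Standalone sketch of the iovec-advance step: consume 'done' bytes of a
 * partial transfer across the segment array, trimming the current segment
 * so a retry resumes exactly where the last call stopped. */
#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

static void advance_iovec(struct iovec *iov, unsigned long nr_segs,
			  unsigned long *cur_seg, size_t *left, size_t done)
{
	while (*cur_seg < nr_segs && done > 0) {
		struct iovec *v = &iov[*cur_seg];
		size_t this = v->iov_len < done ? v->iov_len : done;

		v->iov_base = (char *)v->iov_base + this;
		v->iov_len -= this;
		*left -= this;
		done -= this;
		if (v->iov_len == 0)
			(*cur_seg)++;	/* segment drained, move to the next one */
	}
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	unsigned long cur = 0;
	size_t left = sizeof(a) + sizeof(b);

	/* a short transfer of 6 bytes: drains segment 0, eats 2 bytes of segment 1 */
	advance_iovec(iov, 2, &cur, &left, 6);
	printf("cur_seg=%lu left=%zu next_len=%zu\n", cur, left, iov[cur].iov_len);
	/* prints: cur_seg=1 left=6 next_len=6 */
	return 0;
}

As in the kernel helper, a transfer that ends mid-segment leaves iov_base/iov_len trimmed, so the next pass of the retry loop hands f_op->aio_read/aio_write only the remaining tail of the request.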
@@ -1390,6 +1387,38 @@ static ssize_t aio_fsync(struct kiocb *iocb)
 	return ret;
 }
 
+static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
+{
+	ssize_t ret;
+
+	ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
+				    kiocb->ki_nbytes, 1,
+				    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
+	if (ret < 0)
+		goto out;
+
+	kiocb->ki_nr_segs = kiocb->ki_nbytes;
+	kiocb->ki_cur_seg = 0;
+	/* ki_nbytes/left now reflect bytes instead of segs */
+	kiocb->ki_nbytes = ret;
+	kiocb->ki_left = ret;
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
+{
+	kiocb->ki_iovec = &kiocb->ki_inline_vec;
+	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
+	kiocb->ki_iovec->iov_len = kiocb->ki_left;
+	kiocb->ki_nr_segs = 1;
+	kiocb->ki_cur_seg = 0;
+	kiocb->ki_nbytes = kiocb->ki_left;
+	return 0;
+}
+
 /*
  * aio_setup_iocb:
  *	Performs the initial checks and aio retry method
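aio_setup_vectored_rw() reads the user's iovec array through ki_buf and treats ki_nbytes as a segment count before rewriting both to byte totals. A hedged sketch of how a submitter would fill the iocb to match that interpretation, assuming a uapi header that defines IOCB_CMD_PREADV (added to include/linux/aio_abi.h alongside this patch); prep_preadv() is a hypothetical helper, not part of the patch:

#include <linux/aio_abi.h>	/* struct iocb, IOCB_CMD_PREADV (assumed available) */
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

/* Hypothetical helper: aio_buf carries the iovec array pointer and
 * aio_nbytes the segment count; the kernel converts both to byte
 * accounting in aio_setup_vectored_rw(). */
static void prep_preadv(struct iocb *cb, int fd, const struct iovec *iov,
			unsigned long nr_segs, long long offset)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_lio_opcode = IOCB_CMD_PREADV;
	cb->aio_buf = (uint64_t)(uintptr_t)iov;	/* iovec array, not a flat buffer */
	cb->aio_nbytes = nr_segs;		/* segments, not bytes */
	cb->aio_offset = offset;
}

int main(void)
{
	char hdr[16], body[4096];
	struct iovec iov[2] = { { hdr, sizeof(hdr) }, { body, sizeof(body) } };
	struct iocb cb;

	prep_preadv(&cb, 0 /* any readable fd; stdin just for illustration */, iov, 2, 0);
	return cb.aio_lio_opcode == IOCB_CMD_PREADV ? 0 : 1;
}

For the plain IOCB_CMD_PREAD/PWRITE opcodes nothing changes for userspace: aio_setup_single_vector() wraps the existing flat buffer in the inline one-entry iovec, so the same retry path serves both forms.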
@@ -1412,9 +1441,12 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
 		ret = security_file_permission(file, MAY_READ);
 		if (unlikely(ret))
 			break;
+		ret = aio_setup_single_vector(kiocb);
+		if (ret)
+			break;
 		ret = -EINVAL;
 		if (file->f_op->aio_read)
-			kiocb->ki_retry = aio_pread;
+			kiocb->ki_retry = aio_rw_vect_retry;
 		break;
 	case IOCB_CMD_PWRITE:
 		ret = -EBADF;
@@ -1427,9 +1459,40 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
 		ret = security_file_permission(file, MAY_WRITE);
 		if (unlikely(ret))
 			break;
+		ret = aio_setup_single_vector(kiocb);
+		if (ret)
+			break;
+		ret = -EINVAL;
+		if (file->f_op->aio_write)
+			kiocb->ki_retry = aio_rw_vect_retry;
+		break;
+	case IOCB_CMD_PREADV:
+		ret = -EBADF;
+		if (unlikely(!(file->f_mode & FMODE_READ)))
+			break;
+		ret = security_file_permission(file, MAY_READ);
+		if (unlikely(ret))
+			break;
+		ret = aio_setup_vectored_rw(READ, kiocb);
+		if (ret)
+			break;
+		ret = -EINVAL;
+		if (file->f_op->aio_read)
+			kiocb->ki_retry = aio_rw_vect_retry;
+		break;
+	case IOCB_CMD_PWRITEV:
+		ret = -EBADF;
+		if (unlikely(!(file->f_mode & FMODE_WRITE)))
+			break;
+		ret = security_file_permission(file, MAY_WRITE);
+		if (unlikely(ret))
+			break;
+		ret = aio_setup_vectored_rw(WRITE, kiocb);
+		if (ret)
+			break;
 		ret = -EINVAL;
 		if (file->f_op->aio_write)
-			kiocb->ki_retry = aio_pwrite;
+			kiocb->ki_retry = aio_rw_vect_retry;
 		break;
 	case IOCB_CMD_FDSYNC:
 		ret = -EINVAL;
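Putting the pieces together, a minimal end-to-end sketch that submits one IOCB_CMD_PREADV request through the raw io_setup/io_submit/io_getevents syscalls (no libaio). It assumes the opcode constant comes from <linux/aio_abi.h> as introduced alongside this patch, and error handling is reduced to bare exits:

#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char a[64], b[64];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		exit(1);
	if (syscall(__NR_io_setup, 8, &ctx) < 0)
		exit(1);

	/* vectored read: aio_buf is the iovec array, aio_nbytes its length */
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREADV;
	cb.aio_buf = (uint64_t)(uintptr_t)iov;
	cb.aio_nbytes = 2;
	cb.aio_offset = 0;

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		exit(1);
	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
		exit(1);

	/* ev.res carries the byte count aio_rw_vect_retry() accumulated,
	 * i.e. ki_nbytes - ki_left at completion */
	printf("read %lld bytes\n", (long long)ev.res);
	syscall(__NR_io_destroy, ctx);
	return 0;
}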