author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-08-19 01:15:42 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-08-19 01:15:42 -0400
commit		7ffc95e90e305c6803991ec2a2f4e442236efc77 (patch)
tree		5018f8310d14bb4d8dd25813ae69827c557060a9 /fs
parent		1e296b5be40d309a1585c14bc55da6ff6a29ecf0 (diff)
parent		d1abaeb3be7b5fa6d7a1fbbd2e14e3310005c4c1 (diff)
Merge 5.3-rc5 into usb-next
We need the usb fixes in here as well for other patches to build on.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/cmservice.c            |  10
-rw-r--r--  fs/afs/dir.c                  |  89
-rw-r--r--  fs/afs/file.c                 |  12
-rw-r--r--  fs/afs/vlclient.c             |  11
-rw-r--r--  fs/block_dev.c                |  49
-rw-r--r--  fs/btrfs/ctree.h              |   4
-rw-r--r--  fs/btrfs/disk-io.c            |   2
-rw-r--r--  fs/btrfs/extent-tree.c        |  71
-rw-r--r--  fs/btrfs/volumes.c            |  13
-rw-r--r--  fs/io_uring.c                 |  20
-rw-r--r--  fs/seq_file.c                 |   2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c      |  29
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c  |  19
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_node.c |   3
-rw-r--r--  fs/xfs/xfs_log.c              |   5
15 files changed, 176 insertions, 163 deletions
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 	struct afs_call *call = container_of(work, struct afs_call, work);
 	struct afs_uuid *r = call->request;
 
-	struct {
-		__be32 match;
-	} reply;
-
 	_enter("");
 
 	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-		reply.match = htonl(0);
+		afs_send_empty_reply(call);
 	else
-		reply.match = htonl(1);
+		rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+					1, 1, "K-1");
 
-	afs_send_simple_reply(call, &reply, sizeof(reply));
 	afs_put_call(call);
 	_leave("");
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..81207dc3c997 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
  * iterate through the data blob that lists the contents of an AFS directory
  */
 static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
-			   struct key *key)
+			   struct key *key, afs_dataversion_t *_dir_version)
 {
 	struct afs_vnode *dvnode = AFS_FS_I(dir);
 	struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
 	req = afs_read_dir(dvnode, key);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	*_dir_version = req->data_version;
 
 	/* round the file position up to the next entry boundary */
 	ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
  */
 static int afs_readdir(struct file *file, struct dir_context *ctx)
 {
-	return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+	afs_dataversion_t dir_version;
+
+	return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+			       &dir_version);
 }
 
 /*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
  * - just returns the FID the dentry name maps to if found
  */
 static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
-			     struct afs_fid *fid, struct key *key)
+			     struct afs_fid *fid, struct key *key,
+			     afs_dataversion_t *_dir_version)
 {
 	struct afs_super_info *as = dir->i_sb->s_fs_info;
 	struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
 	_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
 
 	/* search the directory */
-	ret = afs_dir_iterate(dir, &cookie.ctx, key);
+	ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
 	if (ret < 0) {
 		_leave(" = %d [iter]", ret);
 		return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 	struct afs_server *server;
 	struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
 	struct inode *inode = NULL, *ti;
+	afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
 	int ret, i;
 
 	_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 		cookie->fids[i].vid = as->volume->vid;
 
 	/* search the directory */
-	ret = afs_dir_iterate(dir, &cookie->ctx, key);
+	ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
 	if (ret < 0) {
 		inode = ERR_PTR(ret);
 		goto out;
 	}
 
+	dentry->d_fsdata = (void *)(unsigned long)data_version;
+
 	inode = ERR_PTR(-ENOENT);
 	if (!cookie->found)
 		goto out;
@@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	struct dentry *parent;
 	struct inode *inode;
 	struct key *key;
-	long dir_version, de_version;
+	afs_dataversion_t dir_version;
+	long de_version;
 	int ret;
 
 	if (flags & LOOKUP_RCU)
@@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	 * on a 32-bit system, we only have 32 bits in the dentry to store the
 	 * version.
 	 */
-	dir_version = (long)dir->status.data_version;
+	dir_version = dir->status.data_version;
 	de_version = (long)dentry->d_fsdata;
-	if (de_version == dir_version)
-		goto out_valid;
+	if (de_version == (long)dir_version)
+		goto out_valid_noupdate;
 
-	dir_version = (long)dir->invalid_before;
-	if (de_version - dir_version >= 0)
+	dir_version = dir->invalid_before;
+	if (de_version - (long)dir_version >= 0)
 		goto out_valid;
 
 	_debug("dir modified");
 	afs_stat_v(dir, n_reval);
 
 	/* search the directory for this vnode */
-	ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+	ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
 	switch (ret) {
 	case 0:
 		/* the filename maps to something */
@@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	}
 
 out_valid:
-	dentry->d_fsdata = (void *)dir_version;
+	dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
 	dput(parent);
 	key_put(key);
 	_leave(" = 1 [valid]");
@@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
 }
 
 /*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+				      struct dentry *dentry,
+				      struct afs_status_cb *scb)
+{
+	if (fc->ac.error == 0)
+		dentry->d_fsdata =
+			(void *)(unsigned long)scb->status.data_version;
+}
+
+/*
  * create a directory on an AFS filesystem
  */
 static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, scb);
+		afs_update_dentry_version(&fc, dentry, scb);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret == 0) {
 			afs_dir_remove_subdir(dentry);
@@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
 					&data_version, &scb[0]);
 		afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
 					&data_version_2, &scb[1]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
 			ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
 		afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
 					NULL, &scb[1]);
 		ihold(&vnode->vfs_inode);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		d_instantiate(dentry, &vnode->vfs_inode);
 
 		mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
+	/* This bit is potentially nasty as there's a potential race with
+	 * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
+	 * to reflect it's new parent's new data_version after the op, but
+	 * d_revalidate may see old_dentry between the op having taken place
+	 * and the version being updated.
+	 *
+	 * So drop the old_dentry for now to make other threads go through
+	 * lookup instead - which we hold a lock against.
+	 */
+	d_drop(old_dentry);
+
 	ret = -ERESTARTSYS;
 	if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
 		afs_dataversion_t orig_data_version;
@@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (orig_dvnode != new_dvnode) {
 			if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
 				afs_end_vnode_operation(&fc);
-				goto error_rehash;
+				goto error_rehash_old;
 			}
-			new_data_version = new_dvnode->status.data_version;
+			new_data_version = new_dvnode->status.data_version + 1;
 		} else {
 			new_data_version = orig_data_version;
 			new_scb = &scb[0];
@@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
-			goto error_rehash;
+			goto error_rehash_old;
 	}
 
 	if (ret == 0) {
@@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			drop_nlink(new_inode);
 			spin_unlock(&new_inode->i_lock);
 		}
+
+		/* Now we can update d_fsdata on the dentries to reflect their
+		 * new parent's data_version.
+		 *
+		 * Note that if we ever implement RENAME_EXCHANGE, we'll have
+		 * to update both dentries with opposing dir versions.
+		 */
+		if (new_dvnode != orig_dvnode) {
+			afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+			afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+		} else {
+			afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+			afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+		}
 		d_move(old_dentry, new_dentry);
 		goto error_tmp;
 	}
 
+error_rehash_old:
+	d_rehash(new_dentry);
 error_rehash:
 	if (rehash)
 		d_rehash(rehash);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
 	int i;
 
 	if (refcount_dec_and_test(&req->usage)) {
-		for (i = 0; i < req->nr_pages; i++)
-			if (req->pages[i])
-				put_page(req->pages[i]);
-		if (req->pages != req->array)
-			kfree(req->pages);
+		if (req->pages) {
+			for (i = 0; i < req->nr_pages; i++)
+				if (req->pages[i])
+					put_page(req->pages[i]);
+			if (req->pages != req->array)
+				kfree(req->pages);
+		}
 		kfree(req);
 	}
 }
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 		struct afs_uuid__xdr *xdr;
 		struct afs_uuid *uuid;
 		int j;
+		int n = entry->nr_servers;
 
 		tmp = ntohl(uvldb->serverFlags[i]);
 		if (tmp & AFS_VLSF_DONTUSE ||
 		    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
 			continue;
 		if (tmp & AFS_VLSF_RWVOL) {
-			entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+			entry->fs_mask[n] |= AFS_VOL_VTM_RW;
 			if (vlflags & AFS_VLF_BACKEXISTS)
-				entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+				entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
 		}
 		if (tmp & AFS_VLSF_ROVOL)
-			entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-		if (!entry->fs_mask[i])
+			entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+		if (!entry->fs_mask[n])
 			continue;
 
 		xdr = &uvldb->serverNumber[i];
-		uuid = (struct afs_uuid *)&entry->fs_server[i];
+		uuid = (struct afs_uuid *)&entry->fs_server[n];
 		uuid->time_low = xdr->time_low;
 		uuid->time_mid = htons(ntohl(xdr->time_mid));
 		uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
diff --git a/fs/block_dev.c b/fs/block_dev.c
index eb657ab94060..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	gfp_t gfp;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	if (nowait)
-		gfp = GFP_NOWAIT;
-	else
-		gfp = GFP_KERNEL;
-
-	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
-	if (!bio)
-		return -EAGAIN;
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	if (!is_poll)
 		blk_start_plug(&plug);
 
-	ret = 0;
 	for (;;) {
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 
-		/*
-		 * Tell underlying layer to not block for resource shortage.
-		 * And if we would have blocked, return error inline instead
-		 * of through the bio->bi_end_io() callback.
-		 */
-		if (nowait)
-			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}
 
-			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
-			if (qc == BLK_QC_T_EAGAIN) {
-				dio->size -= bio->bi_iter.bi_size;
-				ret = -EAGAIN;
-				goto error;
-			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		dio->size += bio->bi_iter.bi_size;
-		qc = submit_bio(bio);
-		if (qc == BLK_QC_T_EAGAIN) {
-			dio->size -= bio->bi_iter.bi_size;
-			ret = -EAGAIN;
-			goto error;
-		}
-
-		bio = bio_alloc(gfp, nr_pages);
-		if (!bio) {
-			ret = -EAGAIN;
-			goto error;
-		}
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
 	if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
-out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -495,10 +460,6 @@ out:
 
 	bio_put(&dio->bio);
 	return ret;
-error:
-	if (!is_poll)
-		blk_finish_plug(&plug);
-	goto out;
 }
 
 static ssize_t
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
 struct raid_kobject {
 	u64 flags;
 	struct kobject kobj;
-	struct list_head list;
 };
 
 /*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
 	u32 thread_pool_size;
 
 	struct kobject *space_info_kobj;
-	struct list_head pending_raid_kobjs;
-	spinlock_t pending_raid_kobjs_lock; /* uncontended */
 
 	u64 total_pinned;
 
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 bytes_used, u64 type, u64 chunk_offset,
 			   u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f7ee70b3d1a..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
-	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
 	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	return 0;
 }
 
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&space_info->groups_sem);
 
 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				"couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				"failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			inc_block_group_ro(cache, 1);
 	}
 
-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;
 
+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);
 
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d74b74ca07af..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	if (ret)
 		return ret;
 
-	/*
-	 * We add the kobjects here (and after forcing data chunk creation)
-	 * since relocation is the only place we'll create chunks of a new
-	 * type at runtime. The only place where we'll remove the last
-	 * chunk of a type is the call immediately below this one. Even
-	 * so, we're protected against races with the cleaner thread since
-	 * we're covered by the delete_unused_bgs_mutex.
-	 */
-	btrfs_add_raid_kobjects(fs_info);
-
 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
 						     chunk_offset);
 	if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
 			btrfs_end_transaction(trans);
 			if (ret < 0)
 				return ret;
-
-			btrfs_add_raid_kobjects(fs_info);
-
 			return 1;
 		}
 	}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d542f1cf4428..24bbe3cb7ad4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1097,10 +1097,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 
 			iter->bvec = bvec + seg_skip;
 			iter->nr_segs -= seg_skip;
-			iter->count -= (seg_skip << PAGE_SHIFT);
+			iter->count -= bvec->bv_len + offset;
 			iter->iov_offset = offset & ~PAGE_MASK;
-			if (iter->iov_offset)
-				iter->count -= iter->iov_offset;
 		}
 	}
 
@@ -2025,6 +2023,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 {
 	int ret;
 
+	ret = io_req_defer(ctx, req, s->sqe);
+	if (ret) {
+		if (ret != -EIOCBQUEUED) {
+			io_free_req(req);
+			io_cqring_add_event(ctx, s->sqe->user_data, ret);
+		}
+		return 0;
+	}
+
 	ret = __io_submit_sqe(ctx, req, s, true);
 	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
 		struct io_uring_sqe *sqe_copy;
@@ -2097,13 +2104,6 @@ err:
 		return;
 	}
 
-	ret = io_req_defer(ctx, req, s->sqe);
-	if (ret) {
-		if (ret != -EIOCBQUEUED)
-			goto err_req;
-		return;
-	}
-
 	/*
 	 * If we already have a head request, queue this one for async
 	 * submittal once the head completes. If we don't have a head but
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		if (seq_has_overflowed(m))
 			goto Eoverflow;
+		p = m->op->next(m, p, &m->index);
 		if (pos + m->count > offset) {
 			m->from = offset - pos;
 			m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		pos += m->count;
 		m->count = 0;
-		p = m->op->next(m, p, &m->index);
 		if (pos == offset)
 			break;
 	}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
 	XFS_STATS_INC(mp, xs_blk_mapr);
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!ifp) {
+		/* No CoW fork? Return a hole. */
+		if (whichfork == XFS_COW_FORK) {
+			mval->br_startoff = bno;
+			mval->br_startblock = HOLESTARTBLOCK;
+			mval->br_blockcount = len;
+			mval->br_state = XFS_EXT_NORM;
+			*nmap = 1;
+			return 0;
+		}
 
-	/* No CoW fork? Return a hole. */
-	if (whichfork == XFS_COW_FORK && !ifp) {
-		mval->br_startoff = bno;
-		mval->br_startblock = HOLESTARTBLOCK;
-		mval->br_blockcount = len;
-		mval->br_state = XFS_EXT_NORM;
-		*nmap = 1;
-		return 0;
+		/*
+		 * A missing attr ifork implies that the inode says we're in
+		 * extents or btree format but failed to pass the inode fork
+		 * verifier while trying to load it. Treat that as a file
+		 * corruption too.
+		 */
+#ifdef DEBUG
+		xfs_alert(mp, "%s: inode %llu missing fork %d",
+				__func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+		return -EFSCORRUPTED;
 	}
 
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
 	ASSERT(state->path.active == 0);
 	oldblk = &state->path.blk[0];
 	error = xfs_da3_root_split(state, oldblk, addblk);
-	if (error) {
-		addblk->bp = NULL;
-		return error;	/* GROT: dir is inconsistent */
-	}
+	if (error)
+		goto out;
 
 	/*
 	 * Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
 	 */
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.forw) {
-		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+			error = -EFSCORRUPTED;
+			goto out;
+		}
 		node = addblk->bp->b_addr;
 		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 		xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
 	}
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.back) {
-		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+			error = -EFSCORRUPTED;
+			goto out;
+		}
 		node = addblk->bp->b_addr;
 		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 		xfs_trans_log_buf(state->args->trans, addblk->bp,
 			XFS_DA_LOGRANGE(node, &node->hdr.info,
 			sizeof(node->hdr.info)));
 	}
+out:
 	addblk->bp = NULL;
-	return 0;
+	return error;
 }
 
 /*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
 	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	xfs_dir3_leaf_check(dp, bp);
-	ASSERT(leafhdr.count > 0);
+	if (leafhdr.count <= 0)
+		return -EFSCORRUPTED;
 
 	/*
 	 * Look up the hash value in the leaf entries.
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
 
 	ASSERT(*ticp == NULL);
 	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
-				KM_SLEEP | KM_MAYFAIL);
-	if (!tic)
-		return -ENOMEM;
-
+				KM_SLEEP);
 	*ticp = tic;
 
 	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt