diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-09-15 01:25:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-09-15 01:25:28 -0400 |
commit | 589109df31843384f410ba09f6b8383464408d1e (patch) | |
tree | 1757aa3375033df48d114fa12a76ff7c25c48cec | |
parent | 5b945fd2c34c6324752fc793266b4598a307d765 (diff) | |
parent | 9f0c5124f4a82503ee5d55c60b0b9c6afc3af68b (diff) |
Merge tag 'nfs-for-4.19-2' of git://git.linux-nfs.org/projects/anna/linux-nfs
Pull NFS client bugfixes from Anna Schumaker:
"These are a handful of fixes for problems that Trond found. Patch #1
and #3 have the same name, a second issue was found after applying the
first patch.
Stable bugfixes:
- v4.17+: Fix tracepoint Oops in initiate_file_draining()
- v4.11+: Fix an infinite loop on I/O
Other fixes:
- Return errors if a waiting layoutget is killed
- Don't open code clearing of delegation state"
* tag 'nfs-for-4.19-2' of git://git.linux-nfs.org/projects/anna/linux-nfs:
NFS: Don't open code clearing of delegation state
NFSv4.1 fix infinite loop on I/O.
NFSv4: Fix a tracepoint Oops in initiate_file_draining()
pNFS: Ensure we return the error if someone kills a waiting layoutget
NFSv4: Fix a tracepoint Oops in initiate_file_draining()
-rw-r--r-- | fs/nfs/nfs4proc.c | 31 | ||||
-rw-r--r-- | fs/nfs/nfs4state.c | 2 | ||||
-rw-r--r-- | fs/nfs/nfs4trace.h | 4 | ||||
-rw-r--r-- | fs/nfs/pnfs.c | 26 |
4 files changed, 39 insertions(+), 24 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 34830f6457ea..8220a168282e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state, | |||
1637 | write_sequnlock(&state->seqlock); | 1637 | write_sequnlock(&state->seqlock); |
1638 | } | 1638 | } |
1639 | 1639 | ||
1640 | static void nfs_state_clear_delegation(struct nfs4_state *state) | ||
1641 | { | ||
1642 | write_seqlock(&state->seqlock); | ||
1643 | nfs4_stateid_copy(&state->stateid, &state->open_stateid); | ||
1644 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
1645 | write_sequnlock(&state->seqlock); | ||
1646 | } | ||
1647 | |||
1640 | static int update_open_stateid(struct nfs4_state *state, | 1648 | static int update_open_stateid(struct nfs4_state *state, |
1641 | const nfs4_stateid *open_stateid, | 1649 | const nfs4_stateid *open_stateid, |
1642 | const nfs4_stateid *delegation, | 1650 | const nfs4_stateid *delegation, |
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, | |||
2145 | if (IS_ERR(opendata)) | 2153 | if (IS_ERR(opendata)) |
2146 | return PTR_ERR(opendata); | 2154 | return PTR_ERR(opendata); |
2147 | nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); | 2155 | nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); |
2148 | write_seqlock(&state->seqlock); | 2156 | nfs_state_clear_delegation(state); |
2149 | nfs4_stateid_copy(&state->stateid, &state->open_stateid); | ||
2150 | write_sequnlock(&state->seqlock); | ||
2151 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
2152 | switch (type & (FMODE_READ|FMODE_WRITE)) { | 2157 | switch (type & (FMODE_READ|FMODE_WRITE)) { |
2153 | case FMODE_READ|FMODE_WRITE: | 2158 | case FMODE_READ|FMODE_WRITE: |
2154 | case FMODE_WRITE: | 2159 | case FMODE_WRITE: |
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, | |||
2601 | const nfs4_stateid *stateid) | 2606 | const nfs4_stateid *stateid) |
2602 | { | 2607 | { |
2603 | nfs_remove_bad_delegation(state->inode, stateid); | 2608 | nfs_remove_bad_delegation(state->inode, stateid); |
2604 | write_seqlock(&state->seqlock); | 2609 | nfs_state_clear_delegation(state); |
2605 | nfs4_stateid_copy(&state->stateid, &state->open_stateid); | ||
2606 | write_sequnlock(&state->seqlock); | ||
2607 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
2608 | } | 2610 | } |
2609 | 2611 | ||
2610 | static void nfs40_clear_delegation_stateid(struct nfs4_state *state) | 2612 | static void nfs40_clear_delegation_stateid(struct nfs4_state *state) |
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) | |||
2672 | delegation = rcu_dereference(NFS_I(state->inode)->delegation); | 2674 | delegation = rcu_dereference(NFS_I(state->inode)->delegation); |
2673 | if (delegation == NULL) { | 2675 | if (delegation == NULL) { |
2674 | rcu_read_unlock(); | 2676 | rcu_read_unlock(); |
2677 | nfs_state_clear_delegation(state); | ||
2675 | return; | 2678 | return; |
2676 | } | 2679 | } |
2677 | 2680 | ||
2678 | nfs4_stateid_copy(&stateid, &delegation->stateid); | 2681 | nfs4_stateid_copy(&stateid, &delegation->stateid); |
2679 | if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || | 2682 | if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { |
2680 | !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, | 2683 | rcu_read_unlock(); |
2681 | &delegation->flags)) { | 2684 | nfs_state_clear_delegation(state); |
2685 | return; | ||
2686 | } | ||
2687 | |||
2688 | if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, | ||
2689 | &delegation->flags)) { | ||
2682 | rcu_read_unlock(); | 2690 | rcu_read_unlock(); |
2683 | nfs_finish_clear_delegation_stateid(state, &stateid); | ||
2684 | return; | 2691 | return; |
2685 | } | 2692 | } |
2686 | 2693 | ||
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 3df0eb52da1c..40a08cd483f0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_ | |||
1390 | 1390 | ||
1391 | if (!nfs4_state_mark_reclaim_nograce(clp, state)) | 1391 | if (!nfs4_state_mark_reclaim_nograce(clp, state)) |
1392 | return -EBADF; | 1392 | return -EBADF; |
1393 | nfs_inode_find_delegation_state_and_recover(state->inode, | ||
1394 | &state->stateid); | ||
1393 | dprintk("%s: scheduling stateid recovery for server %s\n", __func__, | 1395 | dprintk("%s: scheduling stateid recovery for server %s\n", __func__, |
1394 | clp->cl_hostname); | 1396 | clp->cl_hostname); |
1395 | nfs4_schedule_state_manager(clp); | 1397 | nfs4_schedule_state_manager(clp); |
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index a275fba93170..b1483b303e0b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h | |||
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event, | |||
1137 | TP_fast_assign( | 1137 | TP_fast_assign( |
1138 | __entry->error = error; | 1138 | __entry->error = error; |
1139 | __entry->fhandle = nfs_fhandle_hash(fhandle); | 1139 | __entry->fhandle = nfs_fhandle_hash(fhandle); |
1140 | if (inode != NULL) { | 1140 | if (!IS_ERR_OR_NULL(inode)) { |
1141 | __entry->fileid = NFS_FILEID(inode); | 1141 | __entry->fileid = NFS_FILEID(inode); |
1142 | __entry->dev = inode->i_sb->s_dev; | 1142 | __entry->dev = inode->i_sb->s_dev; |
1143 | } else { | 1143 | } else { |
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event, | |||
1194 | TP_fast_assign( | 1194 | TP_fast_assign( |
1195 | __entry->error = error; | 1195 | __entry->error = error; |
1196 | __entry->fhandle = nfs_fhandle_hash(fhandle); | 1196 | __entry->fhandle = nfs_fhandle_hash(fhandle); |
1197 | if (inode != NULL) { | 1197 | if (!IS_ERR_OR_NULL(inode)) { |
1198 | __entry->fileid = NFS_FILEID(inode); | 1198 | __entry->fileid = NFS_FILEID(inode); |
1199 | __entry->dev = inode->i_sb->s_dev; | 1199 | __entry->dev = inode->i_sb->s_dev; |
1200 | } else { | 1200 | } else { |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index e8f232de484f..7d9a51e6b847 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, | |||
1740 | return ret; | 1740 | return ret; |
1741 | } | 1741 | } |
1742 | 1742 | ||
1743 | static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) | 1743 | static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) |
1744 | { | 1744 | { |
1745 | /* | 1745 | /* |
1746 | * send layoutcommit as it can hold up layoutreturn due to lseg | 1746 | * send layoutcommit as it can hold up layoutreturn due to lseg |
1747 | * reference | 1747 | * reference |
1748 | */ | 1748 | */ |
1749 | pnfs_layoutcommit_inode(lo->plh_inode, false); | 1749 | pnfs_layoutcommit_inode(lo->plh_inode, false); |
1750 | return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, | 1750 | return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, |
1751 | nfs_wait_bit_killable, | 1751 | nfs_wait_bit_killable, |
1752 | TASK_UNINTERRUPTIBLE); | 1752 | TASK_KILLABLE); |
1753 | } | 1753 | } |
1754 | 1754 | ||
1755 | static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) | 1755 | static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) |
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino, | |||
1830 | } | 1830 | } |
1831 | 1831 | ||
1832 | lookup_again: | 1832 | lookup_again: |
1833 | nfs4_client_recover_expired_lease(clp); | 1833 | lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp)); |
1834 | if (IS_ERR(lseg)) | ||
1835 | goto out; | ||
1834 | first = false; | 1836 | first = false; |
1835 | spin_lock(&ino->i_lock); | 1837 | spin_lock(&ino->i_lock); |
1836 | lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); | 1838 | lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); |
@@ -1863,9 +1865,9 @@ lookup_again: | |||
1863 | if (list_empty(&lo->plh_segs) && | 1865 | if (list_empty(&lo->plh_segs) && |
1864 | atomic_read(&lo->plh_outstanding) != 0) { | 1866 | atomic_read(&lo->plh_outstanding) != 0) { |
1865 | spin_unlock(&ino->i_lock); | 1867 | spin_unlock(&ino->i_lock); |
1866 | if (wait_var_event_killable(&lo->plh_outstanding, | 1868 | lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding, |
1867 | atomic_read(&lo->plh_outstanding) == 0 | 1869 | atomic_read(&lo->plh_outstanding))); |
1868 | || !list_empty(&lo->plh_segs))) | 1870 | if (IS_ERR(lseg) || !list_empty(&lo->plh_segs)) |
1869 | goto out_put_layout_hdr; | 1871 | goto out_put_layout_hdr; |
1870 | pnfs_put_layout_hdr(lo); | 1872 | pnfs_put_layout_hdr(lo); |
1871 | goto lookup_again; | 1873 | goto lookup_again; |
@@ -1898,8 +1900,11 @@ lookup_again: | |||
1898 | if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, | 1900 | if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, |
1899 | &lo->plh_flags)) { | 1901 | &lo->plh_flags)) { |
1900 | spin_unlock(&ino->i_lock); | 1902 | spin_unlock(&ino->i_lock); |
1901 | wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET, | 1903 | lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, |
1902 | TASK_UNINTERRUPTIBLE); | 1904 | NFS_LAYOUT_FIRST_LAYOUTGET, |
1905 | TASK_KILLABLE)); | ||
1906 | if (IS_ERR(lseg)) | ||
1907 | goto out_put_layout_hdr; | ||
1903 | pnfs_put_layout_hdr(lo); | 1908 | pnfs_put_layout_hdr(lo); |
1904 | dprintk("%s retrying\n", __func__); | 1909 | dprintk("%s retrying\n", __func__); |
1905 | goto lookup_again; | 1910 | goto lookup_again; |
@@ -1925,7 +1930,8 @@ lookup_again: | |||
1925 | if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { | 1930 | if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { |
1926 | spin_unlock(&ino->i_lock); | 1931 | spin_unlock(&ino->i_lock); |
1927 | dprintk("%s wait for layoutreturn\n", __func__); | 1932 | dprintk("%s wait for layoutreturn\n", __func__); |
1928 | if (pnfs_prepare_to_retry_layoutget(lo)) { | 1933 | lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo)); |
1934 | if (!IS_ERR(lseg)) { | ||
1929 | if (first) | 1935 | if (first) |
1930 | pnfs_clear_first_layoutget(lo); | 1936 | pnfs_clear_first_layoutget(lo); |
1931 | pnfs_put_layout_hdr(lo); | 1937 | pnfs_put_layout_hdr(lo); |