diff options
| author | Elena Reshetova <elena.reshetova@intel.com> | 2017-10-20 05:53:38 -0400 |
|---|---|---|
| committer | Anna Schumaker <Anna.Schumaker@Netapp.com> | 2017-11-17 13:48:01 -0500 |
| commit | 212bf41d88c06afc23e03f9b274eebf1e8dba197 (patch) | |
| tree | 755a58e9b423e6e8bfe06fa2036b633a427ff42b /fs/nfs/nfs4proc.c | |
| parent | 2f62b5aa4814be2c511553fd6afb4d35b6c2503b (diff) | |
fs, nfs: convert nfs_client.cl_count from atomic_t to refcount_t
atomic_t variables are currently used to implement reference
counters with the following properties:
- counter is initialized to 1 using atomic_set()
- a resource is freed upon counter reaching zero
- once counter reaches zero, its further
increments aren't allowed
- counter schema uses basic atomic operations
(set, inc, inc_not_zero, dec_and_test, etc.)
Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.
The variable nfs_client.cl_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.
Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'fs/nfs/nfs4proc.c')
| -rw-r--r-- | fs/nfs/nfs4proc.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index be8c75a2cbbe..82e5ed2ee6ba 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -4870,7 +4870,7 @@ static void nfs4_renew_release(void *calldata) | |||
| 4870 | struct nfs4_renewdata *data = calldata; | 4870 | struct nfs4_renewdata *data = calldata; |
| 4871 | struct nfs_client *clp = data->client; | 4871 | struct nfs_client *clp = data->client; |
| 4872 | 4872 | ||
| 4873 | if (atomic_read(&clp->cl_count) > 1) | 4873 | if (refcount_read(&clp->cl_count) > 1) |
| 4874 | nfs4_schedule_state_renewal(clp); | 4874 | nfs4_schedule_state_renewal(clp); |
| 4875 | nfs_put_client(clp); | 4875 | nfs_put_client(clp); |
| 4876 | kfree(data); | 4876 | kfree(data); |
| @@ -4918,7 +4918,7 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, | |||
| 4918 | 4918 | ||
| 4919 | if (renew_flags == 0) | 4919 | if (renew_flags == 0) |
| 4920 | return 0; | 4920 | return 0; |
| 4921 | if (!atomic_inc_not_zero(&clp->cl_count)) | 4921 | if (!refcount_inc_not_zero(&clp->cl_count)) |
| 4922 | return -EIO; | 4922 | return -EIO; |
| 4923 | data = kmalloc(sizeof(*data), GFP_NOFS); | 4923 | data = kmalloc(sizeof(*data), GFP_NOFS); |
| 4924 | if (data == NULL) { | 4924 | if (data == NULL) { |
| @@ -7499,7 +7499,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, | |||
| 7499 | struct nfs41_exchange_id_data *calldata; | 7499 | struct nfs41_exchange_id_data *calldata; |
| 7500 | int status; | 7500 | int status; |
| 7501 | 7501 | ||
| 7502 | if (!atomic_inc_not_zero(&clp->cl_count)) | 7502 | if (!refcount_inc_not_zero(&clp->cl_count)) |
| 7503 | return ERR_PTR(-EIO); | 7503 | return ERR_PTR(-EIO); |
| 7504 | 7504 | ||
| 7505 | status = -ENOMEM; | 7505 | status = -ENOMEM; |
| @@ -8099,7 +8099,7 @@ static void nfs41_sequence_release(void *data) | |||
| 8099 | struct nfs4_sequence_data *calldata = data; | 8099 | struct nfs4_sequence_data *calldata = data; |
| 8100 | struct nfs_client *clp = calldata->clp; | 8100 | struct nfs_client *clp = calldata->clp; |
| 8101 | 8101 | ||
| 8102 | if (atomic_read(&clp->cl_count) > 1) | 8102 | if (refcount_read(&clp->cl_count) > 1) |
| 8103 | nfs4_schedule_state_renewal(clp); | 8103 | nfs4_schedule_state_renewal(clp); |
| 8104 | nfs_put_client(clp); | 8104 | nfs_put_client(clp); |
| 8105 | kfree(calldata); | 8105 | kfree(calldata); |
| @@ -8128,7 +8128,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data) | |||
| 8128 | trace_nfs4_sequence(clp, task->tk_status); | 8128 | trace_nfs4_sequence(clp, task->tk_status); |
| 8129 | if (task->tk_status < 0) { | 8129 | if (task->tk_status < 0) { |
| 8130 | dprintk("%s ERROR %d\n", __func__, task->tk_status); | 8130 | dprintk("%s ERROR %d\n", __func__, task->tk_status); |
| 8131 | if (atomic_read(&clp->cl_count) == 1) | 8131 | if (refcount_read(&clp->cl_count) == 1) |
| 8132 | goto out; | 8132 | goto out; |
| 8133 | 8133 | ||
| 8134 | if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { | 8134 | if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { |
| @@ -8179,7 +8179,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, | |||
| 8179 | struct rpc_task *ret; | 8179 | struct rpc_task *ret; |
| 8180 | 8180 | ||
| 8181 | ret = ERR_PTR(-EIO); | 8181 | ret = ERR_PTR(-EIO); |
| 8182 | if (!atomic_inc_not_zero(&clp->cl_count)) | 8182 | if (!refcount_inc_not_zero(&clp->cl_count)) |
| 8183 | goto out_err; | 8183 | goto out_err; |
| 8184 | 8184 | ||
| 8185 | ret = ERR_PTR(-ENOMEM); | 8185 | ret = ERR_PTR(-ENOMEM); |
