author     Trond Myklebust <Trond.Myklebust@netapp.com>   2012-08-13 17:15:50 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2012-09-28 16:03:03 -0400
commit     b3c54de6f82d01637796bcc1f667a45f3b32e814
tree       9b0e87f60b2815ca20ef47eceed6da0f16b799ef
parent     a11a2bf4de5679fa0b63474c7d39bea2dac7d061
NFS: Convert nfs_get_lock_context to return an ERR_PTR on failure
We want to be able to distinguish between allocation failures and the case
where the lock context is not needed (because there are no locks).
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
 fs/nfs/direct.c   | 16 ++++++++++++----
 fs/nfs/inode.c    |  2 +-
 fs/nfs/pagelist.c |  8 +++++---
 3 files changed, 18 insertions(+), 8 deletions(-)
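The conversion below changes the contract of nfs_get_lock_context(): on failure it now returns an ERR_PTR-encoded errno instead of NULL, so every caller switches from a NULL check to IS_ERR()/PTR_ERR(). As a minimal sketch of the post-patch calling convention (illustrative only, not part of the patch; identifiers are taken from the hunks below):

	struct nfs_lock_context *l_ctx;

	/* illustrative only: how callers consume the new return value */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		/* propagate the encoded error (e.g. -ENOMEM) instead of assuming one */
		return PTR_ERR(l_ctx);
	}
	/* ... use l_ctx, then release it with nfs_put_lock_context(l_ctx) ... */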
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 1ba385b7c90d..22130df16218 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -450,6 +450,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 	ssize_t result = -ENOMEM;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct nfs_direct_req *dreq;
+	struct nfs_lock_context *l_ctx;
 
 	dreq = nfs_direct_req_alloc();
 	if (dreq == NULL)
@@ -457,9 +458,12 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
-	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
-	if (dreq->l_ctx == NULL)
+	l_ctx = nfs_get_lock_context(dreq->ctx);
+	if (IS_ERR(l_ctx)) {
+		result = PTR_ERR(l_ctx);
 		goto out_release;
+	}
+	dreq->l_ctx = l_ctx;
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
@@ -849,6 +853,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	ssize_t result = -ENOMEM;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct nfs_direct_req *dreq;
+	struct nfs_lock_context *l_ctx;
 
 	dreq = nfs_direct_req_alloc();
 	if (!dreq)
@@ -856,9 +861,12 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
-	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
-	if (dreq->l_ctx == NULL)
+	l_ctx = nfs_get_lock_context(dreq->ctx);
+	if (IS_ERR(l_ctx)) {
+		result = PTR_ERR(l_ctx);
 		goto out_release;
+	}
+	dreq->l_ctx = l_ctx;
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 9b47610338f5..b5e2913dff2d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -578,7 +578,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
 	spin_unlock(&inode->i_lock);
 	new = kmalloc(sizeof(*new), GFP_KERNEL);
 	if (new == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	nfs_init_lock_context(new);
 	spin_lock(&inode->i_lock);
 	res = __nfs_find_lock_context(ctx);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 311a79681e2b..dfd764bd943d 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -102,6 +102,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		   unsigned int offset, unsigned int count)
 {
 	struct nfs_page *req;
+	struct nfs_lock_context *l_ctx;
 
 	/* try to allocate the request struct */
 	req = nfs_page_alloc();
@@ -109,11 +110,12 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		return ERR_PTR(-ENOMEM);
 
 	/* get lock context early so we can deal with alloc failures */
-	req->wb_lock_context = nfs_get_lock_context(ctx);
-	if (req->wb_lock_context == NULL) {
+	l_ctx = nfs_get_lock_context(ctx);
+	if (IS_ERR(l_ctx)) {
 		nfs_page_free(req);
-		return ERR_PTR(-ENOMEM);
+		return ERR_CAST(l_ctx);
 	}
+	req->wb_lock_context = l_ctx;
 
 	/* Initialize the request struct. Initially, we assume a
 	 * long write-back delay. This will be adjusted in
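Note on the pagelist.c hunk: nfs_create_request() now returns ERR_CAST(l_ctx) instead of a hard-coded ERR_PTR(-ENOMEM), so whatever error nfs_get_lock_context() encoded is propagated unchanged to the caller. ERR_CAST() simply re-casts an error pointer from one pointer type to another; sketched from memory of include/linux/err.h (approximate, for illustration only):

	static inline void * __must_check ERR_CAST(__force const void *ptr)
	{
		/* cast away the const; the encoded errno value is preserved */
		return (void *) ptr;
	}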