path: root/fs/nfs/direct.c
author	Chuck Lever <cel@netapp.com>	2006-03-20 13:44:31 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-03-20 13:44:31 -0500
commit	93619e5989173614bef0013b0bb8a3fe3dbd5a95 (patch)
tree	2172226a57bae4db4943d271e5250fd0606a74d1 /fs/nfs/direct.c
parent	bc0fb201b34b12e2d16e8cbd5bb078c1db936304 (diff)
NFS: create common routine for allocating nfs_direct_req
Factor out a small common piece of the path that allocates nfs_direct_req structures.

Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--	fs/nfs/direct.c	27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 2593f47eaff0..489f736d0f5d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -152,6 +152,24 @@ static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
 	kfree(pages);
 }
 
+static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
+{
+	struct nfs_direct_req *dreq;
+
+	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	if (!dreq)
+		return NULL;
+
+	kref_init(&dreq->kref);
+	init_waitqueue_head(&dreq->wait);
+	INIT_LIST_HEAD(&dreq->list);
+	dreq->iocb = NULL;
+	atomic_set(&dreq->count, 0);
+	atomic_set(&dreq->error, 0);
+
+	return dreq;
+}
+
 static void nfs_direct_req_release(struct kref *kref)
 {
 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
@@ -194,17 +212,10 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	unsigned int reads = 0;
 	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return NULL;
 
-	kref_init(&dreq->kref);
-	init_waitqueue_head(&dreq->wait);
-	INIT_LIST_HEAD(&dreq->list);
-	dreq->iocb = NULL;
-	atomic_set(&dreq->count, 0);
-	atomic_set(&dreq->error, 0);
-
 	list = &dreq->list;
 	for(;;) {
 		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
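
Read as straight C rather than as a diff, the helper introduced by this patch and the call site it simplifies look roughly like the sketch below. It is assembled only from the hunks above: nfs_direct_cachep, struct nfs_direct_req, and nfs_readdata_alloc() are declared elsewhere in fs/nfs/direct.c, and SLAB_KERNEL is the allocation flag kernels of this era used where later code says GFP_KERNEL.

/*
 * Sketch assembled from the hunks above; not a drop-in replacement for
 * the file. The per-request nfs_read_data setup in
 * nfs_direct_read_alloc() is elided.
 */
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	/* Common initialization previously open-coded in nfs_direct_read_alloc() */
	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	dreq->iocb = NULL;
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	return dreq;
}

static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct nfs_direct_req *dreq;

	/* The read path now delegates allocation and field setup to the helper. */
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	/* ... nfs_read_data list construction continues as before ... */
	return dreq;
}

Centralizing the field initialization means any future caller of nfs_direct_req_alloc() gets a fully initialized request, rather than each path repeating the kref, waitqueue, and list setup by hand.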