author    J. Bruce Fields <bfields@redhat.com>  2011-10-17 11:14:48 -0400
committer J. Bruce Fields <bfields@redhat.com>  2011-10-17 17:50:07 -0400
commit    996e09385c364f97a89648b401409521e2a3a094 (patch)
tree      ab1f76e385a3e66e070eb87e170288f99d6a1b64  /fs/nfsd/nfs4state.c
parent    32513b40efdc693b3675f1c691ab901518fbcb6a (diff)
nfsd4: do idr preallocation with stateid allocation

Move idr preallocation out of stateid initialization, into stateid
allocation, so that we no longer have to handle any errors from the
former.

This is a little subtle due to the way the idr code manages these
preallocated items--document that in comments.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
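For readers unfamiliar with the pre-3.9 idr interface used in this file, the split the patch adopts looks roughly like the sketch below. This is only an illustration of the idr_pre_get()/idr_get_new_above() pattern, not code from the patch; my_obj, my_idr, my_cache, my_alloc() and my_init() are made-up names.

#include <linux/idr.h>
#include <linux/slab.h>

struct my_obj {
	int id;
};

static struct my_obj *my_alloc(struct idr *my_idr, struct kmem_cache *my_cache)
{
	/* Reserve an idr node up front; if anything after this fails,
	 * the reservation is simply left for a later caller -- the idr
	 * layer caps how many reservations can be outstanding. */
	if (!idr_pre_get(my_idr, GFP_KERNEL))
		return NULL;
	return kmem_cache_alloc(my_cache, GFP_KERNEL);
}

static void my_init(struct idr *my_idr, struct my_obj *obj)
{
	int id;

	/* Cannot fail for lack of memory: the node was reserved in
	 * my_alloc(), and the caller's locking (the state lock, in
	 * nfsd's case) keeps anyone else from consuming it first. */
	BUG_ON(idr_get_new_above(my_idr, obj, 0, &id));
	obj->id = id;
}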
Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--	fs/nfsd/nfs4state.c	76
1 file changed, 37 insertions(+), 39 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ae5d25075f6..1f8c781c2a2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -215,13 +215,12 @@ static inline int get_new_stid(struct nfs4_stid *stid)
 	int new_stid;
 	int error;
 
-	if (!idr_pre_get(stateids, GFP_KERNEL))
-		return -ENOMEM;
-
 	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
 	/*
-	 * All this code is currently serialized; the preallocation
-	 * above should still be ours:
+	 * Note: the necessary preallocation was done in
+	 * nfs4_alloc_stateid(). The idr code caps the number of
+	 * preallocations that can exist at a time, but the state lock
+	 * prevents anyone from using ours before we get here:
 	 */
 	BUG_ON(error);
 	/*
@@ -240,7 +239,7 @@ static inline int get_new_stid(struct nfs4_stid *stid)
 	return new_stid;
 }
 
-static inline __be32 init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
+static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
 {
 	stateid_t *s = &stid->sc_stateid;
 	int new_id;
@@ -249,12 +248,24 @@ static inline __be32 init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, u
 	stid->sc_client = cl;
 	s->si_opaque.so_clid = cl->cl_clientid;
 	new_id = get_new_stid(stid);
-	if (new_id < 0)
-		return nfserr_jukebox;
 	s->si_opaque.so_id = (u32)new_id;
 	/* Will be incremented before return to client: */
 	s->si_generation = 0;
-	return 0;
+}
+
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
+{
+	struct idr *stateids = &cl->cl_stateids;
+
+	if (!idr_pre_get(stateids, GFP_KERNEL))
+		return NULL;
+	/*
+	 * Note: if we fail here (or any time between now and the time
+	 * we actually get the new idr), we won't need to undo the idr
+	 * preallocation, since the idr code caps the number of
+	 * preallocated entries.
+	 */
+	return kmem_cache_alloc(slab, GFP_KERNEL);
 }
 
 static struct nfs4_delegation *
@@ -262,7 +273,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
 {
 	struct nfs4_delegation *dp;
 	struct nfs4_file *fp = stp->st_file;
-	__be32 status;
 
 	dprintk("NFSD alloc_init_deleg\n");
 	/*
@@ -276,14 +286,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
 		return NULL;
 	if (num_delegations > max_delegations)
 		return NULL;
-	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
+	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
 	if (dp == NULL)
 		return dp;
-	status = init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
-	if (status) {
-		kmem_cache_free(deleg_slab, dp);
-		return NULL;
-	}
+	init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
 	/*
 	 * delegation seqid's are never incremented. The 4.1 special
 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -2331,14 +2337,11 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
 	return oo;
 }
 
-static inline __be32 init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
+static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
 	struct nfs4_openowner *oo = open->op_openowner;
 	struct nfs4_client *clp = oo->oo_owner.so_client;
-	__be32 status;
 
-	status = init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
-	if (status)
-		return status;
+	init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
 	INIT_LIST_HEAD(&stp->st_lockowners);
 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
 	list_add(&stp->st_perfile, &fp->fi_stateids);
@@ -2350,7 +2353,6 @@ static inline __be32 init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_
 	__set_bit(open->op_share_access, &stp->st_access_bmap);
 	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
 	stp->st_openstp = NULL;
-	return nfs_ok;
 }
 
 static void
@@ -2614,10 +2616,14 @@ nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_st
 	return nfs_ok;
 }
 
-static inline struct nfs4_ol_stateid *
-nfs4_alloc_stateid(void)
+static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
 {
-	return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
+	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
+}
+
+static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
+{
+	kmem_cache_free(stateid_slab, s);
 }
 
 static inline int nfs4_access_to_access(u32 nfs4_access)
@@ -2661,15 +2667,16 @@ nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_ol_stateid **stpp,
 		struct nfsd4_open *open)
 {
 	struct nfs4_ol_stateid *stp;
+	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
 	__be32 status;
 
-	stp = nfs4_alloc_stateid();
+	stp = nfs4_alloc_stateid(cl);
 	if (stp == NULL)
 		return nfserr_jukebox;
 
 	status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
 	if (status) {
-		kmem_cache_free(stateid_slab, stp);
+		nfs4_free_stateid(stp);
 		return status;
 	}
 	*stpp = stp;
@@ -2912,11 +2919,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
 	if (status)
 		goto out;
-	status = init_open_stateid(stp, fp, open);
-	if (status) {
-		release_open_stateid(stp);
-		goto out;
-	}
+	init_open_stateid(stp, fp, open);
 	status = nfsd4_truncate(rqstp, current_fh, open);
 	if (status) {
 		release_open_stateid(stp);
@@ -3812,16 +3815,11 @@ alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct
 {
 	struct nfs4_ol_stateid *stp;
 	struct nfs4_client *clp = lo->lo_owner.so_client;
-	__be32 status;
 
-	stp = nfs4_alloc_stateid();
+	stp = nfs4_alloc_stateid(clp);
 	if (stp == NULL)
 		return NULL;
-	status = init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
-	if (status) {
-		free_generic_stateid(stp);
-		return NULL;
-	}
+	init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
 	list_add(&stp->st_perfile, &fp->fi_stateids);
 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
 	stp->st_stateowner = &lo->lo_owner;