author     J. Bruce Fields <bfields@redhat.com>  2013-02-03 12:23:01 -0500
committer  J. Bruce Fields <bfields@redhat.com>  2013-02-05 09:41:12 -0500
commit     3abdb6071250b896b9d5b6e0c310d6e95666b4d7 (patch)
tree       4d3e50e7ad92ef58db95d3299d77001f1cff9366 /fs/nfsd
parent     2d32b29a1c2830f7c42caa8258c714acd983961f (diff)
nfsd4: simplify idr allocation
We don't really need to preallocate at all; just allocate and initialize
everything at once, but leave the sc_type field initially 0 to prevent
finding the stateid till it's fully initialized.
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
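
The key to dropping the preallocation step is that the new stateid is inserted into the client's idr before it is fully set up: nfs4_alloc_stid() leaves sc_type at 0, and find_stateid() now treats a zero sc_type as "no such stateid", so a half-initialized entry can never be handed back to a lookup. A standalone sketch of that visibility trick (simplified userspace code with illustrative names, not the nfsd data structures):

/* Sketch: publish an object in a lookup table while it is still only
 * partly initialized; type == 0 hides it from readers until the caller
 * finishes setting it up. */
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 16
#define TYPE_OPEN  1            /* stands in for NFS4_OPEN_STID etc. */

struct obj {
        int id;
        unsigned char type;     /* 0 means "not fully initialized yet" */
};

static struct obj *table[TABLE_SIZE];

static struct obj *obj_alloc(int id)
{
        struct obj *o;

        if (id < 0 || id >= TABLE_SIZE)
                return NULL;
        o = calloc(1, sizeof(*o));
        if (!o)
                return NULL;
        o->id = id;
        o->type = 0;            /* published, but invisible to lookups */
        table[id] = o;
        return o;
}

static struct obj *obj_find(int id)
{
        struct obj *o;

        if (id < 0 || id >= TABLE_SIZE)
                return NULL;
        o = table[id];
        /* mirror of the new find_stateid(): skip half-initialized entries */
        if (!o || !o->type)
                return NULL;
        return o;
}

int main(void)
{
        struct obj *o = obj_alloc(3);

        printf("before init: %p\n", (void *)obj_find(3));  /* NULL */
        o->type = TYPE_OPEN;                               /* now visible */
        printf("after init:  %p\n", (void *)obj_find(3));  /* non-NULL */
        return 0;
}

Only once the caller fills in a real type (NFS4_OPEN_STID, NFS4_LOCK_STID, NFS4_DELEG_STID) does the entry become findable, which is what the init_open_stateid(), alloc_init_lock_stateid() and alloc_init_deleg() hunks below now do directly.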
Diffstat (limited to 'fs/nfsd')
-rw-r--r--  fs/nfsd/nfs4state.c  81
1 file changed, 52 insertions(+), 29 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a6637de7ae13..27c77a0b423f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -261,33 +261,46 @@ static inline int get_new_stid(struct nfs4_stid *stid)
         return new_stid;
 }
 
-static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
+kmem_cache *slab)
 {
-        stateid_t *s = &stid->sc_stateid;
+        struct idr *stateids = &cl->cl_stateids;
+        static int min_stateid = 0;
+        struct nfs4_stid *stid;
         int new_id;
 
-        stid->sc_type = type;
+        stid = kmem_cache_alloc(slab, GFP_KERNEL);
+        if (!stid)
+                return NULL;
+
+        if (!idr_pre_get(stateids, GFP_KERNEL))
+                goto out_free;
+        if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
+                goto out_free;
         stid->sc_client = cl;
-        s->si_opaque.so_clid = cl->cl_clientid;
-        new_id = get_new_stid(stid);
-        s->si_opaque.so_id = (u32)new_id;
+        stid->sc_type = 0;
+        stid->sc_stateid.si_opaque.so_id = new_id;
+        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
         /* Will be incremented before return to client: */
-        s->si_generation = 0;
-}
+        stid->sc_stateid.si_generation = 0;
 
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
-{
-        struct idr *stateids = &cl->cl_stateids;
-
-        if (!idr_pre_get(stateids, GFP_KERNEL))
-                return NULL;
         /*
-         * Note: if we fail here (or any time between now and the time
-         * we actually get the new idr), we won't need to undo the idr
-         * preallocation, since the idr code caps the number of
-         * preallocated entries.
+         * It shouldn't be a problem to reuse an opaque stateid value.
+         * I don't think it is for 4.1.  But with 4.0 I worry that, for
+         * example, a stray write retransmission could be accepted by
+         * the server when it should have been rejected.  Therefore,
+         * adopt a trick from the sctp code to attempt to maximize the
+         * amount of time until an id is reused, by ensuring they always
+         * "increase" (mod INT_MAX):
         */
-        return kmem_cache_alloc(slab, GFP_KERNEL);
+
+        min_stateid = new_id+1;
+        if (min_stateid == INT_MAX)
+                min_stateid = 0;
+        return stid;
+out_free:
+        kfree(stid);
+        return NULL;
 }
 
 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
@@ -316,7 +329,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
         dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
         if (dp == NULL)
                 return dp;
-        init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
+        dp->dl_stid.sc_type = NFS4_DELEG_STID;
         /*
          * delegation seqid's are never incremented.  The 4.1 special
          * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -337,13 +350,21 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
         return dp;
 }
 
+void free_stid(struct nfs4_stid *s, struct kmem_cache *slab)
+{
+        struct idr *stateids = &s->sc_client->cl_stateids;
+
+        idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+        kmem_cache_free(slab, s);
+}
+
 void
 nfs4_put_delegation(struct nfs4_delegation *dp)
 {
         if (atomic_dec_and_test(&dp->dl_count)) {
                 dprintk("NFSD: freeing dp %p\n",dp);
                 put_nfs4_file(dp->dl_file);
-                kmem_cache_free(deleg_slab, dp);
+                free_stid(&dp->dl_stid, deleg_slab);
                 num_delegations--;
         }
 }
@@ -360,9 +381,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 
 static void unhash_stid(struct nfs4_stid *s)
 {
-        struct idr *stateids = &s->sc_client->cl_stateids;
-
-        idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+        s->sc_type = 0;
 }
 
 /* Called under the state lock. */
@@ -519,7 +538,7 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 
 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
 {
-        kmem_cache_free(stateid_slab, stp);
+        free_stid(&stp->st_stid, stateid_slab);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -1260,7 +1279,12 @@ static void gen_confirm(struct nfs4_client *clp)
 
 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
 {
-        return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+        struct nfs4_stid *ret;
+
+        ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+        if (!ret || !ret->sc_type)
+                return NULL;
+        return ret;
 }
 
 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
@@ -2446,9 +2470,8 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
 
 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
         struct nfs4_openowner *oo = open->op_openowner;
-        struct nfs4_client *clp = oo->oo_owner.so_client;
 
-        init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
+        stp->st_stid.sc_type = NFS4_OPEN_STID;
         INIT_LIST_HEAD(&stp->st_lockowners);
         list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
         list_add(&stp->st_perfile, &fp->fi_stateids);
@@ -4034,7 +4057,7 @@ alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct
         stp = nfs4_alloc_stateid(clp);
         if (stp == NULL)
                 return NULL;
-        init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
+        stp->st_stid.sc_type = NFS4_LOCK_STID;
         list_add(&stp->st_perfile, &fp->fi_stateids);
         list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
         stp->st_stateowner = &lo->lo_owner;
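
The comment added in nfs4_alloc_stid() borrows the id-allocation policy the sctp code uses: instead of always taking the lowest free id, keep a moving floor (min_stateid) just above the last id handed out, so a freed id is not reused until the counter has wrapped past INT_MAX. A rough standalone sketch of that policy (toy fixed-size table and simplified wrap handling, not the kernel idr API):

/* Sketch of the "ids always increase (mod INT_MAX)" policy: next_min is
 * advanced past each id handed out, so a freed id is not reused until
 * allocation has wrapped around the whole id space. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8                     /* tiny id space for the demo */

static bool in_use[SLOTS];
static int next_min;                /* lowest id the next allocation may use */

static int alloc_increasing_id(void)
{
        for (int pass = 0; pass < 2; pass++) {
                for (int id = next_min; id < SLOTS; id++) {
                        if (!in_use[id]) {
                                in_use[id] = true;
                                next_min = id + 1;       /* push the floor up */
                                if (next_min == INT_MAX) /* wrap, as in the commit */
                                        next_min = 0;
                                return id;
                        }
                }
                next_min = 0;       /* nothing above the floor: retry from 0 */
        }
        return -1;                  /* id space exhausted */
}

int main(void)
{
        int a = alloc_increasing_id();          /* 0 */
        int b = alloc_increasing_id();          /* 1 */

        in_use[a] = false;                      /* free id 0 ... */
        printf("%d %d %d\n", a, b, alloc_increasing_id());  /* ... yet next is 2 */
        return 0;
}

With this policy a stale NFSv4.0 stateid from a retransmitted request is unlikely to collide with a freshly allocated one, which is the concern the comment in nfs4_alloc_stid() raises.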