diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2012-01-17 22:04:25 -0500 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2012-01-31 18:20:28 -0500 |
commit | d2d7ce28a2f8ec6ca2a49145e643d2e3c7d21ba3 (patch) | |
tree | 4c73f071c2282185402a4da7d20b30749530c89f /fs/nfs/nfs4state.c | |
parent | 9157c31dd610a127bc6f01bc1953cf8b80382040 (diff) |
NFSv4: Replace lock_owner->ld_id with an ida based allocator
Again, we're unlikely to ever need more than 2^31 simultaneous lock
owners, so let's replace the custom allocator.
Now that there are no more users, we can also get rid of the custom
allocator code.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/nfs4state.c')
-rw-r--r-- | fs/nfs/nfs4state.c | 74 |
1 files changed, 8 insertions, 66 deletions
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 8472707286f9..5abf23615bc5 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -317,62 +317,6 @@ out: | |||
317 | return cred; | 317 | return cred; |
318 | } | 318 | } |
319 | 319 | ||
320 | static void nfs_alloc_unique_id_locked(struct rb_root *root, | ||
321 | struct nfs_unique_id *new, | ||
322 | __u64 minval, int maxbits) | ||
323 | { | ||
324 | struct rb_node **p, *parent; | ||
325 | struct nfs_unique_id *pos; | ||
326 | __u64 mask = ~0ULL; | ||
327 | |||
328 | if (maxbits < 64) | ||
329 | mask = (1ULL << maxbits) - 1ULL; | ||
330 | |||
331 | /* Ensure distribution is more or less flat */ | ||
332 | get_random_bytes(&new->id, sizeof(new->id)); | ||
333 | new->id &= mask; | ||
334 | if (new->id < minval) | ||
335 | new->id += minval; | ||
336 | retry: | ||
337 | p = &root->rb_node; | ||
338 | parent = NULL; | ||
339 | |||
340 | while (*p != NULL) { | ||
341 | parent = *p; | ||
342 | pos = rb_entry(parent, struct nfs_unique_id, rb_node); | ||
343 | |||
344 | if (new->id < pos->id) | ||
345 | p = &(*p)->rb_left; | ||
346 | else if (new->id > pos->id) | ||
347 | p = &(*p)->rb_right; | ||
348 | else | ||
349 | goto id_exists; | ||
350 | } | ||
351 | rb_link_node(&new->rb_node, parent, p); | ||
352 | rb_insert_color(&new->rb_node, root); | ||
353 | return; | ||
354 | id_exists: | ||
355 | for (;;) { | ||
356 | new->id++; | ||
357 | if (new->id < minval || (new->id & mask) != new->id) { | ||
358 | new->id = minval; | ||
359 | break; | ||
360 | } | ||
361 | parent = rb_next(parent); | ||
362 | if (parent == NULL) | ||
363 | break; | ||
364 | pos = rb_entry(parent, struct nfs_unique_id, rb_node); | ||
365 | if (new->id < pos->id) | ||
366 | break; | ||
367 | } | ||
368 | goto retry; | ||
369 | } | ||
370 | |||
371 | static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) | ||
372 | { | ||
373 | rb_erase(&id->rb_node, root); | ||
374 | } | ||
375 | |||
376 | static struct nfs4_state_owner * | 320 | static struct nfs4_state_owner * |
377 | nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) | 321 | nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) |
378 | { | 322 | { |
@@ -800,7 +744,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f | |||
800 | { | 744 | { |
801 | struct nfs4_lock_state *lsp; | 745 | struct nfs4_lock_state *lsp; |
802 | struct nfs_server *server = state->owner->so_server; | 746 | struct nfs_server *server = state->owner->so_server; |
803 | struct nfs_client *clp = server->nfs_client; | ||
804 | 747 | ||
805 | lsp = kzalloc(sizeof(*lsp), GFP_NOFS); | 748 | lsp = kzalloc(sizeof(*lsp), GFP_NOFS); |
806 | if (lsp == NULL) | 749 | if (lsp == NULL) |
@@ -820,24 +763,23 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f | |||
820 | lsp->ls_owner.lo_u.posix_owner = fl_owner; | 763 | lsp->ls_owner.lo_u.posix_owner = fl_owner; |
821 | break; | 764 | break; |
822 | default: | 765 | default: |
823 | kfree(lsp); | 766 | goto out_free; |
824 | return NULL; | ||
825 | } | 767 | } |
826 | spin_lock(&clp->cl_lock); | 768 | lsp->ls_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); |
827 | nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64); | 769 | if (lsp->ls_id < 0) |
828 | spin_unlock(&clp->cl_lock); | 770 | goto out_free; |
829 | INIT_LIST_HEAD(&lsp->ls_locks); | 771 | INIT_LIST_HEAD(&lsp->ls_locks); |
830 | return lsp; | 772 | return lsp; |
773 | out_free: | ||
774 | kfree(lsp); | ||
775 | return NULL; | ||
831 | } | 776 | } |
832 | 777 | ||
833 | static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) | 778 | static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) |
834 | { | 779 | { |
835 | struct nfs_server *server = lsp->ls_state->owner->so_server; | 780 | struct nfs_server *server = lsp->ls_state->owner->so_server; |
836 | struct nfs_client *clp = server->nfs_client; | ||
837 | 781 | ||
838 | spin_lock(&clp->cl_lock); | 782 | ida_simple_remove(&server->lockowner_id, lsp->ls_id); |
839 | nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id); | ||
840 | spin_unlock(&clp->cl_lock); | ||
841 | rpc_destroy_wait_queue(&lsp->ls_sequence.wait); | 783 | rpc_destroy_wait_queue(&lsp->ls_sequence.wait); |
842 | kfree(lsp); | 784 | kfree(lsp); |
843 | } | 785 | } |