diff options
author | J. Bruce Fields <bfields@citi.umich.edu> | 2008-02-20 15:40:15 -0500 |
---|---|---|
committer | J. Bruce Fields <bfields@citi.umich.edu> | 2008-04-23 16:13:39 -0400 |
commit | d8421202121ce74daf4625ca9d1d825bbd7ce66a (patch) | |
tree | 0465a155e03b97660d2f887e702b8f2917889c75 /fs/lockd | |
parent | a95e56e72c196970a8067cd515c658d064813170 (diff) |
lockd: convert nsm_mutex to a spinlock
There's no reason for a mutex here, except to allow an allocation under
the lock, which we can avoid with the usual trick of preallocating
memory for the new object and freeing it if it turns out to be
unnecessary.
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'fs/lockd')
-rw-r--r-- | fs/lockd/host.c | 34 |
1 file changed, 19 insertions, 15 deletions
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index de0ffb6106c4..c7854791898f 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
@@ -457,7 +457,7 @@ nlm_gc_hosts(void) | |||
457 | * Manage NSM handles | 457 | * Manage NSM handles |
458 | */ | 458 | */ |
459 | static LIST_HEAD(nsm_handles); | 459 | static LIST_HEAD(nsm_handles); |
460 | static DEFINE_MUTEX(nsm_mutex); | 460 | static DEFINE_SPINLOCK(nsm_lock); |
461 | 461 | ||
462 | static struct nsm_handle * | 462 | static struct nsm_handle * |
463 | __nsm_find(const struct sockaddr_in *sin, | 463 | __nsm_find(const struct sockaddr_in *sin, |
@@ -479,7 +479,8 @@ __nsm_find(const struct sockaddr_in *sin, | |||
479 | return NULL; | 479 | return NULL; |
480 | } | 480 | } |
481 | 481 | ||
482 | mutex_lock(&nsm_mutex); | 482 | retry: |
483 | spin_lock(&nsm_lock); | ||
483 | list_for_each_entry(pos, &nsm_handles, sm_link) { | 484 | list_for_each_entry(pos, &nsm_handles, sm_link) { |
484 | 485 | ||
485 | if (hostname && nsm_use_hostnames) { | 486 | if (hostname && nsm_use_hostnames) { |
@@ -489,28 +490,32 @@ __nsm_find(const struct sockaddr_in *sin, | |||
489 | } else if (!nlm_cmp_addr(&pos->sm_addr, sin)) | 490 | } else if (!nlm_cmp_addr(&pos->sm_addr, sin)) |
490 | continue; | 491 | continue; |
491 | atomic_inc(&pos->sm_count); | 492 | atomic_inc(&pos->sm_count); |
493 | kfree(nsm); | ||
492 | nsm = pos; | 494 | nsm = pos; |
493 | goto out; | 495 | goto found; |
494 | } | 496 | } |
495 | 497 | if (nsm) { | |
496 | if (!create) { | 498 | list_add(&nsm->sm_link, &nsm_handles); |
497 | nsm = NULL; | 499 | goto found; |
498 | goto out; | ||
499 | } | 500 | } |
501 | spin_unlock(&nsm_lock); | ||
502 | |||
503 | if (!create) | ||
504 | return NULL; | ||
500 | 505 | ||
501 | nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL); | 506 | nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL); |
502 | if (nsm == NULL) | 507 | if (nsm == NULL) |
503 | goto out; | 508 | return NULL; |
509 | |||
504 | nsm->sm_addr = *sin; | 510 | nsm->sm_addr = *sin; |
505 | nsm->sm_name = (char *) (nsm + 1); | 511 | nsm->sm_name = (char *) (nsm + 1); |
506 | memcpy(nsm->sm_name, hostname, hostname_len); | 512 | memcpy(nsm->sm_name, hostname, hostname_len); |
507 | nsm->sm_name[hostname_len] = '\0'; | 513 | nsm->sm_name[hostname_len] = '\0'; |
508 | atomic_set(&nsm->sm_count, 1); | 514 | atomic_set(&nsm->sm_count, 1); |
515 | goto retry; | ||
509 | 516 | ||
510 | list_add(&nsm->sm_link, &nsm_handles); | 517 | found: |
511 | 518 | spin_unlock(&nsm_lock); | |
512 | out: | ||
513 | mutex_unlock(&nsm_mutex); | ||
514 | return nsm; | 519 | return nsm; |
515 | } | 520 | } |
516 | 521 | ||
@@ -529,10 +534,9 @@ nsm_release(struct nsm_handle *nsm) | |||
529 | { | 534 | { |
530 | if (!nsm) | 535 | if (!nsm) |
531 | return; | 536 | return; |
532 | mutex_lock(&nsm_mutex); | 537 | if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) { |
533 | if (atomic_dec_and_test(&nsm->sm_count)) { | ||
534 | list_del(&nsm->sm_link); | 538 | list_del(&nsm->sm_link); |
539 | spin_unlock(&nsm_lock); | ||
535 | kfree(nsm); | 540 | kfree(nsm); |
536 | } | 541 | } |
537 | mutex_unlock(&nsm_mutex); | ||
538 | } | 542 | } |