author    | Chuck Lever <chuck.lever@oracle.com>         | 2010-12-14 10:06:22 -0500
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2010-12-16 12:37:27 -0500
commit    | d2df0484bb38f2e0d9754b00597d4a6d1cf666d0 (patch)
tree      | 6d0145d895ab18bd41bd60b215fb0afe5a1549bd /fs/lockd
parent    | 67216b94d498f5880d8bba2a6b841880739dd524 (diff)
lockd: Rename nlm_hosts
Clean up.
nlm_hosts now contains only server-side entries. Rename it to match the
naming convention of the client-side cache.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
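For illustration only, here is a minimal stand-alone sketch of the layout the rename leaves in place: one hash table per side of lockd, both sized by the same bucket count, with server-side lookups indexing only the server table. All `demo_*` names are hypothetical stand-ins; the kernel code uses `hlist_head` chains and hashes socket addresses via nlm_hash_address(), whereas this sketch hashes a host name string for brevity.

```c
#include <stdio.h>
#include <string.h>

#define DEMO_HOST_NRHASH 32                 /* stand-in for NLM_HOST_NRHASH */

struct demo_host {
	char              name[64];
	struct demo_host *next;
};

/* After the rename, each side of lockd owns its own host cache. */
static struct demo_host *demo_server_hosts[DEMO_HOST_NRHASH];
static struct demo_host *demo_client_hosts[DEMO_HOST_NRHASH];

/* Hypothetical stand-in for nlm_hash_address(): hash a host name. */
static unsigned int demo_hash_address(const char *name)
{
	unsigned int hash = 0;

	while (*name)
		hash = hash * 31 + (unsigned char)*name++;
	return hash % DEMO_HOST_NRHASH;
}

/* A server-side lookup walks only the server-side table, mirroring
 * nlmsvc_lookup_host() indexing nlm_server_hosts[]. */
static struct demo_host *demo_svc_lookup(const char *name)
{
	struct demo_host *h = demo_server_hosts[demo_hash_address(name)];

	for (; h != NULL; h = h->next)
		if (strcmp(h->name, name) == 0)
			return h;
	return NULL;
}

int main(void)
{
	static struct demo_host client = { .name = "client.example.org" };
	unsigned int bucket = demo_hash_address(client.name);

	/* Insert at the head of the server-side chain, then look it up. */
	client.next = demo_server_hosts[bucket];
	demo_server_hosts[bucket] = &client;

	printf("found: %s\n", demo_svc_lookup(client.name)->name);
	(void)demo_client_hosts;   /* the client-side cache is unused in this demo */
	return 0;
}
```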
Diffstat (limited to 'fs/lockd')
-rw-r--r-- | fs/lockd/host.c | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0250b0e4f5e9..87fbde1d1a1f 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,7 +25,7 @@
 #define NLM_HOST_EXPIRE	(300 * HZ)
 #define NLM_HOST_COLLECT	(120 * HZ)
 
-static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
 static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
 
 #define for_each_host(host, pos, chain, table) \
@@ -184,7 +184,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
 	 * different NLM rpc_clients into one single nlm_host object.
 	 * This would allow us to have one nlm_host per address.
 	 */
-	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
+	chain = &nlm_server_hosts[nlm_hash_address(ni->sap)];
 	hlist_for_each_entry(host, pos, chain, h_hash) {
 		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
 			continue;
@@ -428,7 +428,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 	if (time_after_eq(jiffies, next_gc))
 		nlm_gc_hosts();
 
-	chain = &nlm_hosts[nlm_hash_address(ni.sap)];
+	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
 	hlist_for_each_entry(host, pos, chain, h_hash) {
 		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
 			continue;
@@ -632,7 +632,7 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 	 * lock for this.
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
-	while ((host = next_host_state(nlm_hosts, nsm, info)) != NULL) {
+	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
 		nlmsvc_free_host_resources(host);
 		nlmsvc_release_host(host);
 	}
@@ -660,7 +660,7 @@ nlm_shutdown_hosts(void)
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for_each_host(host, pos, chain, nlm_hosts) {
+	for_each_host(host, pos, chain, nlm_server_hosts) {
 		host->h_expires = jiffies - 1;
 		if (host->h_rpcclnt) {
 			rpc_shutdown_client(host->h_rpcclnt);
@@ -676,7 +676,7 @@ nlm_shutdown_hosts(void)
 	if (nrhosts) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
 		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for_each_host(host, pos, chain, nlm_hosts) {
+		for_each_host(host, pos, chain, nlm_server_hosts) {
 			dprintk(" %s (cnt %d use %d exp %ld)\n",
 				host->h_name, atomic_read(&host->h_count),
 				host->h_inuse, host->h_expires);
@@ -697,13 +697,13 @@ nlm_gc_hosts(void)
 	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection\n");
-	for_each_host(host, pos, chain, nlm_hosts)
+	for_each_host(host, pos, chain, nlm_server_hosts)
 		host->h_inuse = 0;
 
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();
 
-	for_each_host_safe(host, pos, next, chain, nlm_hosts) {
+	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
 		if (atomic_read(&host->h_count) || host->h_inuse
 		    || time_before(jiffies, host->h_expires)) {
 			dprintk("nlm_gc_hosts skipping %s "