diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/lockd/host.c |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/lockd/host.c')
-rw-r--r-- | fs/lockd/host.c | 346 |
1 file changed, 346 insertions, 0 deletions
diff --git a/fs/lockd/host.c b/fs/lockd/host.c new file mode 100644 index 000000000000..52707c5ad6ea --- /dev/null +++ b/fs/lockd/host.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/host.c | ||
3 | * | ||
4 | * Management for NLM peer hosts. The nlm_host struct is shared | ||
5 | * between client and server implementation. The only reason to | ||
6 | * do so is to reduce code bloat. | ||
7 | * | ||
8 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/in.h> | ||
15 | #include <linux/sunrpc/clnt.h> | ||
16 | #include <linux/sunrpc/svc.h> | ||
17 | #include <linux/lockd/lockd.h> | ||
18 | #include <linux/lockd/sm_inter.h> | ||
19 | |||
20 | |||
#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
/* Soft cap on cached hosts: above it, GC runs more often and entries
 * expire later (see NLM_HOST_EXPIRE/NLM_HOST_COLLECT below). */
#define NLM_HOST_MAX		64
/* Number of hash buckets; must stay a power of two for NLM_ADDRHASH. */
#define NLM_HOST_NRHASH		32
/* Bucket index from an IPv4 address (network order in, host order masked). */
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
/* How long before forcing a fresh portmap lookup of the peer's lockd. */
#define NLM_HOST_REBIND		(60 * HZ)
/* Idle lifetime of a host entry; shorter when the cache is over the cap.
 * NOTE(review): 300s when over NLM_HOST_MAX vs 120s under it looks
 * inverted relative to the "expire sooner under pressure" intent of
 * NLM_HOST_COLLECT below — confirm against later kernel history. */
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
/* Interval between GC passes; shorter when the cache is over the cap. */
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)

/* Hash table of all cached nlm_host entries, chained via h_next. */
static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
/* Jiffies timestamp of the next garbage-collection pass. */
static unsigned long		next_gc;
/* Current number of entries across all buckets. */
static int			nrhosts;
/* Serializes all access to nlm_hosts[], nrhosts and next_gc. */
static DECLARE_MUTEX(nlm_host_sema);


static void			nlm_gc_hosts(void);
37 | |||
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 * Client-side convenience wrapper: server flag = 0.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(0, sin, proto, version);
}
46 | |||
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 * Server-side convenience wrapper: server flag = 1, address/proto/version
 * taken from the incoming svc request.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
56 | |||
/*
 * Common host lookup routine for server & client.
 *
 * Looks up (or creates) the nlm_host entry matching the peer address,
 * transport protocol, NLM version and client/server role.  Returns the
 * entry with its refcount incremented, or NULL on allocation failure.
 * Takes nlm_host_sema for the whole operation and may trigger a GC pass.
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	/* NOTE(review): the dprintk above guards against sin == NULL but the
	 * very next line dereferences it unconditionally — either the guard
	 * is dead code or this can oops; confirm no caller passes NULL. */
	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	down(&nlm_host_sema);

	/* Opportunistic garbage collection while we hold the lock. */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			/* Move the hit to the front of its chain so hot
			 * hosts are found quickly on the next lookup. */
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			up(&nlm_host_sema);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	/* On failure host is NULL, so the nohost path returns NULL. */
	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	/* Name the host by its dotted-quad address. */
	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	init_MUTEX(&host->h_sema);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);	/* caller's reference */
	init_waitqueue_head(&host->h_gracewait);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server	   = server;
	/* Insert at the head of the bucket chain. */
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);

	/* Over the soft cap: force a GC pass on the next lookup. */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	up(&nlm_host_sema);
	return host;
}
134 | |||
135 | struct nlm_host * | ||
136 | nlm_find_client(void) | ||
137 | { | ||
138 | /* find a nlm_host for a client for which h_killed == 0. | ||
139 | * and return it | ||
140 | */ | ||
141 | int hash; | ||
142 | down(&nlm_host_sema); | ||
143 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { | ||
144 | struct nlm_host *host, **hp; | ||
145 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | ||
146 | if (host->h_server && | ||
147 | host->h_killed == 0) { | ||
148 | nlm_get_host(host); | ||
149 | up(&nlm_host_sema); | ||
150 | return host; | ||
151 | } | ||
152 | } | ||
153 | } | ||
154 | up(&nlm_host_sema); | ||
155 | return NULL; | ||
156 | } | ||
157 | |||
158 | |||
/*
 * Create the NLM RPC client for an NLM peer.
 *
 * Returns the (possibly cached) rpc_clnt for this host, or NULL if the
 * transport or client could not be created.  Serialized per-host via
 * h_sema.  For an existing client on a datagram transport, periodically
 * clears cl_port to force a portmap re-query on the next call.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	down(&host->h_sema);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 * Note: why keep rebinding if we're on a tcp connection?
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
			/* cl_port = 0 makes the next RPC re-query portmap. */
			clnt->cl_port = 0;
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		if (IS_ERR(clnt)) {
			/* The client owns the xprt only on success, so we
			 * must destroy it ourselves here. */
			xprt_destroy(xprt);
			goto forgetit;
		}
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		xprt->nocong = 1;	/* No congestion control for NLM */
		xprt->resvport = 1;	/* NLM requires a reserved port */

		host->h_rpcclnt = clnt;
	}

	up(&host->h_sema);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	up(&host->h_sema);
	return NULL;
}
215 | |||
216 | /* | ||
217 | * Force a portmap lookup of the remote lockd port | ||
218 | */ | ||
219 | void | ||
220 | nlm_rebind_host(struct nlm_host *host) | ||
221 | { | ||
222 | dprintk("lockd: rebind host %s\n", host->h_name); | ||
223 | if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { | ||
224 | host->h_rpcclnt->cl_port = 0; | ||
225 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Increment NLM host count | ||
231 | */ | ||
232 | struct nlm_host * nlm_get_host(struct nlm_host *host) | ||
233 | { | ||
234 | if (host) { | ||
235 | dprintk("lockd: get host %s\n", host->h_name); | ||
236 | atomic_inc(&host->h_count); | ||
237 | host->h_expires = jiffies + NLM_HOST_EXPIRE; | ||
238 | } | ||
239 | return host; | ||
240 | } | ||
241 | |||
242 | /* | ||
243 | * Release NLM host after use | ||
244 | */ | ||
245 | void nlm_release_host(struct nlm_host *host) | ||
246 | { | ||
247 | if (host != NULL) { | ||
248 | dprintk("lockd: release host %s\n", host->h_name); | ||
249 | atomic_dec(&host->h_count); | ||
250 | BUG_ON(atomic_read(&host->h_count) < 0); | ||
251 | } | ||
252 | } | ||
253 | |||
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 *
 * Marks every cached host as expired, then runs one GC pass to free
 * them; warns if anything survives (references still held).
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	down(&nlm_host_sema);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			/* jiffies - 1 is already in the past, so every
			 * entry fails the GC's time_before() check. */
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	up(&nlm_host_sema);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
291 | |||
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_sema.  A host is freed only when its
 * refcount is zero, it holds no locks/blocks/shares (h_inuse clear
 * after the mark pass), and its expiry time has passed.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear-phase: assume every host is idle... */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep-phase: unlink and free everything still unmarked. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			/* Unlink from the chain before tearing down. */
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					/* In-flight RPCs: mark the client
					 * dead so it self-destructs when
					 * the last user drops it. */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			BUG_ON(!list_empty(&host->h_lockowners));
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
346 | |||