author      NeilBrown <neilb@suse.de>                 2006-10-02 05:17:45 -0400
committer   Linus Torvalds <torvalds@g5.osdl.org>     2006-10-02 10:57:17 -0400
commit      24e36663c375df577d2dcae437713481ffd6850c (patch)
tree        dd738e582b663c433eef3a53eb593a518439a285 /fs
parent      bc591ccff27e6a85d3a0d6fcb16cfadcc45267a8 (diff)
[PATCH] knfsd: be more selective in which sockets lockd listens on
Currently lockd always listens on UDP, and on TCP only if CONFIG_NFSD_TCP is set.
However, as lockd also performs services for the client, this is a problem:
if CONFIG_NFSD_TCP is not set and a TCP mount is used, the server will not be
able to call back to the client's lockd.
So:
- add an option to lockd_up saying which protocol is needed (see the
  sketch below)
- always open sockets for which an explicit port was given; otherwise
  only open a socket of the type required
- change nfsd to do one lockd_up per socket rather than one per thread

This:
- removes the dependency on CONFIG_NFSD_TCP
- means that lockd may open sockets at times other than startup
- means that lockd will *not* listen on UDP if the only mounts are
  TCP mounts (and nfsd hasn't started)
The last point is the only one that concerns me at all - I don't know
whether this might be a problem with some servers.
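
As a rough illustration of the selection policy described above, here is a
small user-space sketch that mirrors the make_socks() logic added in
fs/lockd/svc.c below. It is not kernel code; the decide() helper and the
example port values are purely illustrative assumptions.

#include <netinet/in.h>		/* IPPROTO_UDP, IPPROTO_TCP */
#include <stdio.h>

/* Decide which listening sockets lockd would create: one for the
 * protocol the caller actually needs, plus any protocol for which an
 * explicit port was configured (standing in for the nlm_udpport /
 * nlm_tcpport module options). */
static void decide(int proto, int nlm_udpport, int nlm_tcpport)
{
	int open_udp = (proto == IPPROTO_UDP) || nlm_udpport;
	int open_tcp = (proto == IPPROTO_TCP) || nlm_tcpport;

	printf("need %s, udpport=%d, tcpport=%d -> open UDP:%s TCP:%s\n",
	       proto == IPPROTO_TCP ? "TCP" : "UDP",
	       nlm_udpport, nlm_tcpport,
	       open_udp ? "yes" : "no", open_tcp ? "yes" : "no");
}

int main(void)
{
	decide(IPPROTO_TCP, 0, 0);	/* TCP mount, no options: TCP socket only */
	decide(IPPROTO_UDP, 0, 0);	/* UDP mount, no options: UDP socket only */
	decide(IPPROTO_TCP, 4045, 0);	/* explicit UDP port set: open both */
	return 0;
}
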
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r--   fs/lockd/clntlock.c  |  2
-rw-r--r--   fs/lockd/svc.c       | 47
-rw-r--r--   fs/nfs/client.c      |  3
-rw-r--r--   fs/nfsd/nfssvc.c     | 16
4 files changed, 53 insertions, 15 deletions
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index f95cc3f3c42d..6abb465b650f 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -202,7 +202,7 @@ reclaimer(void *ptr)
 	/* This one ensures that our parent doesn't terminate while the
 	 * reclaim is in progress */
 	lock_kernel();
-	lockd_up();
+	lockd_up(0);
 
 	nlmclnt_prepare_reclaim(host);
 	/* First, reclaim all locks that have been marked. */
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 13feba45030e..8d19de6a14dc 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -31,6 +31,7 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/sunrpc/svcsock.h>
+#include <net/ip.h>
 #include <linux/lockd/lockd.h>
 #include <linux/nfs.h>
 
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(nlmsvc_ops);
 static DEFINE_MUTEX(nlmsvc_mutex);
 static unsigned int nlmsvc_users;
 static pid_t nlmsvc_pid;
+static struct svc_serv *nlmsvc_serv;
 int nlmsvc_grace_period;
 unsigned long nlmsvc_timeout;
 
@@ -112,6 +114,7 @@ lockd(struct svc_rqst *rqstp)
 	 * Let our maker know we're running.
 	 */
 	nlmsvc_pid = current->pid;
+	nlmsvc_serv = serv;
 	complete(&lockd_start_done);
 
 	daemonize("lockd");
@@ -189,6 +192,7 @@ lockd(struct svc_rqst *rqstp)
 		nlmsvc_invalidate_all();
 		nlm_shutdown_hosts();
 		nlmsvc_pid = 0;
+		nlmsvc_serv = NULL;
 	} else
 		printk(KERN_DEBUG
 			"lockd: new process, skipping host shutdown\n");
@@ -205,11 +209,42 @@ lockd(struct svc_rqst *rqstp)
 	module_put_and_exit(0);
 }
 
+
+static int find_socket(struct svc_serv *serv, int proto)
+{
+	struct svc_sock *svsk;
+	int found = 0;
+	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list)
+		if (svsk->sk_sk->sk_protocol == proto) {
+			found = 1;
+			break;
+		}
+	return found;
+}
+
+static int make_socks(struct svc_serv *serv, int proto)
+{
+	/* Make any sockets that are needed but not present.
+	 * If nlm_udpport or nlm_tcpport were set as module
+	 * options, make those sockets unconditionally
+	 */
+	int err = 0;
+	if (proto == IPPROTO_UDP || nlm_udpport)
+		if (!find_socket(serv, IPPROTO_UDP))
+			err = svc_makesock(serv, IPPROTO_UDP, nlm_udpport);
+	if (err)
+		return err;
+	if (proto == IPPROTO_TCP || nlm_tcpport)
+		if (!find_socket(serv, IPPROTO_TCP))
+			err = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport);
+	return err;
+}
+
 /*
  * Bring up the lockd process if it's not already up.
  */
 int
-lockd_up(void)
+lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 {
 	static int warned;
 	struct svc_serv * serv;
@@ -224,8 +259,10 @@ lockd_up(void)
 	/*
 	 * Check whether we're already up and running.
 	 */
-	if (nlmsvc_pid)
+	if (nlmsvc_pid) {
+		error = make_socks(nlmsvc_serv, proto);
 		goto out;
+	}
 
 	/*
 	 * Sanity check: if there's no pid,
@@ -242,11 +279,7 @@ lockd_up(void)
 		goto out;
 	}
 
-	if ((error = svc_makesock(serv, IPPROTO_UDP, nlm_udpport)) < 0
-#ifdef CONFIG_NFSD_TCP
-	 || (error = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport)) < 0
-#endif
-		) {
+	if ((error = make_socks(serv, proto)) < 0) {
 		if (warned++ == 0)
 			printk(KERN_WARNING
 				"lockd_up: makesock failed, error=%d\n", error);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ec1938d4b814..8106f3b29e4a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -460,7 +460,8 @@ static int nfs_start_lockd(struct nfs_server *server)
 		goto out;
 	if (server->flags & NFS_MOUNT_NONLM)
 		goto out;
-	error = lockd_up();
+	error = lockd_up((server->flags & NFS_MOUNT_TCP) ?
+			IPPROTO_TCP : IPPROTO_UDP);
 	if (error < 0)
 		server->flags |= NFS_MOUNT_NONLM;
 	else
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index c52c99964a4c..0339b4ddfa3b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -134,6 +134,9 @@ static int killsig; /* signal that was used to kill last nfsd */
 static void nfsd_last_thread(struct svc_serv *serv)
 {
 	/* When last nfsd thread exits we need to do some clean-up */
+	struct svc_sock *svsk;
+	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list)
+		lockd_down();
 	nfsd_serv = NULL;
 	nfsd_racache_shutdown();
 	nfs4_state_shutdown();
@@ -218,11 +221,16 @@ nfsd_svc(unsigned short port, int nrservs)
 		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
 		if (error < 0)
 			goto failure;
-
+		error = lockd_up(IPPROTO_UDP);
+		if (error < 0)
+			goto failure;
 #ifdef CONFIG_NFSD_TCP
 		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
 		if (error < 0)
 			goto failure;
+		error = lockd_up(IPPROTO_TCP);
+		if (error < 0)
+			goto failure;
 #endif
 		do_gettimeofday(&nfssvc_boot);	/* record boot time */
 	} else
@@ -306,8 +314,6 @@ nfsd(struct svc_rqst *rqstp)
 
 	nfsdstats.th_cnt++;
 
-	lockd_up();	/* start lockd */
-
 	me.task = current;
 	list_add(&me.list, &nfsd_list);
 
@@ -364,13 +370,11 @@ nfsd(struct svc_rqst *rqstp)
 			break;
 		killsig = signo;
 	}
-	/* Clear signals before calling lockd_down() and svc_exit_thread() */
+	/* Clear signals before calling svc_exit_thread() */
 	flush_signals(current);
 
 	lock_kernel();
 
-	/* Release lockd */
-	lockd_down();
 	list_del(&me.list);
 	nfsdstats.th_cnt --;
 