author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/lockd/clntproc.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!

Diffstat (limited to 'fs/lockd/clntproc.c')
 -rw-r--r--  fs/lockd/clntproc.c | 820
 1 file changed, 820 insertions, 0 deletions

diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
new file mode 100644
index 000000000000..a4407619b1f1
--- /dev/null
+++ b/fs/lockd/clntproc.c
@@ -0,0 +1,820 @@
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void	nlmclnt_unlock_callback(struct rpc_task *);
static void	nlmclnt_cancel_callback(struct rpc_task *);
static int	nlm_stat_to_errno(u32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

/*
 * Cookie counter for NLM requests
 */
static u32 nlm_cookie = 0x1234;

static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
        memcpy(c->data, &nlm_cookie, 4);
        memset(c->data+4, 0, 4);
        c->len=4;
        nlm_cookie++;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
        atomic_inc(&lockowner->count);
        return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
        if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
                return;
        list_del(&lockowner->list);
        spin_unlock(&lockowner->host->h_lock);
        nlm_release_host(lockowner->host);
        kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->pid == pid)
                        return -EBUSY;
        }
        return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
        uint32_t res;
        do {
                res = host->h_pidcount++;
        } while (nlm_pidbusy(host, res) < 0);
        return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->owner != owner)
                        continue;
                return nlm_get_lockowner(lockowner);
        }
        return NULL;
}

static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *res, *new = NULL;

        spin_lock(&host->h_lock);
        res = __nlm_find_lockowner(host, owner);
        if (res == NULL) {
                spin_unlock(&host->h_lock);
                new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
                spin_lock(&host->h_lock);
                res = __nlm_find_lockowner(host, owner);
                if (res == NULL && new != NULL) {
                        res = new;
                        atomic_set(&new->count, 1);
                        new->owner = owner;
                        new->pid = __nlm_alloc_pid(host);
                        new->host = nlm_get_host(host);
                        list_add(&new->list, &host->h_lockowners);
                        new = NULL;
                }
        }
        spin_unlock(&host->h_lock);
        if (new != NULL)
                kfree(new);
        return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_args *argp = &req->a_args;
        struct nlm_lock *lock = &argp->lock;

        nlmclnt_next_cookie(&argp->cookie);
        argp->state = nsm_local_state;
        memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
        lock->caller = system_utsname.nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len = sprintf(req->a_owner, "%d@%s",
                        current->pid, system_utsname.nodename);
        locks_copy_lock(&lock->fl, fl);
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
        struct file_lock *fl = &req->a_args.lock.fl;

        if (fl->fl_ops && fl->fl_ops->fl_release_private)
                fl->fl_ops->fl_release_private(fl);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
        locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
        memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
        call->a_args.lock.caller = system_utsname.nodename;
        call->a_args.lock.oh.len = lock->oh.len;

        /* set default data area */
        call->a_args.lock.oh.data = call->a_owner;

        if (lock->oh.len > NLMCLNT_OHSIZE) {
                void *data = kmalloc(lock->oh.len, GFP_KERNEL);
                if (!data) {
                        nlmclnt_freegrantargs(call);
                        return 0;
                }
                call->a_args.lock.oh.data = (u8 *) data;
        }

        memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
        return 1;
}

void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
        struct file_lock *fl = &call->a_args.lock.fl;
        /*
         * Check whether we allocated memory for the owner.
         */
        if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
                kfree(call->a_args.lock.oh.data);
        }
        if (fl->fl_ops && fl->fl_ops->fl_release_private)
                fl->fl_ops->fl_release_private(fl);
}

/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
        struct nfs_server *nfssrv = NFS_SERVER(inode);
        struct nlm_host *host;
        struct nlm_rqst reqst, *call = &reqst;
        sigset_t oldset;
        unsigned long flags;
        int status, proto, vers;

        vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
        if (NFS_PROTO(inode)->version > 3) {
                printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
                return -ENOLCK;
        }

        /* Retrieve transport protocol from NFS client */
        proto = NFS_CLIENT(inode)->cl_xprt->prot;

        if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
                return -ENOLCK;

        /* Create RPC client handle if not there, and copy soft
         * and intr flags from NFS client. */
        if (host->h_rpcclnt == NULL) {
                struct rpc_clnt *clnt;

                /* Bind an rpc client to this host handle (does not
                 * perform a portmapper lookup) */
                if (!(clnt = nlm_bind_host(host))) {
                        status = -ENOLCK;
                        goto done;
                }
                clnt->cl_softrtry = nfssrv->client->cl_softrtry;
                clnt->cl_intr = nfssrv->client->cl_intr;
                clnt->cl_chatty = nfssrv->client->cl_chatty;
        }

        /* Keep the old signal mask */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        oldset = current->blocked;

        /* If we're cleaning up locks because the process is exiting,
         * perform the RPC call asynchronously. */
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
            && fl->fl_type == F_UNLCK
            && (current->flags & PF_EXITING)) {
                sigfillset(&current->blocked);  /* Mask all signals */
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);

                call = nlmclnt_alloc_call();
                if (!call) {
                        status = -ENOMEM;
                        goto out_restore;
                }
                call->a_flags = RPC_TASK_ASYNC;
        } else {
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
                memset(call, 0, sizeof(*call));
                locks_init_lock(&call->a_args.lock.fl);
                locks_init_lock(&call->a_res.lock.fl);
        }
        call->a_host = host;

        nlmclnt_locks_init_private(fl, host);

        /* Set up the argument struct */
        nlmclnt_setlockargs(call, fl);

        if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
                if (fl->fl_type != F_UNLCK) {
                        call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
                        status = nlmclnt_lock(call, fl);
                } else
                        status = nlmclnt_unlock(call, fl);
        } else if (IS_GETLK(cmd))
                status = nlmclnt_test(call, fl);
        else
                status = -EINVAL;

out_restore:
        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = oldset;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
        dprintk("lockd: clnt proc returns %d\n", status);
        nlm_release_host(host);
        return status;
}
EXPORT_SYMBOL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
        struct nlm_rqst *call;

        while (!signalled()) {
                call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
                if (call) {
                        memset(call, 0, sizeof(*call));
                        locks_init_lock(&call->a_args.lock.fl);
                        locks_init_lock(&call->a_res.lock.fl);
                        return call;
                }
                printk("nlmclnt_alloc_call: failed, waiting for memory\n");
                current->state = TASK_INTERRUPTIBLE;
                schedule_timeout(5*HZ);
        }
        return NULL;
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
        DEFINE_WAIT(wait);
        int status = -EINTR;

        prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
        if (!signalled ()) {
                schedule_timeout(NLMCLNT_GRACE_WAIT);
                try_to_freeze(PF_FREEZE);
                if (!signalled ())
                        status = 0;
        }
        finish_wait(queue, &wait);
        return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp = argp,
                .rpc_resp = resp,
        };
        int status;

        dprintk("lockd: call procedure %d on %s\n",
                        (int)proc, host->h_name);

        do {
                if (host->h_reclaiming && !argp->reclaim)
                        goto in_grace_period;

                /* If we have no RPC client yet, create one. */
                if ((clnt = nlm_bind_host(host)) == NULL)
                        return -ENOLCK;
                msg.rpc_proc = &clnt->cl_procinfo[proc];

                /* Perform the RPC call. If an error occurs, try again */
                if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
                        dprintk("lockd: rpc_call returned error %d\n", -status);
                        switch (status) {
                        case -EPROTONOSUPPORT:
                                status = -EINVAL;
                                break;
                        case -ECONNREFUSED:
                        case -ETIMEDOUT:
                        case -ENOTCONN:
                                nlm_rebind_host(host);
                                status = -EAGAIN;
                                break;
                        case -ERESTARTSYS:
                                return signalled () ? -EINTR : status;
                        default:
                                break;
                        }
                        break;
                } else
                if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
                        dprintk("lockd: server in grace period\n");
                        if (argp->reclaim) {
                                printk(KERN_WARNING
                                        "lockd: spurious grace period reject?!\n");
                                return -ENOLCK;
                        }
                } else {
                        if (!argp->reclaim) {
                                /* We appear to be out of the grace period */
                                wake_up_all(&host->h_gracewait);
                        }
                        dprintk("lockd: server returns status %d\n", resp->status);
                        return 0;       /* Okay, call complete */
                }

in_grace_period:
                /*
                 * The server has rebooted and appears to be in the grace
                 * period during which locks are only allowed to be
                 * reclaimed.
                 * We can only back off and try again later.
                 */
                status = nlm_wait_on_grace(&host->h_gracewait);
        } while (status == 0);

        return status;
}

/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct rpc_message msg = {
                .rpc_argp = &req->a_args,
                .rpc_resp = &req->a_res,
        };
        int status;

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        if ((clnt = nlm_bind_host(host)) == NULL)
                return -ENOLCK;
        msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);

        return status;
}

static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp = argp,
                .rpc_resp = resp,
        };
        int status;

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        if ((clnt = nlm_bind_host(host)) == NULL)
                return -ENOLCK;
        msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* Increment host refcount */
        nlm_get_host(host);
        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
        if (status < 0)
                nlm_release_host(host);
        return status;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
        int status;

        status = nlmclnt_call(req, NLMPROC_TEST);
        nlmclnt_release_lockargs(req);
        if (status < 0)
                return status;

        status = req->a_res.status;
        if (status == NLM_LCK_GRANTED) {
                fl->fl_type = F_UNLCK;
        } else if (status == NLM_LCK_DENIED) {
                /*
                 * Report the conflicting lock back to the application.
                 */
                locks_copy_lock(fl, &req->a_res.lock.fl);
                fl->fl_pid = 0;
        } else {
                return nlm_stat_to_errno(req->a_res.status);
        }

        return 0;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
        nlm_get_lockowner(new->fl_u.nfs_fl.owner);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
        nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
        fl->fl_ops = NULL;
}

static struct file_lock_operations nlmclnt_lock_ops = {
        .fl_copy_lock = nlmclnt_locks_copy_lock,
        .fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
        BUG_ON(fl->fl_ops != NULL);
        fl->fl_u.nfs_fl.state = 0;
        fl->fl_u.nfs_fl.flags = 0;
        fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
        fl->fl_ops = &nlmclnt_lock_ops;
}

static void do_vfs_lock(struct file_lock *fl)
{
        int res = 0;
        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
                case FL_POSIX:
                        res = posix_lock_file_wait(fl->fl_file, fl);
                        break;
                case FL_FLOCK:
                        res = flock_lock_file_wait(fl->fl_file, fl);
                        break;
                default:
                        BUG();
        }
        if (res < 0)
                printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
                                __FUNCTION__);
}

/*
 * LOCK: Try to create a lock
 *
 *                      Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_host *host = req->a_host;
        struct nlm_res *resp = &req->a_res;
        int status;

        if (!host->h_monitored && nsm_monitor(host) < 0) {
                printk(KERN_NOTICE "lockd: failed to monitor %s\n",
                                        host->h_name);
                status = -ENOLCK;
                goto out;
        }

        do {
                if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
                        if (resp->status != NLM_LCK_BLOCKED)
                                break;
                        status = nlmclnt_block(host, fl, &resp->status);
                }
                if (status < 0)
                        goto out;
        } while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);

        if (resp->status == NLM_LCK_GRANTED) {
                fl->fl_u.nfs_fl.state = host->h_state;
                fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
                fl->fl_flags |= FL_SLEEP;
                do_vfs_lock(fl);
        }
        status = nlm_stat_to_errno(resp->status);
out:
        nlmclnt_release_lockargs(req);
        return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_rqst reqst, *req;
        int status;

        req = &reqst;
        memset(req, 0, sizeof(*req));
        locks_init_lock(&req->a_args.lock.fl);
        locks_init_lock(&req->a_res.lock.fl);
        req->a_host = host;
        req->a_flags = 0;

        /* Set up the argument struct */
        nlmclnt_setlockargs(req, fl);
        req->a_args.reclaim = 1;

        if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
         && req->a_res.status == NLM_LCK_GRANTED)
                return 0;

        printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
                                "(errno %d, status %d)\n", fl->fl_pid,
                                status, req->a_res.status);

        /*
         * FIXME: This is a serious failure. We can
         *
         *  a.  Ignore the problem
         *  b.  Send the owning process some signal (Linux doesn't have
         *      SIGLOST, though...)
         *  c.  Retry the operation
         *
         * Until someone comes up with a simple implementation
         * for b or c, I'll choose option a.
         */

        return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_res *resp = &req->a_res;
        int status;

        /* Clean the GRANTED flag now so the lock doesn't get
         * reclaimed while we're stuck in the unlock call. */
        fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

        if (req->a_flags & RPC_TASK_ASYNC) {
                status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
                                        nlmclnt_unlock_callback);
                /* Hrmf... Do the unlock early since locks_remove_posix()
                 * really expects us to free the lock synchronously */
                do_vfs_lock(fl);
                if (status < 0) {
                        nlmclnt_release_lockargs(req);
                        kfree(req);
                }
                return status;
        }

        status = nlmclnt_call(req, NLMPROC_UNLOCK);
        nlmclnt_release_lockargs(req);
        if (status < 0)
                return status;

        do_vfs_lock(fl);
        if (resp->status == NLM_LCK_GRANTED)
                return 0;

        if (resp->status != NLM_LCK_DENIED_NOLOCKS)
                printk("lockd: unexpected unlock status: %d\n", resp->status);

        /* What to do now? I'm out of my depth... */

        return -ENOLCK;
}

static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
        struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
        int status = req->a_res.status;

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
                goto retry_rebind;
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
                goto retry_unlock;
        }
        if (status != NLM_LCK_GRANTED)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
        nlm_release_host(req->a_host);
        nlmclnt_release_lockargs(req);
        kfree(req);
        return;
retry_rebind:
        nlm_rebind_host(req->a_host);
retry_unlock:
        rpc_restart_call(task);
}

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_rqst *req;
        unsigned long flags;
        sigset_t oldset;
        int status;

        /* Block all signals while setting up call */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        oldset = current->blocked;
        sigfillset(&current->blocked);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        req = nlmclnt_alloc_call();
        if (!req)
                return -ENOMEM;
        req->a_host = host;
        req->a_flags = RPC_TASK_ASYNC;

        nlmclnt_setlockargs(req, fl);

        status = nlmclnt_async_call(req, NLMPROC_CANCEL,
                                        nlmclnt_cancel_callback);
        if (status < 0) {
                nlmclnt_release_lockargs(req);
                kfree(req);
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = oldset;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        return status;
}

static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
        struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: CANCEL call error %d, retrying.\n",
                                        task->tk_status);
                goto retry_cancel;
        }

        dprintk("lockd: cancel status %d (task %d)\n",
                        req->a_res.status, task->tk_pid);

        switch (req->a_res.status) {
        case NLM_LCK_GRANTED:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                /* Everything's good */
                break;
        case NLM_LCK_DENIED_NOLOCKS:
                dprintk("lockd: CANCEL failed (server has no locks)\n");
                goto retry_cancel;
        default:
                printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
                        req->a_res.status);
        }

die:
        nlm_release_host(req->a_host);
        nlmclnt_release_lockargs(req);
        kfree(req);
        return;

retry_cancel:
        nlm_rebind_host(req->a_host);
        rpc_restart_call(task);
        rpc_delay(task, 30 * HZ);
}

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(u32 status)
{
        switch(status) {
        case NLM_LCK_GRANTED:
                return 0;
        case NLM_LCK_DENIED:
                return -EAGAIN;
        case NLM_LCK_DENIED_NOLOCKS:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                return -ENOLCK;
        case NLM_LCK_BLOCKED:
                printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
                return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
        case NLM_DEADLCK:
                return -EDEADLK;
        case NLM_ROFS:
                return -EROFS;
        case NLM_STALE_FH:
                return -ESTALE;
        case NLM_FBIG:
                return -EOVERFLOW;
        case NLM_FAILED:
                return -ENOLCK;
#endif
        }
        printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
        return -ENOLCK;
}