diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/lockd |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/lockd')
-rw-r--r-- | fs/lockd/Makefile | 10 | ||||
-rw-r--r-- | fs/lockd/clntlock.c | 245 | ||||
-rw-r--r-- | fs/lockd/clntproc.c | 820 | ||||
-rw-r--r-- | fs/lockd/host.c | 346 | ||||
-rw-r--r-- | fs/lockd/mon.c | 246 | ||||
-rw-r--r-- | fs/lockd/svc.c | 519 | ||||
-rw-r--r-- | fs/lockd/svc4proc.c | 580 | ||||
-rw-r--r-- | fs/lockd/svclock.c | 686 | ||||
-rw-r--r-- | fs/lockd/svcproc.c | 606 | ||||
-rw-r--r-- | fs/lockd/svcshare.c | 111 | ||||
-rw-r--r-- | fs/lockd/svcsubs.c | 309 | ||||
-rw-r--r-- | fs/lockd/xdr.c | 635 | ||||
-rw-r--r-- | fs/lockd/xdr4.c | 580 |
13 files changed, 5693 insertions, 0 deletions
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile new file mode 100644 index 000000000000..7725a0a9a555 --- /dev/null +++ b/fs/lockd/Makefile | |||
@@ -0,0 +1,10 @@ | |||
#
# Makefile for the linux lock manager stuff
#

obj-$(CONFIG_LOCKD) += lockd.o

# Core NLM objects are always built into lockd.o; the NLMv4 XDR and
# procedure code is added only when CONFIG_LOCKD_V4 is set.
lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
	        svcproc.o svcsubs.o mon.o xdr.o
lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
lockd-objs := $(lockd-objs-y)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c new file mode 100644 index 000000000000..ef7103b8c5bd --- /dev/null +++ b/fs/lockd/clntlock.c | |||
@@ -0,0 +1,245 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/clntlock.c | ||
3 | * | ||
4 | * Lock handling for the client side NLM implementation | ||
5 | * | ||
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/time.h> | ||
12 | #include <linux/nfs_fs.h> | ||
13 | #include <linux/sunrpc/clnt.h> | ||
14 | #include <linux/sunrpc/svc.h> | ||
15 | #include <linux/lockd/lockd.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | |||
18 | #define NLMDBG_FACILITY NLMDBG_CLIENT | ||
19 | |||
20 | /* | ||
21 | * Local function prototypes | ||
22 | */ | ||
23 | static int reclaimer(void *ptr); | ||
24 | |||
25 | /* | ||
26 | * The following functions handle blocking and granting from the | ||
27 | * client perspective. | ||
28 | */ | ||
29 | |||
/*
 * This is the representation of a blocked client lock.
 * An entry lives on the stack of the process sleeping in nlmclnt_block()
 * and is linked onto the global nlm_blocked list until it is woken by
 * nlmclnt_grant() or by the reclaimer thread.
 */
struct nlm_wait {
	struct nlm_wait *	b_next;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* server the lock was requested from */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};

/* Head of the singly linked list of blocked client locks.
 * NOTE(review): no explicit lock protects this list here; presumably
 * serialized by the BKL held by lockd-era callers — confirm. */
static struct nlm_wait *	nlm_blocked;
43 | |||
/*
 * Block on a lock: queue a nlm_wait entry on the global list, then sleep
 * until either a GRANT callback wakes us (b_status updated) or the 30s
 * poll timeout expires.  On normal wakeup *statp is set to the callback
 * status and 0 is returned; if a signal interrupted the sleep, the
 * pending lock request is cancelled (unless the server rebooted in the
 * meantime) and -ERESTARTSYS is returned.
 */
int
nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
{
	struct nlm_wait	block, **head;
	int		err;
	u32		pstate;

	/* Queue ourselves at the head of the blocked list. The entry
	 * lives on this stack frame, so it must be unlinked below
	 * before this function returns. */
	block.b_host   = host;
	block.b_lock   = fl;
	init_waitqueue_head(&block.b_wait);
	block.b_status = NLM_LCK_BLOCKED;
	block.b_next   = nlm_blocked;
	nlm_blocked    = &block;

	/* Remember pseudo nsm state so we can tell below whether the
	 * server rebooted while we slept. */
	pstate = host->h_state;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 * NOTE(review): sleep_on_timeout() is racy if the wakeup can
	 * arrive before we sleep — confirm callers serialize this.
	 */
	sleep_on_timeout(&block.b_wait, 30*HZ);

	/* Unlink our stack entry from the blocked list. */
	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
		if (*head == &block) {
			*head = block.b_next;
			break;
		}
	}

	if (!signalled()) {
		*statp = block.b_status;
		return 0;
	}

	/* Okay, we were interrupted. Cancel the pending request
	 * unless the server has rebooted.
	 */
	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
		printk(KERN_NOTICE
			"lockd: CANCEL call failed (errno %d)\n", -err);

	return -ERESTARTSYS;
}
95 | |||
96 | /* | ||
97 | * The server lockd has called us back to tell us the lock was granted | ||
98 | */ | ||
99 | u32 | ||
100 | nlmclnt_grant(struct nlm_lock *lock) | ||
101 | { | ||
102 | struct nlm_wait *block; | ||
103 | |||
104 | /* | ||
105 | * Look up blocked request based on arguments. | ||
106 | * Warning: must not use cookie to match it! | ||
107 | */ | ||
108 | for (block = nlm_blocked; block; block = block->b_next) { | ||
109 | if (nlm_compare_locks(block->b_lock, &lock->fl)) | ||
110 | break; | ||
111 | } | ||
112 | |||
113 | /* Ooops, no blocked request found. */ | ||
114 | if (block == NULL) | ||
115 | return nlm_lck_denied; | ||
116 | |||
117 | /* Alright, we found the lock. Set the return status and | ||
118 | * wake up the caller. | ||
119 | */ | ||
120 | block->b_status = NLM_LCK_GRANTED; | ||
121 | wake_up(&block->b_wait); | ||
122 | |||
123 | return nlm_granted; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * The following procedures deal with the recovery of locks after a | ||
128 | * server crash. | ||
129 | */ | ||
130 | |||
131 | /* | ||
132 | * Mark the locks for reclaiming. | ||
133 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | ||
134 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | ||
135 | */ | ||
136 | static | ||
137 | void nlmclnt_mark_reclaim(struct nlm_host *host) | ||
138 | { | ||
139 | struct file_lock *fl; | ||
140 | struct inode *inode; | ||
141 | struct list_head *tmp; | ||
142 | |||
143 | list_for_each(tmp, &file_lock_list) { | ||
144 | fl = list_entry(tmp, struct file_lock, fl_link); | ||
145 | |||
146 | inode = fl->fl_file->f_dentry->d_inode; | ||
147 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | ||
148 | continue; | ||
149 | if (fl->fl_u.nfs_fl.owner->host != host) | ||
150 | continue; | ||
151 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | ||
152 | continue; | ||
153 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | ||
159 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | ||
160 | */ | ||
161 | static inline | ||
162 | void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | ||
163 | { | ||
164 | host->h_monitored = 0; | ||
165 | host->h_nsmstate = newstate; | ||
166 | host->h_state++; | ||
167 | host->h_nextrebind = 0; | ||
168 | nlm_rebind_host(host); | ||
169 | nlmclnt_mark_reclaim(host); | ||
170 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | ||
171 | } | ||
172 | |||
/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 *
 * If a reclaimer is already running (h_reclaiming was nonzero) we only
 * re-prepare when the server's NSM state changed again; otherwise we
 * take a host reference and a module reference for the new thread.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		/* Reclaim already in progress; nothing to do unless the
		 * server rebooted yet again. */
		if (host->h_nsmstate == newstate)
			return;
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		/* The reclaimer thread owns one host ref and one module
		 * ref; reclaimer() drops both when it exits. */
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		/* NOTE(review): on kernel_thread() failure only the module
		 * ref is dropped — the host ref taken above appears to
		 * leak. Confirm against nlm_release_host() semantics. */
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE);
	}
}
192 | |||
/*
 * Kernel thread that re-establishes ("reclaims") all marked locks with a
 * rebooted server, then wakes every process blocked on that server so it
 * can retry.  Runs with the BKL held and drops the host/module references
 * taken by nlmclnt_recovery() before exiting.
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked.
	 * After each successful reclaim the scan restarts from the top,
	 * because nlmclnt_reclaim() may sleep and the global list can
	 * change underneath us; the cleared NFS_LCK_RECLAIM flag keeps
	 * this loop from re-processing the same lock. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		/* A SIGKILL aborts the whole reclaim. */
		if (signalled())
			break;
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	for (block = nlm_blocked; block; block = block->b_next) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	/* Drop the module ref taken in nlmclnt_recovery() and exit. */
	module_put_and_exit(0);
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c new file mode 100644 index 000000000000..a4407619b1f1 --- /dev/null +++ b/fs/lockd/clntproc.c | |||
@@ -0,0 +1,820 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/clntproc.c | ||
3 | * | ||
4 | * RPC procedures for the client side NLM implementation | ||
5 | * | ||
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/nfs_fs.h> | ||
15 | #include <linux/utsname.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/sunrpc/clnt.h> | ||
18 | #include <linux/sunrpc/svc.h> | ||
19 | #include <linux/lockd/lockd.h> | ||
20 | #include <linux/lockd/sm_inter.h> | ||
21 | |||
22 | #define NLMDBG_FACILITY NLMDBG_CLIENT | ||
23 | #define NLMCLNT_GRACE_WAIT (5*HZ) | ||
24 | |||
25 | static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); | ||
26 | static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); | ||
27 | static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); | ||
28 | static void nlmclnt_unlock_callback(struct rpc_task *); | ||
29 | static void nlmclnt_cancel_callback(struct rpc_task *); | ||
30 | static int nlm_stat_to_errno(u32 stat); | ||
31 | static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); | ||
32 | |||
33 | /* | ||
34 | * Cookie counter for NLM requests | ||
35 | */ | ||
36 | static u32 nlm_cookie = 0x1234; | ||
37 | |||
38 | static inline void nlmclnt_next_cookie(struct nlm_cookie *c) | ||
39 | { | ||
40 | memcpy(c->data, &nlm_cookie, 4); | ||
41 | memset(c->data+4, 0, 4); | ||
42 | c->len=4; | ||
43 | nlm_cookie++; | ||
44 | } | ||
45 | |||
46 | static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) | ||
47 | { | ||
48 | atomic_inc(&lockowner->count); | ||
49 | return lockowner; | ||
50 | } | ||
51 | |||
/*
 * Drop a reference on @lockowner; on the final put, unlink it from the
 * host's list, release the host reference it held, and free it.
 * atomic_dec_and_lock() takes host->h_lock only when the count hits
 * zero, so the list removal below is protected.
 */
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}
61 | |||
62 | static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) | ||
63 | { | ||
64 | struct nlm_lockowner *lockowner; | ||
65 | list_for_each_entry(lockowner, &host->h_lockowners, list) { | ||
66 | if (lockowner->pid == pid) | ||
67 | return -EBUSY; | ||
68 | } | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) | ||
73 | { | ||
74 | uint32_t res; | ||
75 | do { | ||
76 | res = host->h_pidcount++; | ||
77 | } while (nlm_pidbusy(host, res) < 0); | ||
78 | return res; | ||
79 | } | ||
80 | |||
81 | static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) | ||
82 | { | ||
83 | struct nlm_lockowner *lockowner; | ||
84 | list_for_each_entry(lockowner, &host->h_lockowners, list) { | ||
85 | if (lockowner->owner != owner) | ||
86 | continue; | ||
87 | return nlm_get_lockowner(lockowner); | ||
88 | } | ||
89 | return NULL; | ||
90 | } | ||
91 | |||
92 | static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) | ||
93 | { | ||
94 | struct nlm_lockowner *res, *new = NULL; | ||
95 | |||
96 | spin_lock(&host->h_lock); | ||
97 | res = __nlm_find_lockowner(host, owner); | ||
98 | if (res == NULL) { | ||
99 | spin_unlock(&host->h_lock); | ||
100 | new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL); | ||
101 | spin_lock(&host->h_lock); | ||
102 | res = __nlm_find_lockowner(host, owner); | ||
103 | if (res == NULL && new != NULL) { | ||
104 | res = new; | ||
105 | atomic_set(&new->count, 1); | ||
106 | new->owner = owner; | ||
107 | new->pid = __nlm_alloc_pid(host); | ||
108 | new->host = nlm_get_host(host); | ||
109 | list_add(&new->list, &host->h_lockowners); | ||
110 | new = NULL; | ||
111 | } | ||
112 | } | ||
113 | spin_unlock(&host->h_lock); | ||
114 | if (new != NULL) | ||
115 | kfree(new); | ||
116 | return res; | ||
117 | } | ||
118 | |||
/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls: fresh cookie,
 * local NSM state, the file handle of the locked file, this node's name
 * as caller, a "pid@hostname" owner string, and a copy of the lock.
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = system_utsname.nodename;
	/* The owner handle points at req->a_owner, filled in as
	 * "pid@hostname"; oh.len is the formatted length. */
	lock->oh.data = req->a_owner;
	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
				current->pid, system_utsname.nodename);
	/* locks_copy_lock() also copies fl_ops, so the copy shares the
	 * nlmclnt lock ops / owner reference. */
	locks_copy_lock(&lock->fl, fl);
}
136 | |||
137 | static void nlmclnt_release_lockargs(struct nlm_rqst *req) | ||
138 | { | ||
139 | struct file_lock *fl = &req->a_args.lock.fl; | ||
140 | |||
141 | if (fl->fl_ops && fl->fl_ops->fl_release_private) | ||
142 | fl->fl_ops->fl_release_private(fl); | ||
143 | } | ||
144 | |||
/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 *
 * Returns 1 on success, 0 if allocating an oversized owner handle
 * failed (after freeing whatever was already set up).
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = system_utsname.nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;

	/* Owner handles longer than the inline a_owner buffer need a
	 * separate allocation; nlmclnt_freegrantargs() frees it. */
	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data) {
			nlmclnt_freegrantargs(call);
			return 0;
		}
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
172 | |||
173 | void | ||
174 | nlmclnt_freegrantargs(struct nlm_rqst *call) | ||
175 | { | ||
176 | struct file_lock *fl = &call->a_args.lock.fl; | ||
177 | /* | ||
178 | * Check whether we allocated memory for the owner. | ||
179 | */ | ||
180 | if (call->a_args.lock.oh.data != (u8 *) call->a_owner) { | ||
181 | kfree(call->a_args.lock.oh.data); | ||
182 | } | ||
183 | if (fl->fl_ops && fl->fl_ops->fl_release_private) | ||
184 | fl->fl_ops->fl_release_private(fl); | ||
185 | } | ||
186 | |||
/*
 * This is the main entry point for the NLM client: dispatch a fcntl-style
 * lock command (@cmd) on an NFS inode to the right NLM procedure.
 *
 * Returns 0 on success or a negative errno.  Uses NLM v4 for NFSv3 and
 * v1 for NFSv2; NFSv4 is rejected outright.  When the process is exiting
 * and merely unlocking, the RPC is issued asynchronously with all
 * signals masked so the teardown cannot be interrupted.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, proto, vers;

	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	/* Retrieve transport protocol from NFS client */
	proto = NFS_CLIENT(inode)->cl_xprt->prot;

	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		/* Heap-allocated call: the async completion callback
		 * frees it (see nlmclnt_unlock). */
		call = nlmclnt_alloc_call();
		if (!call) {
			status = -ENOMEM;
			goto out_restore;
		}
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		/* Synchronous path: use the on-stack request. */
		memset(call, 0, sizeof(*call));
		locks_init_lock(&call->a_args.lock.fl);
		locks_init_lock(&call->a_res.lock.fl);
	}
	call->a_host = host;

	nlmclnt_locks_init_private(fl, host);

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

 out_restore:
	/* Restore the signal mask saved above (a no-op change unless
	 * the exiting-process branch masked everything). */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
283 | |||
284 | /* | ||
285 | * Allocate an NLM RPC call struct | ||
286 | */ | ||
287 | struct nlm_rqst * | ||
288 | nlmclnt_alloc_call(void) | ||
289 | { | ||
290 | struct nlm_rqst *call; | ||
291 | |||
292 | while (!signalled()) { | ||
293 | call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL); | ||
294 | if (call) { | ||
295 | memset(call, 0, sizeof(*call)); | ||
296 | locks_init_lock(&call->a_args.lock.fl); | ||
297 | locks_init_lock(&call->a_res.lock.fl); | ||
298 | return call; | ||
299 | } | ||
300 | printk("nlmclnt_alloc_call: failed, waiting for memory\n"); | ||
301 | current->state = TASK_INTERRUPTIBLE; | ||
302 | schedule_timeout(5*HZ); | ||
303 | } | ||
304 | return NULL; | ||
305 | } | ||
306 | |||
/*
 * Wait out (part of) the server's grace period: sleep interruptibly for
 * NLMCLNT_GRACE_WAIT on @queue.  Returns 0 if the wait completed without
 * a signal, -EINTR if the caller was signalled before or during it.
 */
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		/* Cooperate with the suspend freezer while blocked. */
		try_to_freeze(PF_FREEZE);
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}
322 | |||
/*
 * Generic NLM call: perform procedure @proc synchronously against
 * req->a_host, retrying while the server is in its grace period (unless
 * this request is itself a reclaim).  Transport errors trigger a rebind
 * and map to -EAGAIN; -ERESTARTSYS becomes -EINTR when signalled.
 * Returns 0 once a non-grace-period reply is in req->a_res.
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		/* While reclaiming, only reclaim requests may go out. */
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				/* Transport trouble: rebind and let the
				 * caller retry. */
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				/* A reclaim must not be grace-rejected. */
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}
399 | |||
400 | /* | ||
401 | * Generic NLM call, async version. | ||
402 | */ | ||
403 | int | ||
404 | nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback) | ||
405 | { | ||
406 | struct nlm_host *host = req->a_host; | ||
407 | struct rpc_clnt *clnt; | ||
408 | struct rpc_message msg = { | ||
409 | .rpc_argp = &req->a_args, | ||
410 | .rpc_resp = &req->a_res, | ||
411 | }; | ||
412 | int status; | ||
413 | |||
414 | dprintk("lockd: call procedure %d on %s (async)\n", | ||
415 | (int)proc, host->h_name); | ||
416 | |||
417 | /* If we have no RPC client yet, create one. */ | ||
418 | if ((clnt = nlm_bind_host(host)) == NULL) | ||
419 | return -ENOLCK; | ||
420 | msg.rpc_proc = &clnt->cl_procinfo[proc]; | ||
421 | |||
422 | /* bootstrap and kick off the async RPC call */ | ||
423 | status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req); | ||
424 | |||
425 | return status; | ||
426 | } | ||
427 | |||
428 | static int | ||
429 | nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback) | ||
430 | { | ||
431 | struct nlm_host *host = req->a_host; | ||
432 | struct rpc_clnt *clnt; | ||
433 | struct nlm_args *argp = &req->a_args; | ||
434 | struct nlm_res *resp = &req->a_res; | ||
435 | struct rpc_message msg = { | ||
436 | .rpc_argp = argp, | ||
437 | .rpc_resp = resp, | ||
438 | }; | ||
439 | int status; | ||
440 | |||
441 | dprintk("lockd: call procedure %d on %s (async)\n", | ||
442 | (int)proc, host->h_name); | ||
443 | |||
444 | /* If we have no RPC client yet, create one. */ | ||
445 | if ((clnt = nlm_bind_host(host)) == NULL) | ||
446 | return -ENOLCK; | ||
447 | msg.rpc_proc = &clnt->cl_procinfo[proc]; | ||
448 | |||
449 | /* Increment host refcount */ | ||
450 | nlm_get_host(host); | ||
451 | /* bootstrap and kick off the async RPC call */ | ||
452 | status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req); | ||
453 | if (status < 0) | ||
454 | nlm_release_host(host); | ||
455 | return status; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * TEST for the presence of a conflicting lock | ||
460 | */ | ||
461 | static int | ||
462 | nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) | ||
463 | { | ||
464 | int status; | ||
465 | |||
466 | status = nlmclnt_call(req, NLMPROC_TEST); | ||
467 | nlmclnt_release_lockargs(req); | ||
468 | if (status < 0) | ||
469 | return status; | ||
470 | |||
471 | status = req->a_res.status; | ||
472 | if (status == NLM_LCK_GRANTED) { | ||
473 | fl->fl_type = F_UNLCK; | ||
474 | } if (status == NLM_LCK_DENIED) { | ||
475 | /* | ||
476 | * Report the conflicting lock back to the application. | ||
477 | */ | ||
478 | locks_copy_lock(fl, &req->a_res.lock.fl); | ||
479 | fl->fl_pid = 0; | ||
480 | } else { | ||
481 | return nlm_stat_to_errno(req->a_res.status); | ||
482 | } | ||
483 | |||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) | ||
488 | { | ||
489 | memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl)); | ||
490 | nlm_get_lockowner(new->fl_u.nfs_fl.owner); | ||
491 | } | ||
492 | |||
/* Drop the lockowner reference held by @fl and detach the NLM lock ops. */
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
	fl->fl_ops = NULL;
}
498 | |||
/* file_lock operations installed on NLM client locks by
 * nlmclnt_locks_init_private(); manage the shared lockowner refcount. */
static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};
503 | |||
/*
 * Attach NLM-private state to @fl: a (possibly newly created) lockowner
 * for this fl_owner on @host, and the nlmclnt lock ops.  Must only be
 * called on a lock that has no fl_ops yet.
 */
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.flags = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	/* Set fl_ops last, once the private state is fully initialized. */
	fl->fl_ops = &nlmclnt_lock_ops;
}
512 | |||
513 | static void do_vfs_lock(struct file_lock *fl) | ||
514 | { | ||
515 | int res = 0; | ||
516 | switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { | ||
517 | case FL_POSIX: | ||
518 | res = posix_lock_file_wait(fl->fl_file, fl); | ||
519 | break; | ||
520 | case FL_FLOCK: | ||
521 | res = flock_lock_file_wait(fl->fl_file, fl); | ||
522 | break; | ||
523 | default: | ||
524 | BUG(); | ||
525 | } | ||
526 | if (res < 0) | ||
527 | printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", | ||
528 | __FUNCTION__); | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * LOCK: Try to create a lock | ||
533 | * | ||
534 | * Programmer Harassment Alert | ||
535 | * | ||
536 | * When given a blocking lock request in a sync RPC call, the HPUX lockd | ||
537 | * will faithfully return LCK_BLOCKED but never cares to notify us when | ||
538 | * the lock could be granted. This way, our local process could hang | ||
539 | * around forever waiting for the callback. | ||
540 | * | ||
541 | * Solution A: Implement busy-waiting | ||
542 | * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES}) | ||
543 | * | ||
544 | * For now I am implementing solution A, because I hate the idea of | ||
545 | * re-implementing lockd for a third time in two months. The async | ||
546 | * calls shouldn't be too hard to do, however. | ||
547 | * | ||
548 | * This is one of the lovely things about standards in the NFS area: | ||
549 | * they're so soft and squishy you can't really blame HP for doing this. | ||
550 | */ | ||
/*
 * Issue an NLM LOCK call, looping on NLM_LCK_BLOCKED: each BLOCKED reply
 * sends us to nlmclnt_block() to wait for the GRANT callback (or the
 * poll timeout), after which the request is retried if it was a blocking
 * lock.  On NLM_LCK_GRANTED the lock is also applied locally through the
 * VFS.  Returns 0 or a negative errno.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Make sure the NSM monitors this server so we hear about
	 * reboots and can reclaim. */
	if (!host->h_monitored && nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		status = -ENOLCK;
		goto out;
	}

	do {
		if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
			if (resp->status != NLM_LCK_BLOCKED)
				break;
			/* Wait for the GRANT callback; it rewrites
			 * resp->status via the statp pointer. */
			status = nlmclnt_block(host, fl, &resp->status);
		}
		if (status < 0)
			goto out;
	} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);

	if (resp->status == NLM_LCK_GRANTED) {
		/* Record the server state so a reboot invalidates this
		 * grant, and mirror the lock into the local VFS. */
		fl->fl_u.nfs_fl.state = host->h_state;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
		fl->fl_flags |= FL_SLEEP;
		do_vfs_lock(fl);
	}
	status = nlm_stat_to_errno(resp->status);
out:
	nlmclnt_release_lockargs(req);
	return status;
}
586 | |||
/*
 * RECLAIM: Try to reclaim a lock after a server reboot by re-sending a
 * LOCK request with the reclaim bit set.  Called from the reclaimer
 * thread with an on-stack request.  Returns 0 on success, -ENOLCK if
 * the reclaim failed (the failure is logged but otherwise ignored —
 * see the FIXME below).
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == NLM_LCK_GRANTED)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, req->a_res.status);

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}
629 | |||
/*
 * UNLOCK: remove an existing lock
 *
 * In the async case (RPC_TASK_ASYNC set in req->a_flags), ownership
 * of @req passes to nlmclnt_unlock_callback(), which frees it on
 * completion; we only clean up here if submission itself failed.
 * In the sync case the request is released before returning.
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_res *resp = &req->a_res;
	int status;

	/* Clean the GRANTED flag now so the lock doesn't get
	 * reclaimed while we're stuck in the unlock call. */
	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
		if (status < 0) {
			/* Submission failed: the callback will never run,
			 * so we must free the request ourselves. */
			nlmclnt_release_lockargs(req);
			kfree(req);
		}
		return status;
	}

	status = nlmclnt_call(req, NLMPROC_UNLOCK);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	/* Drop the local VFS lock whatever the server replied */
	do_vfs_lock(fl);
	if (resp->status == NLM_LCK_GRANTED)
		return 0;

	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
		printk("lockd: unexpected unlock status: %d\n", resp->status);

	/* What to do now? I'm out of my depth... */

	return -ENOLCK;
}
672 | |||
/*
 * RPC completion callback for an async NLMPROC_UNLOCK call.
 *
 * Owns the nlm_rqst: on final completion it releases the host
 * reference, the lock arguments and the request itself; on transient
 * failures it restarts the RPC call (optionally rebinding the port
 * or delaying through the server's grace period first).
 */
static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		/* Transport-level failure: force a portmap lookup and retry */
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		/* Server still recovering from reboot: back off and retry */
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	/* Final exit: free everything nlmclnt_unlock() handed to us */
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}
702 | |||
703 | /* | ||
704 | * Cancel a blocked lock request. | ||
705 | * We always use an async RPC call for this in order not to hang a | ||
706 | * process that has been Ctrl-C'ed. | ||
707 | */ | ||
708 | int | ||
709 | nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl) | ||
710 | { | ||
711 | struct nlm_rqst *req; | ||
712 | unsigned long flags; | ||
713 | sigset_t oldset; | ||
714 | int status; | ||
715 | |||
716 | /* Block all signals while setting up call */ | ||
717 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
718 | oldset = current->blocked; | ||
719 | sigfillset(¤t->blocked); | ||
720 | recalc_sigpending(); | ||
721 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
722 | |||
723 | req = nlmclnt_alloc_call(); | ||
724 | if (!req) | ||
725 | return -ENOMEM; | ||
726 | req->a_host = host; | ||
727 | req->a_flags = RPC_TASK_ASYNC; | ||
728 | |||
729 | nlmclnt_setlockargs(req, fl); | ||
730 | |||
731 | status = nlmclnt_async_call(req, NLMPROC_CANCEL, | ||
732 | nlmclnt_cancel_callback); | ||
733 | if (status < 0) { | ||
734 | nlmclnt_release_lockargs(req); | ||
735 | kfree(req); | ||
736 | } | ||
737 | |||
738 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
739 | current->blocked = oldset; | ||
740 | recalc_sigpending(); | ||
741 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
742 | |||
743 | return status; | ||
744 | } | ||
745 | |||
/*
 * RPC completion callback for an async NLMPROC_CANCEL call.
 * Owns the nlm_rqst and frees it (plus the host reference) on final
 * completion; retries after a rebind on RPC errors or when the
 * server reports it has lost its lock state.
 */
static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	/* Final exit: free everything nlmclnt_cancel() handed to us */
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;

retry_cancel:
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	/* NOTE(review): the delay is set after restarting the call;
	 * assumes rpc_delay() still applies to the restarted task —
	 * verify against the RPC scheduler. */
	rpc_delay(task, 30 * HZ);
}
787 | |||
788 | /* | ||
789 | * Convert an NLM status code to a generic kernel errno | ||
790 | */ | ||
791 | static int | ||
792 | nlm_stat_to_errno(u32 status) | ||
793 | { | ||
794 | switch(status) { | ||
795 | case NLM_LCK_GRANTED: | ||
796 | return 0; | ||
797 | case NLM_LCK_DENIED: | ||
798 | return -EAGAIN; | ||
799 | case NLM_LCK_DENIED_NOLOCKS: | ||
800 | case NLM_LCK_DENIED_GRACE_PERIOD: | ||
801 | return -ENOLCK; | ||
802 | case NLM_LCK_BLOCKED: | ||
803 | printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n"); | ||
804 | return -ENOLCK; | ||
805 | #ifdef CONFIG_LOCKD_V4 | ||
806 | case NLM_DEADLCK: | ||
807 | return -EDEADLK; | ||
808 | case NLM_ROFS: | ||
809 | return -EROFS; | ||
810 | case NLM_STALE_FH: | ||
811 | return -ESTALE; | ||
812 | case NLM_FBIG: | ||
813 | return -EOVERFLOW; | ||
814 | case NLM_FAILED: | ||
815 | return -ENOLCK; | ||
816 | #endif | ||
817 | } | ||
818 | printk(KERN_NOTICE "lockd: unexpected server status %d\n", status); | ||
819 | return -ENOLCK; | ||
820 | } | ||
diff --git a/fs/lockd/host.c b/fs/lockd/host.c new file mode 100644 index 000000000000..52707c5ad6ea --- /dev/null +++ b/fs/lockd/host.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/host.c | ||
3 | * | ||
4 | * Management for NLM peer hosts. The nlm_host struct is shared | ||
5 | * between client and server implementation. The only reason to | ||
6 | * do so is to reduce code bloat. | ||
7 | * | ||
8 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/in.h> | ||
15 | #include <linux/sunrpc/clnt.h> | ||
16 | #include <linux/sunrpc/svc.h> | ||
17 | #include <linux/lockd/lockd.h> | ||
18 | #include <linux/lockd/sm_inter.h> | ||
19 | |||
20 | |||
21 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE | ||
22 | #define NLM_HOST_MAX 64 | ||
23 | #define NLM_HOST_NRHASH 32 | ||
24 | #define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1)) | ||
25 | #define NLM_HOST_REBIND (60 * HZ) | ||
26 | #define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ) | ||
27 | #define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ) | ||
28 | #define NLM_HOST_ADDR(sv) (&(sv)->s_nlmclnt->cl_xprt->addr) | ||
29 | |||
30 | static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; | ||
31 | static unsigned long next_gc; | ||
32 | static int nrhosts; | ||
33 | static DECLARE_MUTEX(nlm_host_sema); | ||
34 | |||
35 | |||
36 | static void nlm_gc_hosts(void); | ||
37 | |||
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 * Thin wrapper: a client-side (server == 0) lookup.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(0, sin, proto, version);
}
46 | |||
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 * Thin wrapper: a server-side (server == 1) lookup keyed on the
 * peer address/protocol/version taken from the incoming request.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
56 | |||
/*
 * Common host lookup routine for server & client
 *
 * Looks up (creating on miss) the nlm_host for the given address,
 * protocol and NLM version.  @server distinguishes server-side from
 * client-side entries; the two are cached separately.  Returns the
 * entry with its refcount bumped, or NULL on allocation failure.
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	down(&nlm_host_sema);

	/* Opportunistically reap expired hosts while we hold the lock */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			/* Move-to-front so frequently used hosts stay cheap */
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			up(&nlm_host_sema);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	/* On failure host is NULL here, so the nohost exit returns NULL */
	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version = version;
	host->h_proto = proto;
	host->h_rpcclnt = NULL;
	init_MUTEX(&host->h_sema);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	host->h_state = 0;			/* pseudo NSM state */
	host->h_nsmstate = 0;			/* real NSM state */
	host->h_server = server;
	host->h_next = nlm_hosts[hash];
	nlm_hosts[hash] = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);

	/* Force a GC on the next lookup once the cache grows too large */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	up(&nlm_host_sema);
	return host;
}
134 | |||
135 | struct nlm_host * | ||
136 | nlm_find_client(void) | ||
137 | { | ||
138 | /* find a nlm_host for a client for which h_killed == 0. | ||
139 | * and return it | ||
140 | */ | ||
141 | int hash; | ||
142 | down(&nlm_host_sema); | ||
143 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { | ||
144 | struct nlm_host *host, **hp; | ||
145 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | ||
146 | if (host->h_server && | ||
147 | host->h_killed == 0) { | ||
148 | nlm_get_host(host); | ||
149 | up(&nlm_host_sema); | ||
150 | return host; | ||
151 | } | ||
152 | } | ||
153 | } | ||
154 | up(&nlm_host_sema); | ||
155 | return NULL; | ||
156 | } | ||
157 | |||
158 | |||
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the host's cached rpc_clnt, creating it on first use, or
 * NULL on failure.  Serialized per-host via h_sema.  For datagram
 * transports the cached port is periodically cleared to force a
 * fresh portmap lookup.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	down(&host->h_sema);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 * Note: why keep rebinding if we're on a tcp connection?
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
			/* cl_port == 0 + cl_autobind makes the next call
			 * query the remote portmapper again */
			clnt->cl_port = 0;
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		if (IS_ERR(clnt)) {
			/* rpc_create_client() failed, so we still own xprt */
			xprt_destroy(xprt);
			goto forgetit;
		}
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		xprt->nocong = 1;	/* No congestion control for NLM */
		xprt->resvport = 1;	/* NLM requires a reserved port */

		host->h_rpcclnt = clnt;
	}

	up(&host->h_sema);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	up(&host->h_sema);
	return NULL;
}
215 | |||
216 | /* | ||
217 | * Force a portmap lookup of the remote lockd port | ||
218 | */ | ||
219 | void | ||
220 | nlm_rebind_host(struct nlm_host *host) | ||
221 | { | ||
222 | dprintk("lockd: rebind host %s\n", host->h_name); | ||
223 | if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { | ||
224 | host->h_rpcclnt->cl_port = 0; | ||
225 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Increment NLM host count | ||
231 | */ | ||
232 | struct nlm_host * nlm_get_host(struct nlm_host *host) | ||
233 | { | ||
234 | if (host) { | ||
235 | dprintk("lockd: get host %s\n", host->h_name); | ||
236 | atomic_inc(&host->h_count); | ||
237 | host->h_expires = jiffies + NLM_HOST_EXPIRE; | ||
238 | } | ||
239 | return host; | ||
240 | } | ||
241 | |||
/*
 * Release NLM host after use
 *
 * Drops one reference (NULL-safe).  The host itself is only freed
 * later by nlm_gc_hosts() once the count reaches zero and it has
 * expired.
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		atomic_dec(&host->h_count);
		/* Sanity check for unbalanced get/release pairs.
		 * NOTE(review): the read happens after the decrement, so
		 * this is not an atomic dec-and-test — a concurrent
		 * get could in principle mask an underflow. */
		BUG_ON(atomic_read(&host->h_count) < 0);
	}
}
253 | |||
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 *
 * Forces every cached host to look expired and runs one GC pass;
 * hosts that are still referenced survive and are reported.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	down(&nlm_host_sema);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;	/* already expired */
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	up(&nlm_host_sema);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
291 | |||
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_sema.  A host is reaped only if it is
 * unreferenced (h_count == 0), unmarked (h_inuse == 0) and expired.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear all marks... */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* ...then sweep, unlinking unmarked expired hosts in place */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				/* Can't destroy a client that's still in use;
				 * mark it dead so it is torn down later. */
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			BUG_ON(!list_empty(&host->h_lockowners));
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
346 | |||
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c new file mode 100644 index 000000000000..6fc1bebeec1d --- /dev/null +++ b/fs/lockd/mon.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/mon.c | ||
3 | * | ||
4 | * The kernel statd client. | ||
5 | * | ||
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/utsname.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sunrpc/clnt.h> | ||
13 | #include <linux/sunrpc/svc.h> | ||
14 | #include <linux/lockd/lockd.h> | ||
15 | #include <linux/lockd/sm_inter.h> | ||
16 | |||
17 | |||
18 | #define NLMDBG_FACILITY NLMDBG_MONITOR | ||
19 | |||
20 | static struct rpc_clnt * nsm_create(void); | ||
21 | |||
22 | static struct rpc_program nsm_program; | ||
23 | |||
24 | /* | ||
25 | * Local NSM state | ||
26 | */ | ||
27 | u32 nsm_local_state; | ||
28 | |||
/*
 * Common procedure for SM_MON/SM_UNMON calls
 *
 * Creates a one-shot RPC client to the local statd, fills in the
 * nsm_args describing @host and issues @proc synchronously.  Returns
 * 0 on RPC success (the SM-level result is left in *res), or a
 * negative errno on transport failure.
 */
static int
nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
{
	struct rpc_clnt	*clnt;
	int		status;
	struct nsm_args	args;

	clnt = nsm_create();
	if (IS_ERR(clnt)) {
		status = PTR_ERR(clnt);
		goto out;
	}

	args.addr = host->h_addr.sin_addr.s_addr;
	/* Private cookie: protocol number in the upper bits,
	 * the server/client flag in bit 0 */
	args.proto= (host->h_proto<<1) | host->h_server;
	args.prog = NLM_PROGRAM;
	args.vers = host->h_version;
	args.proc = NLMPROC_NSM_NOTIFY;
	memset(res, 0, sizeof(*res));

	/* nsm_create() marked the client one-shot, so it is torn
	 * down again once this call completes. */
	status = rpc_call(clnt, proc, &args, res, 0);
	if (status < 0)
		printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
			status);
	else
		status = 0;
 out:
	return status;
}
61 | |||
62 | /* | ||
63 | * Set up monitoring of a remote host | ||
64 | */ | ||
65 | int | ||
66 | nsm_monitor(struct nlm_host *host) | ||
67 | { | ||
68 | struct nsm_res res; | ||
69 | int status; | ||
70 | |||
71 | dprintk("lockd: nsm_monitor(%s)\n", host->h_name); | ||
72 | |||
73 | status = nsm_mon_unmon(host, SM_MON, &res); | ||
74 | |||
75 | if (status < 0 || res.status != 0) | ||
76 | printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name); | ||
77 | else | ||
78 | host->h_monitored = 1; | ||
79 | return status; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Cease to monitor remote host | ||
84 | */ | ||
85 | int | ||
86 | nsm_unmonitor(struct nlm_host *host) | ||
87 | { | ||
88 | struct nsm_res res; | ||
89 | int status; | ||
90 | |||
91 | dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name); | ||
92 | |||
93 | status = nsm_mon_unmon(host, SM_UNMON, &res); | ||
94 | if (status < 0) | ||
95 | printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", host->h_name); | ||
96 | else | ||
97 | host->h_monitored = 0; | ||
98 | return status; | ||
99 | } | ||
100 | |||
/*
 * Create NSM client for the local host
 *
 * Builds a UDP transport to statd on the loopback address and wraps
 * it in a soft, one-shot RPC client (destroyed automatically after a
 * single call).  Returns the client or an ERR_PTR.
 */
static struct rpc_clnt *
nsm_create(void)
{
	struct rpc_xprt	*xprt;
	struct rpc_clnt	*clnt;
	struct sockaddr_in	sin;

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = 0;	/* let the portmapper find statd */

	xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	clnt = rpc_create_client(xprt, "localhost",
				&nsm_program, SM_VERSION,
				RPC_AUTH_NULL);
	if (IS_ERR(clnt))
		goto out_destroy;
	clnt->cl_softrtry = 1;	/* fail instead of retrying forever */
	clnt->cl_chatty = 1;
	clnt->cl_oneshot = 1;	/* destroy the client after one call */
	xprt->resvport = 1;	/* NSM requires a reserved port */
	return clnt;

out_destroy:
	xprt_destroy(xprt);
	/* clnt is an ERR_PTR here, passed straight back to the caller */
	return clnt;
}
134 | |||
135 | /* | ||
136 | * XDR functions for NSM. | ||
137 | */ | ||
138 | |||
/*
 * Encode the argument part common to SM_MON and SM_UNMON: the
 * mon_name (dotted-quad peer address), our own nodename, and the
 * prog/vers/proc triple statd should call back on.  Returns the
 * advanced write pointer, or ERR_PTR(-EIO) if a string does not
 * fit in the send buffer.
 */
static u32 *
xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
{
	char	buffer[20];	/* "255.255.255.255" needs at most 16 bytes */

	/*
	 * Use the dotted-quad IP address of the remote host as
	 * identifier. Linux statd always looks up the canonical
	 * hostname first for whatever remote hostname it receives,
	 * so this works alright.
	 */
	sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
	if (!(p = xdr_encode_string(p, buffer))
	 || !(p = xdr_encode_string(p, system_utsname.nodename)))
		return ERR_PTR(-EIO);
	*p++ = htonl(argp->prog);
	*p++ = htonl(argp->vers);
	*p++ = htonl(argp->proc);

	return p;
}
160 | |||
/*
 * Encode an SM_MON request: the common mon_id plus 16 bytes of
 * private data (addr/vers/proto cookie, padded with a zero word)
 * that statd echoes back in its notification.
 */
static int
xdr_encode_mon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
{
	p = xdr_encode_common(rqstp, p, argp);
	if (IS_ERR(p))
		return PTR_ERR(p);
	*p++ = argp->addr;	/* already network order, not converted */
	*p++ = argp->vers;
	*p++ = argp->proto;
	*p++ = 0;
	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
	return 0;
}
174 | |||
/*
 * Encode an SM_UNMON request: just the common mon_id, no private data.
 */
static int
xdr_encode_unmon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
{
	p = xdr_encode_common(rqstp, p, argp);
	if (IS_ERR(p))
		return PTR_ERR(p);
	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
	return 0;
}
184 | |||
185 | static int | ||
186 | xdr_decode_stat_res(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp) | ||
187 | { | ||
188 | resp->status = ntohl(*p++); | ||
189 | resp->state = ntohl(*p++); | ||
190 | dprintk("nsm: xdr_decode_stat_res status %d state %d\n", | ||
191 | resp->status, resp->state); | ||
192 | return 0; | ||
193 | } | ||
194 | |||
/*
 * Decode an SM_UNMON reply: a single NSM state word.
 */
static int
xdr_decode_stat(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
{
	resp->state = ntohl(*p++);
	return 0;
}
201 | |||
/*
 * XDR payload sizes for the NSM argument/result types,
 * in 32-bit XDR words.
 */
#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_my_id_sz	(3+1+SM_my_name_sz)
#define SM_mon_id_sz	(1+XDR_QUADLEN(20)+SM_my_id_sz)
#define SM_mon_sz	(SM_mon_id_sz+4)
#define SM_monres_sz	2
#define SM_unmonres_sz	1

#ifndef MAX
# define MAX(a, b)	(((a) > (b))? (a) : (b))
#endif

/* Procedure table: only SM_MON and SM_UNMON are used by the kernel.
 * p_bufsiz converts the word counts above into bytes (<< 2). */
static struct rpc_procinfo	nsm_procedures[] = {
[SM_MON] = {
		.p_proc		= SM_MON,
		.p_encode	= (kxdrproc_t) xdr_encode_mon,
		.p_decode	= (kxdrproc_t) xdr_decode_stat_res,
		.p_bufsiz	= MAX(SM_mon_sz, SM_monres_sz) << 2,
	},
[SM_UNMON] = {
		.p_proc		= SM_UNMON,
		.p_encode	= (kxdrproc_t) xdr_encode_unmon,
		.p_decode	= (kxdrproc_t) xdr_decode_stat,
		.p_bufsiz	= MAX(SM_mon_id_sz, SM_unmonres_sz) << 2,
	},
};

/* NSM only has protocol version 1 */
static struct rpc_version	nsm_version1 = {
	.number		= 1,
	.nrprocs	= sizeof(nsm_procedures)/sizeof(nsm_procedures[0]),
	.procs		= nsm_procedures
};

static struct rpc_version *	nsm_version[] = {
	[1] = &nsm_version1,
};

static struct rpc_stat		nsm_stats;

/* RPC program descriptor handed to rpc_create_client() in nsm_create() */
static struct rpc_program	nsm_program = {
		.name		= "statd",
		.number		= SM_PROGRAM,
		.nrvers		= sizeof(nsm_version)/sizeof(nsm_version[0]),
		.version	= nsm_version,
		.stats		= &nsm_stats
};
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c new file mode 100644 index 000000000000..b82e470912e8 --- /dev/null +++ b/fs/lockd/svc.c | |||
@@ -0,0 +1,519 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svc.c | ||
3 | * | ||
4 | * This is the central lockd service. | ||
5 | * | ||
6 | * FIXME: Separate the lockd NFS server functionality from the lockd NFS | ||
7 | * client functionality. Oh why didn't Sun create two separate | ||
8 | * services in the first place? | ||
9 | * | ||
10 | * Authors: Olaf Kirch (okir@monad.swb.de) | ||
11 | * | ||
12 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/sysctl.h> | ||
19 | #include <linux/moduleparam.h> | ||
20 | |||
21 | #include <linux/sched.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/in.h> | ||
24 | #include <linux/uio.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | |||
29 | #include <linux/sunrpc/types.h> | ||
30 | #include <linux/sunrpc/stats.h> | ||
31 | #include <linux/sunrpc/clnt.h> | ||
32 | #include <linux/sunrpc/svc.h> | ||
33 | #include <linux/sunrpc/svcsock.h> | ||
34 | #include <linux/lockd/lockd.h> | ||
35 | #include <linux/nfs.h> | ||
36 | |||
37 | #define NLMDBG_FACILITY NLMDBG_SVC | ||
38 | #define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE) | ||
39 | #define ALLOWED_SIGS (sigmask(SIGKILL)) | ||
40 | |||
41 | static struct svc_program nlmsvc_program; | ||
42 | |||
43 | struct nlmsvc_binding * nlmsvc_ops; | ||
44 | EXPORT_SYMBOL(nlmsvc_ops); | ||
45 | |||
46 | static DECLARE_MUTEX(nlmsvc_sema); | ||
47 | static unsigned int nlmsvc_users; | ||
48 | static pid_t nlmsvc_pid; | ||
49 | int nlmsvc_grace_period; | ||
50 | unsigned long nlmsvc_timeout; | ||
51 | |||
52 | static DECLARE_MUTEX_LOCKED(lockd_start); | ||
53 | static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); | ||
54 | |||
55 | /* | ||
56 | * These can be set at insmod time (useful for NFS as root filesystem), | ||
57 | * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003 | ||
58 | */ | ||
59 | static unsigned long nlm_grace_period; | ||
60 | static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO; | ||
61 | static int nlm_udpport, nlm_tcpport; | ||
62 | |||
63 | /* | ||
64 | * Constants needed for the sysctl interface. | ||
65 | */ | ||
66 | static const unsigned long nlm_grace_period_min = 0; | ||
67 | static const unsigned long nlm_grace_period_max = 240; | ||
68 | static const unsigned long nlm_timeout_min = 3; | ||
69 | static const unsigned long nlm_timeout_max = 20; | ||
70 | static const int nlm_port_min = 0, nlm_port_max = 65535; | ||
71 | |||
72 | static struct ctl_table_header * nlm_sysctl_table; | ||
73 | |||
74 | static unsigned long set_grace_period(void) | ||
75 | { | ||
76 | unsigned long grace_period; | ||
77 | |||
78 | /* Note: nlm_timeout should always be nonzero */ | ||
79 | if (nlm_grace_period) | ||
80 | grace_period = ((nlm_grace_period + nlm_timeout - 1) | ||
81 | / nlm_timeout) * nlm_timeout * HZ; | ||
82 | else | ||
83 | grace_period = nlm_timeout * 5 * HZ; | ||
84 | nlmsvc_grace_period = 1; | ||
85 | return grace_period + jiffies; | ||
86 | } | ||
87 | |||
/* Leave the grace period: blocked-lock retries may resume. */
static inline void clear_grace_period(void)
{
	nlmsvc_grace_period = 0;
}
92 | |||
93 | /* | ||
94 | * This is the lockd kernel thread | ||
95 | */ | ||
/*
 * This is the lockd kernel thread.  It services NLM requests until the
 * last user goes away and it receives a signal, or until a newer lockd
 * instance takes over (nlmsvc_pid changes under us).
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to startup, so will
	 * be holding a reference to this module, so it
	 * is safe to just claim another reference
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.  lockd_up() is blocked in
	 * down(&lockd_start) until this up().
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize("lockd");

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/* Guard against a zero timeout (division in set_grace_period). */
	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;

		if (signalled()) {
			/* SIGKILL (e.g. from lockd_down): drop all locks and
			 * restart the grace period, but keep serving. */
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		svc_process(serv, rqstp);

	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	/* Wake anyone waiting in lockd_down(). */
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
205 | |||
206 | /* | ||
207 | * Bring up the lockd process if it's not already up. | ||
208 | */ | ||
/*
 * Bring up the lockd process if it's not already up.
 * Callers (NFS client mounts, nfsd) each take a reference via
 * nlmsvc_users; the thread is only started for the first one.
 * Returns 0 on success or a negative errno.
 */
int
lockd_up(void)
{
	static int		warned;
	struct svc_serv *	serv;
	int			error = 0;

	down(&nlmsvc_sema);
	/*
	 * Unconditionally increment the user count ... this is
	 * the number of clients who _want_ a lockd process.
	 */
	nlmsvc_users++;
	/*
	 * Check whether we're already up and running.
	 */
	if (nlmsvc_pid)
		goto out;

	/*
	 * Sanity check: if there's no pid,
	 * we should be the first user ...
	 */
	if (nlmsvc_users > 1)
		printk(KERN_WARNING
			"lockd_up: no pid, %d users??\n", nlmsvc_users);

	error = -ENOMEM;
	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE);
	if (!serv) {
		printk(KERN_WARNING "lockd_up: create service failed\n");
		goto out;
	}

	/* TCP listener only when nfsd is built with TCP support. */
	if ((error = svc_makesock(serv, IPPROTO_UDP, nlm_udpport)) < 0
#ifdef CONFIG_NFSD_TCP
	    || (error = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport)) < 0
#endif
		) {
		if (warned++ == 0)
			printk(KERN_WARNING
				"lockd_up: makesock failed, error=%d\n", error);
		goto destroy_and_out;
	}
	warned = 0;

	/*
	 * Create the kernel thread and wait for it to start.
	 * The down() pairs with the up(&lockd_start) in lockd().
	 */
	error = svc_create_thread(lockd, serv);
	if (error) {
		printk(KERN_WARNING
			"lockd_up: create thread failed, error=%d\n", error);
		goto destroy_and_out;
	}
	down(&lockd_start);

	/*
	 * Note: svc_serv structures have an initial use count of 1,
	 * so we exit through here on both success and failure.
	 */
destroy_and_out:
	svc_destroy(serv);
out:
	up(&nlmsvc_sema);
	return error;
}
EXPORT_SYMBOL(lockd_up);
277 | |||
278 | /* | ||
279 | * Decrement the user count and bring down lockd if we're the last. | ||
280 | */ | ||
/*
 * Decrement the user count and bring down lockd if we're the last.
 * The thread is stopped by sending it SIGKILL; we then wait (bounded
 * to one second, since we hold nlmsvc_sema) for it to signal exit
 * via the lockd_exit waitqueue.
 */
void
lockd_down(void)
{
	static int warned;

	down(&nlmsvc_sema);
	if (nlmsvc_users) {
		if (--nlmsvc_users)
			goto out;
	} else
		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);

	if (!nlmsvc_pid) {
		if (warned++ == 0)
			printk(KERN_WARNING "lockd_down: no lockd running.\n");
		goto out;
	}
	warned = 0;

	kill_proc(nlmsvc_pid, SIGKILL, 1);
	/*
	 * Wait for the lockd process to exit, but since we're holding
	 * the lockd semaphore, we can't wait around forever ...
	 */
	/* Clear any pending signal so the sleep below isn't cut short. */
	clear_thread_flag(TIF_SIGPENDING);
	interruptible_sleep_on_timeout(&lockd_exit, HZ);
	if (nlmsvc_pid) {
		/* Thread didn't exit in time; reclaim the slot anyway. */
		printk(KERN_WARNING
			"lockd_down: lockd failed to exit, clearing pid\n");
		nlmsvc_pid = 0;
	}
	/* Re-derive our pending-signal state after the flag clear above. */
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
out:
	up(&nlmsvc_sema);
}
EXPORT_SYMBOL(lockd_down);
319 | |||
320 | /* | ||
321 | * Sysctl parameters (same as module parameters, different interface). | ||
322 | */ | ||
323 | |||
324 | /* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */ | ||
325 | #define CTL_UNNUMBERED -2 | ||
326 | |||
327 | static ctl_table nlm_sysctls[] = { | ||
328 | { | ||
329 | .ctl_name = CTL_UNNUMBERED, | ||
330 | .procname = "nlm_grace_period", | ||
331 | .data = &nlm_grace_period, | ||
332 | .maxlen = sizeof(int), | ||
333 | .mode = 0644, | ||
334 | .proc_handler = &proc_doulongvec_minmax, | ||
335 | .extra1 = (unsigned long *) &nlm_grace_period_min, | ||
336 | .extra2 = (unsigned long *) &nlm_grace_period_max, | ||
337 | }, | ||
338 | { | ||
339 | .ctl_name = CTL_UNNUMBERED, | ||
340 | .procname = "nlm_timeout", | ||
341 | .data = &nlm_timeout, | ||
342 | .maxlen = sizeof(int), | ||
343 | .mode = 0644, | ||
344 | .proc_handler = &proc_doulongvec_minmax, | ||
345 | .extra1 = (unsigned long *) &nlm_timeout_min, | ||
346 | .extra2 = (unsigned long *) &nlm_timeout_max, | ||
347 | }, | ||
348 | { | ||
349 | .ctl_name = CTL_UNNUMBERED, | ||
350 | .procname = "nlm_udpport", | ||
351 | .data = &nlm_udpport, | ||
352 | .maxlen = sizeof(int), | ||
353 | .mode = 0644, | ||
354 | .proc_handler = &proc_dointvec_minmax, | ||
355 | .extra1 = (int *) &nlm_port_min, | ||
356 | .extra2 = (int *) &nlm_port_max, | ||
357 | }, | ||
358 | { | ||
359 | .ctl_name = CTL_UNNUMBERED, | ||
360 | .procname = "nlm_tcpport", | ||
361 | .data = &nlm_tcpport, | ||
362 | .maxlen = sizeof(int), | ||
363 | .mode = 0644, | ||
364 | .proc_handler = &proc_dointvec_minmax, | ||
365 | .extra1 = (int *) &nlm_port_min, | ||
366 | .extra2 = (int *) &nlm_port_max, | ||
367 | }, | ||
368 | { .ctl_name = 0 } | ||
369 | }; | ||
370 | |||
/* Directory nodes placing the knobs under /proc/sys/fs/nfs/. */
static ctl_table nlm_sysctl_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "nfs",
		.mode		= 0555,
		.child		= nlm_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table nlm_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= nlm_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
390 | |||
391 | /* | ||
392 | * Module (and driverfs) parameters. | ||
393 | */ | ||
394 | |||
/*
 * Generate a range-checked module-parameter setter.  `type` is the
 * type of the backing variable (int or unsigned long here).
 *
 * Fix: the result must be stored through a pointer of the parameter's
 * actual type.  The original stored through (int *) unconditionally,
 * which for an unsigned long target writes only half the object on
 * 64-bit (and the wrong half on big-endian).
 */
#define param_set_min_max(name, type, which_strtol, min, max)	\
static int param_set_##name(const char *val, struct kernel_param *kp) \
{									\
	char *endp;							\
	__typeof__(type) num = which_strtol(val, &endp, 0);		\
	if (endp == val || *endp || num < (min) || num > (max))		\
		return -EINVAL;						\
	*((type *) kp->arg) = num;					\
	return 0;							\
}
405 | |||
406 | static inline int is_callback(u32 proc) | ||
407 | { | ||
408 | return proc == NLMPROC_GRANTED | ||
409 | || proc == NLMPROC_GRANTED_MSG | ||
410 | || proc == NLMPROC_TEST_RES | ||
411 | || proc == NLMPROC_LOCK_RES | ||
412 | || proc == NLMPROC_CANCEL_RES | ||
413 | || proc == NLMPROC_UNLOCK_RES | ||
414 | || proc == NLMPROC_NSM_NOTIFY; | ||
415 | } | ||
416 | |||
417 | |||
/*
 * Per-request authentication hook for the lockd service.
 * AUTH_NULL/AUTH_UNIX requests are accepted; NULL pings and callback
 * procedures pass through unauthenticated, everything else must map
 * to a known client via svc_set_client().  Other flavours are denied.
 */
static int lockd_authenticate(struct svc_rqst *rqstp)
{
	rqstp->rq_client = NULL;
	switch (rqstp->rq_authop->flavour) {
		case RPC_AUTH_NULL:
		case RPC_AUTH_UNIX:
			/* Procedure 0 is the NULL ping — always OK. */
			if (rqstp->rq_proc == 0)
				return SVC_OK;
			if (is_callback(rqstp->rq_proc)) {
				/* Leave it to individual procedures to
				 * call nlmsvc_lookup_host(rqstp)
				 */
				return SVC_OK;
			}
			return svc_set_client(rqstp);
	}
	return SVC_DENIED;
}
436 | |||
437 | |||
/* Instantiate the range-checked setters used by module_param_call below. */
param_set_min_max(port, int, simple_strtol, 0, 65535)
param_set_min_max(grace_period, unsigned long, simple_strtoul,
		  nlm_grace_period_min, nlm_grace_period_max)
param_set_min_max(timeout, unsigned long, simple_strtoul,
		  nlm_timeout_min, nlm_timeout_max)

MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
MODULE_LICENSE("GPL");

/* Module parameters mirror the sysctl knobs (same variables, same mode). */
module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
		  &nlm_grace_period, 0644);
module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
		  &nlm_timeout, 0644);
module_param_call(nlm_udpport, param_set_port, param_get_int,
		  &nlm_udpport, 0644);
module_param_call(nlm_tcpport, param_set_port, param_get_int,
		  &nlm_tcpport, 0644);
456 | |||
457 | /* | ||
458 | * Initialising and terminating the module. | ||
459 | */ | ||
460 | |||
/*
 * Module init: only registers the sysctl tree.  The lockd thread
 * itself is started lazily via lockd_up().
 */
static int __init init_nlm(void)
{
	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root, 0);
	return nlm_sysctl_table ? 0 : -ENOMEM;
}
466 | |||
/*
 * Module exit: drop all NLM host references and remove the sysctls.
 */
static void __exit exit_nlm(void)
{
	/* FIXME: delete all NLM clients */
	nlm_shutdown_hosts();
	unregister_sysctl_table(nlm_sysctl_table);
}

module_init(init_nlm);
module_exit(exit_nlm);
476 | |||
477 | /* | ||
478 | * Define NLM program and procedures | ||
479 | */ | ||
/*
 * Define NLM program and procedures.
 * Versions 1 and 3 share the same procedure table (v3 adds the DOS
 * share procedures, entries 17-23); v4 has its own table with 64-bit
 * offsets, built only when CONFIG_LOCKD_V4 is set.
 */
static struct svc_version	nlmsvc_version1 = {
		.vs_vers	= 1,
		.vs_nproc	= 17,
		.vs_proc	= nlmsvc_procedures,
		.vs_xdrsize	= NLMSVC_XDRSIZE,
};
static struct svc_version	nlmsvc_version3 = {
		.vs_vers	= 3,
		.vs_nproc	= 24,
		.vs_proc	= nlmsvc_procedures,
		.vs_xdrsize	= NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
static struct svc_version	nlmsvc_version4 = {
		.vs_vers	= 4,
		.vs_nproc	= 24,
		.vs_proc	= nlmsvc_procedures4,
		.vs_xdrsize	= NLMSVC_XDRSIZE,
};
#endif
static struct svc_version *	nlmsvc_version[] = {
	[1] = &nlmsvc_version1,
	[3] = &nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
	[4] = &nlmsvc_version4,
#endif
};

static struct svc_stat		nlmsvc_stats;

#define NLM_NRVERS	(sizeof(nlmsvc_version)/sizeof(nlmsvc_version[0]))
static struct svc_program	nlmsvc_program = {
	.pg_prog		= NLM_PROGRAM,		/* program number */
	.pg_nvers		= NLM_NRVERS,		/* number of entries in nlmsvc_version */
	.pg_vers		= nlmsvc_version,	/* version table */
	.pg_name		= "lockd",		/* service name */
	.pg_class		= "nfsd",		/* share authentication with nfsd */
	.pg_stats		= &nlmsvc_stats,	/* stats table */
	.pg_authenticate = &lockd_authenticate	/* export authentication */
};
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c new file mode 100644 index 000000000000..489670e21769 --- /dev/null +++ b/fs/lockd/svc4proc.c | |||
@@ -0,0 +1,580 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svc4proc.c | ||
3 | * | ||
4 | * Lockd server procedures. We don't implement the NLM_*_RES | ||
5 | * procedures because we don't use the async procedures. | ||
6 | * | ||
7 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
8 | */ | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include <linux/time.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/in.h> | ||
14 | #include <linux/sunrpc/svc.h> | ||
15 | #include <linux/sunrpc/clnt.h> | ||
16 | #include <linux/nfsd/nfsd.h> | ||
17 | #include <linux/lockd/lockd.h> | ||
18 | #include <linux/lockd/share.h> | ||
19 | #include <linux/lockd/sm_inter.h> | ||
20 | |||
21 | |||
22 | #define NLMDBG_FACILITY NLMDBG_CLIENT | ||
23 | |||
24 | static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *); | ||
25 | static void nlm4svc_callback_exit(struct rpc_task *); | ||
26 | |||
27 | /* | ||
28 | * Obtain client and file from arguments | ||
29 | */ | ||
/*
 * Obtain client and file from arguments.
 * On success, *hostp (and *filp unless filp == NULL, as for FREE_ALL)
 * hold referenced objects the caller must release with
 * nlm_release_host()/nlm_release_file().  Returns 0 or an NLM status.
 */
static u32
nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
			struct nlm_host **hostp, struct nlm_file **filp)
{
	struct nlm_host		*host = NULL;
	struct nlm_file		*file = NULL;
	struct nlm_lock		*lock = &argp->lock;
	u32			error = 0;

	/* nfsd callbacks must have been installed for this procedure */
	if (!nlmsvc_ops)
		return nlm_lck_denied_nolocks;

	/* Obtain host handle; start NSM monitoring if requested and
	 * not already active for this peer. */
	if (!(host = nlmsvc_lookup_host(rqstp))
	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
		goto no_locks;
	*hostp = host;

	/* Obtain file pointer. Not used by FREE_ALL call. */
	if (filp != NULL) {
		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
			goto no_locks;
		*filp = file;

		/* Set up the missing parts of the file_lock structure */
		lock->fl.fl_file  = file->f_file;
		lock->fl.fl_owner = (fl_owner_t) host;
		lock->fl.fl_lmops = &nlmsvc_lock_operations;
	}

	return 0;

no_locks:
	/* Error path: drop the host ref we may have taken above. */
	if (host)
		nlm_release_host(host);
	if (error)
		return error;
	return nlm_lck_denied_nolocks;
}
70 | |||
71 | /* | ||
72 | * NULL: Test for presence of service | ||
73 | */ | ||
/*
 * NULL: Test for presence of service.  Standard RPC ping; no
 * arguments, no results.
 */
static int
nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	dprintk("lockd: NULL          called\n");
	return rpc_success;
}
80 | |||
81 | /* | ||
82 | * TEST: Check for conflicting lock | ||
83 | */ | ||
/*
 * TEST: Check for conflicting lock.  The NLM status goes in
 * resp->status; the RPC layer itself always sees rpc_success.
 */
static int
nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
				         struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: TEST4        called\n");
	/* Echo the client's cookie so it can match the reply. */
	resp->cookie = argp->cookie;

	/* Don't accept test requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now check for conflicting locks */
	resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock);

	dprintk("lockd: TEST4          status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
112 | |||
/*
 * LOCK: establish (or queue, if argp->block) a lock on behalf of the
 * client.  Reclaim requests are allowed through during grace period.
 */
static int
nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				         struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: LOCK          called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (nlmsvc_grace_period && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

#if 0
	/* If supplied state doesn't match current state, we assume it's
	 * an old request that time-warped somehow. Any error return would
	 * do in this case because it's irrelevant anyway.
	 *
	 * NB: We don't retrieve the remote host's state yet.
	 */
	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
		resp->status = nlm_lck_denied_nolocks;
	} else
#endif

	/* Now try to lock the file */
	resp->status = nlmsvc_lock(rqstp, file, &argp->lock,
					argp->block, &argp->cookie);

	dprintk("lockd: LOCK          status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
155 | |||
/*
 * CANCEL: withdraw a previously queued blocking LOCK request.
 */
static int
nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
				           struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: CANCEL        called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Try to cancel request. */
	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);

	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
185 | |||
186 | /* | ||
187 | * UNLOCK: release a lock | ||
188 | */ | ||
/*
 * UNLOCK: release a lock
 */
static int
nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
				           struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNLOCK        called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to remove the lock */
	resp->status = nlmsvc_unlock(file, &argp->lock);

	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
218 | |||
219 | /* | ||
220 | * GRANTED: A server calls us to tell that a process' lock request | ||
221 | * was granted | ||
222 | */ | ||
/*
 * GRANTED: A server calls us to tell that a process' lock request
 * was granted.  Here we act as NLM *client*: the grant is forwarded
 * to the local waiter via nlmclnt_grant().
 */
static int
nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res  *resp)
{
	resp->cookie = argp->cookie;

	dprintk("lockd: GRANTED       called\n");
	resp->status = nlmclnt_grant(&argp->lock);
	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
	return rpc_success;
}
234 | |||
235 | /* | ||
236 | * `Async' versions of the above service routines. They aren't really, | ||
237 | * because we send the callback before the reply proper. I hope this | ||
238 | * doesn't break any clients. | ||
239 | */ | ||
/* TEST_MSG: async TEST — run the sync handler, then push the result
 * back to the caller as a separate NLMPROC_TEST_RES call. */
static int
nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void	     *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: TEST_MSG      called\n");
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res);
	return stat;
}

/* LOCK_MSG: async LOCK; result delivered via NLMPROC_LOCK_RES. */
static int
nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void	     *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: LOCK_MSG      called\n");
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res);
	return stat;
}

/* CANCEL_MSG: async CANCEL; result delivered via NLMPROC_CANCEL_RES. */
static int
nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					       void	       *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: CANCEL_MSG    called\n");
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
	return stat;
}

/* UNLOCK_MSG: async UNLOCK; result delivered via NLMPROC_UNLOCK_RES. */
static int
nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
                                               void            *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: UNLOCK_MSG    called\n");
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
	return stat;
}

/* GRANTED_MSG: async GRANTED; result delivered via NLMPROC_GRANTED_RES. */
static int
nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
                                                void            *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: GRANTED_MSG   called\n");
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
	return stat;
}
314 | |||
315 | /* | ||
316 | * SHARE: create a DOS share or alter existing share. | ||
317 | */ | ||
/*
 * SHARE: create a DOS share or alter existing share.
 */
static int
nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
				          struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: SHARE         called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (nlmsvc_grace_period && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to create the share */
	resp->status = nlmsvc_share_file(host, file, argp);

	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
347 | |||
348 | /* | ||
349 | * UNSHARE: Release a DOS share. | ||
350 | */ | ||
/*
 * UNSHARE: Release a DOS share.
 */
static int
nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNSHARE       called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to release the share */
	resp->status = nlmsvc_unshare_file(host, file, argp);

	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
380 | |||
381 | /* | ||
382 | * NM_LOCK: Create an unmonitored lock | ||
383 | */ | ||
/*
 * NM_LOCK: Create an unmonitored lock — identical to LOCK except the
 * peer will not be watched by statd, so the lock won't survive reboot
 * recovery.
 */
static int
nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res  *resp)
{
	dprintk("lockd: NM_LOCK       called\n");

	argp->monitor = 0;		/* just clean the monitor flag */
	return nlm4svc_proc_lock(rqstp, argp, resp);
}
393 | |||
394 | /* | ||
395 | * FREE_ALL: Release all locks and shares held by client | ||
396 | */ | ||
/*
 * FREE_ALL: Release all locks and shares held by client.
 * One-way call: any failure is silently mapped to rpc_success.
 */
static int
nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void            *resp)
{
	struct nlm_host	*host;

	/* Obtain client — NULL filp: no file lookup for FREE_ALL. */
	if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL))
		return rpc_success;

	nlmsvc_free_host_resources(host);
	nlm_release_host(host);
	return rpc_success;
}
411 | |||
412 | /* | ||
413 | * SM_NOTIFY: private callback from statd (not part of official NLM proto) | ||
414 | */ | ||
/*
 * SM_NOTIFY: private callback from statd (not part of official NLM proto).
 * Only accepted from the loopback interface on a privileged port, since
 * a forged reboot notification would drop a client's locks.
 */
static int
nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
					      void	        *resp)
{
	struct sockaddr_in	saddr = rqstp->rq_addr;
	int			vers = argp->vers;
	int			prot = argp->proto >> 1;

	struct nlm_host		*host;

	dprintk("lockd: SM_NOTIFY     called\n");
	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
	 || ntohs(saddr.sin_port) >= 1024) {
		printk(KERN_WARNING
			"lockd: rejected NSM callback from %08x:%d\n",
			ntohl(rqstp->rq_addr.sin_addr.s_addr),
			ntohs(rqstp->rq_addr.sin_port));
		return rpc_system_err;
	}

	/* Obtain the host pointer for this NFS server and try to
	 * reclaim all locks we hold on this server.
	 */
	saddr.sin_addr.s_addr = argp->addr;

	/* proto bit 0 distinguishes client-side vs server-side hosts;
	 * the remaining bits carry the transport protocol. */
	if ((argp->proto & 1)==0) {
		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
			nlmclnt_recovery(host, argp->state);
			nlm_release_host(host);
		}
	} else {
		/* If we run on an NFS server, delete all locks held by the client */

		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
			nlmsvc_free_host_resources(host);
			nlm_release_host(host);
		}
	}
	return rpc_success;
}
455 | |||
/*
 * client sent a GRANTED_RES, let's remove the associated block
 *
 * The client acknowledges (or rejects) a lock we granted through the
 * GRANTED_MSG callback; pass the cookie and status on to the block
 * machinery, which removes or re-schedules the block.
 */
static int
nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
						void	       *resp)
{
	/* nlmsvc_ops unset -- presumably no nfsd registered, so there
	 * can be no server-side blocks to clean up */
	if (!nlmsvc_ops)
		return rpc_success;

	dprintk("lockd: GRANTED_RES called\n");

	nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
	return rpc_success;
}
471 | |||
472 | |||
473 | |||
/*
 * This is the generic lockd callback for async RPC calls
 *
 * Allocates an nlm_rqst, looks up (and takes a reference on) the host
 * that sent the request, copies the result to be transmitted into the
 * call and fires off an async RPC; nlm4svc_callback_exit() releases
 * the host reference and frees the call when the RPC completes.
 */
static u32
nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_rqst	*call;

	if (!(call = nlmclnt_alloc_call()))
		return rpc_system_err;

	host = nlmclnt_lookup_host(&rqstp->rq_addr,
				rqstp->rq_prot, rqstp->rq_vers);
	if (!host) {
		kfree(call);
		return rpc_system_err;
	}

	call->a_flags = RPC_TASK_ASYNC;
	call->a_host = host;
	/* NOTE(review): a_args is a struct nlm_args, yet it is overlaid
	 * with the nlm_res bytes to transmit -- this relies on the XDR
	 * encoder reading the res layout out of a_args; confirm the
	 * field layouts before touching this. */
	memcpy(&call->a_args, resp, sizeof(*resp));

	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
		goto error;

	return rpc_success;
 error:
	kfree(call);
	nlm_release_host(host);
	return rpc_system_err;
}
506 | |||
507 | static void | ||
508 | nlm4svc_callback_exit(struct rpc_task *task) | ||
509 | { | ||
510 | struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata; | ||
511 | |||
512 | if (task->tk_status < 0) { | ||
513 | dprintk("lockd: %4d callback failed (errno = %d)\n", | ||
514 | task->tk_pid, -task->tk_status); | ||
515 | } | ||
516 | nlm_release_host(call->a_host); | ||
517 | kfree(call); | ||
518 | } | ||
519 | |||
/*
 * NLM Server procedures.
 */

/* XDR routines that have nothing to encode or decode are aliased to
 * the void routines so the PROC() table below can be filled in with
 * a uniform naming scheme. */
#define nlm4svc_encode_norep		nlm4svc_encode_void
#define nlm4svc_decode_norep		nlm4svc_decode_void
#define nlm4svc_decode_testres		nlm4svc_decode_void
#define nlm4svc_decode_lockres		nlm4svc_decode_void
#define nlm4svc_decode_unlockres	nlm4svc_decode_void
#define nlm4svc_decode_cancelres	nlm4svc_decode_void
#define nlm4svc_decode_grantedres	nlm4svc_decode_void

/* Procedures this server does not act on are served by the NULL
 * handler (note GRANTED_RES is handled for real, above). */
#define nlm4svc_proc_none		nlm4svc_proc_null
#define nlm4svc_proc_test_res		nlm4svc_proc_null
#define nlm4svc_proc_lock_res		nlm4svc_proc_null
#define nlm4svc_proc_cancel_res		nlm4svc_proc_null
#define nlm4svc_proc_unlock_res		nlm4svc_proc_null

/* Dummy argument/result type for procedures that take or return
 * nothing -- PROC() below needs a sizeof(struct nlm_##t). */
struct nlm_void			{ int dummy; };
539 | |||
/*
 * Build one svc_procedure entry:
 *   name     - suffix of the nlm4svc_proc_* handler
 *   xargt    - suffix of the XDR argument decoder
 *   xrest    - suffix of the XDR result encoder
 *   argt     - nlm_* struct sized as the decoded argument buffer
 *   rest     - nlm_* struct sized as the result buffer
 *   respsize - reply size estimate, in 32-bit XDR words
 */
#define PROC(name, xargt, xrest, argt, rest, respsize)	\
 { .pc_func	= (svc_procfunc) nlm4svc_proc_##name,	\
   .pc_decode	= (kxdrproc_t) nlm4svc_decode_##xargt,	\
   .pc_encode	= (kxdrproc_t) nlm4svc_encode_##xrest,	\
   .pc_release	= NULL,					\
   .pc_argsize	= sizeof(struct nlm_##argt),		\
   .pc_ressize	= sizeof(struct nlm_##rest),		\
   .pc_xdrressize = respsize,				\
 }
/* Reply-size building blocks, in 32-bit XDR words */
#define Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
#define No	(1+1024/4)				/* netobj */
#define St	1					/* status */
#define Rg	4					/* range (offset + length) */
/* NLMv4 dispatch table; entries are in procedure-number order, with
 * unassigned slots filled by the "none" (null) handler. */
struct svc_procedure		nlmsvc_procedures4[] = {
  PROC(null,		void,		void,		void,	void, 1),
  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
  PROC(granted,		testargs,	res,		args,	res, Ck+St),
  PROC(test_msg,	testargs,	norep,		args,	void, 1),
  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
  PROC(test_res,	testres,	norep,		res,	void, 1),
  PROC(lock_res,	lockres,	norep,		res,	void, 1),
  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
  PROC(granted_res,	res,		norep,		res,	void, 1),
  /* statd callback */
  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(free_all,	notify,		void,		args,	void, 1),

};
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c new file mode 100644 index 000000000000..49f959796b66 --- /dev/null +++ b/fs/lockd/svclock.c | |||
@@ -0,0 +1,686 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svclock.c | ||
3 | * | ||
4 | * Handling of server-side locks, mostly of the blocked variety. | ||
5 | * This is the ugliest part of lockd because we tread on very thin ice. | ||
6 | * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc. | ||
7 | * IMNSHO introducing the grant callback into the NLM protocol was one | ||
8 | * of the worst ideas Sun ever had. Except maybe for the idea of doing | ||
9 | * NFS file locking at all. | ||
10 | * | ||
11 | * I'm trying hard to avoid race conditions by protecting most accesses | ||
12 | * to a file's list of blocked locks through a semaphore. The global | ||
13 | * list of blocked locks is not protected in this fashion however. | ||
14 | * Therefore, some functions (such as the RPC callback for the async grant | ||
15 | * call) move blocked locks towards the head of the list *while some other | ||
16 | * process might be traversing it*. This should not be a problem in | ||
17 | * practice, because this will only cause functions traversing the list | ||
18 | * to visit some blocks twice. | ||
19 | * | ||
20 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/sunrpc/clnt.h> | ||
30 | #include <linux/sunrpc/svc.h> | ||
31 | #include <linux/lockd/nlm.h> | ||
32 | #include <linux/lockd/lockd.h> | ||
33 | |||
34 | #define NLMDBG_FACILITY NLMDBG_SVCLOCK | ||
35 | |||
36 | #ifdef CONFIG_LOCKD_V4 | ||
37 | #define nlm_deadlock nlm4_deadlock | ||
38 | #else | ||
39 | #define nlm_deadlock nlm_lck_denied | ||
40 | #endif | ||
41 | |||
42 | static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); | ||
43 | static int nlmsvc_remove_block(struct nlm_block *block); | ||
44 | static void nlmsvc_grant_callback(struct rpc_task *task); | ||
45 | |||
46 | /* | ||
47 | * The list of blocked locks to retry | ||
48 | */ | ||
49 | static struct nlm_block * nlm_blocked; | ||
50 | |||
/*
 * Insert a blocked lock into the global list
 *
 * @when is a relative timeout in jiffies, or NLM_NEVER.  The list is
 * kept sorted by absolute expiry time, with all NLM_NEVER entries at
 * the tail.  A block that is already queued is unlinked first, so
 * this function doubles as "re-schedule".
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block **bp, *b;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (block->b_queued)
		nlmsvc_remove_block(block);
	bp = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Convert to absolute time; bump by one if the result
		 * happens to collide with the NLM_NEVER sentinel. */
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER)
			bp = &b->b_next;
	} else
		/* NLM_NEVER entries always go to the very end */
		while ((b = *bp) != 0)
			bp = &b->b_next;

	block->b_queued = 1;
	block->b_when = when;
	block->b_next = b;
	*bp = block;
}
77 | |||
78 | /* | ||
79 | * Remove a block from the global list | ||
80 | */ | ||
81 | static int | ||
82 | nlmsvc_remove_block(struct nlm_block *block) | ||
83 | { | ||
84 | struct nlm_block **bp, *b; | ||
85 | |||
86 | if (!block->b_queued) | ||
87 | return 1; | ||
88 | for (bp = &nlm_blocked; (b = *bp) != 0; bp = &b->b_next) { | ||
89 | if (b == block) { | ||
90 | *bp = block->b_next; | ||
91 | block->b_queued = 0; | ||
92 | return 1; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
/*
 * Find a block for a given lock and optionally remove it from
 * the list.
 *
 * A block matches when it belongs to @file and its lock compares
 * equal to @lock (nlm_compare_locks).  With @remove set, a matching
 * block is unlinked from nlm_blocked in place (b_queued cleared)
 * before being returned.  Returns NULL when nothing matches.
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
{
	struct nlm_block	**head, *block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) {
		fl = &block->b_call.a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call.a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			if (remove) {
				*head = block->b_next;
				block->b_queued = 0;
			}
			return block;
		}
	}

	return NULL;
}
131 | |||
132 | static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b) | ||
133 | { | ||
134 | if(a->len != b->len) | ||
135 | return 0; | ||
136 | if(memcmp(a->data,b->data,a->len)) | ||
137 | return 0; | ||
138 | return 1; | ||
139 | } | ||
140 | |||
/*
 * Find a block with a given NLM cookie.
 *
 * The peer address is matched as well as the cookie, since a cookie
 * is chosen by the client and need not be globally unique.  Returns
 * NULL when no block matches.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin)
{
	struct nlm_block	*block;

	for (block = nlm_blocked; block; block = block->b_next) {
		dprintk("cookie: head of blocked queue %p, block %p\n",
			nlm_blocked, block);
		if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie)
				&& nlm_cmp_addr(sin, &block->b_host->h_addr))
			break;
	}

	return block;
}
159 | |||
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * On success the new block is already linked into @file's f_blocks
 * list and holds a reference on the callback host.  Returns NULL on
 * host-lookup or allocation failure, with all partial state undone.
 */
static inline struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
				struct nlm_lock *lock, struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_host		*host;
	struct nlm_rqst		*call;

	/* Create host handle for callback */
	host = nlmclnt_lookup_host(&rqstp->rq_addr,
				rqstp->rq_prot, rqstp->rq_vers);
	if (host == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL)))
		goto failed;
	memset(block, 0, sizeof(*block));
	locks_init_lock(&block->b_call.a_args.lock.fl);
	locks_init_lock(&block->b_call.a_res.lock.fl);

	/* Copy the lock (and the client's owner handle) into the
	 * GRANTED_MSG arguments; may allocate, hence can fail */
	if (!nlmclnt_setgrantargs(&block->b_call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	block->b_call.a_args.cookie = *cookie;	/* see above */

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host	= host;
	block->b_file	= file;

	/* Add to file's list of blocks */
	block->b_fnext	= file->f_blocks;
	file->f_blocks	= block;

	/* Set up RPC arguments for callback */
	call = &block->b_call;
	call->a_host	= host;
	call->a_flags	= RPC_TASK_ASYNC;

	return block;

failed_free:
	kfree(block);
failed:
	nlm_release_host(host);
	return NULL;
}
222 | |||
/*
 * Delete a block. If the lock was cancelled or the grant callback
 * failed, unlock is set to 1.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 *
 * A block that is in the middle of a GRANT callback (b_incall) is
 * not freed here; it is parked on nlm_blocked with b_done set and
 * killed for real by nlmsvc_retry_blocked() once the RPC completes.
 */
static void
nlmsvc_delete_block(struct nlm_block *block, int unlock)
{
	struct file_lock	*fl = &block->b_call.a_args.lock.fl;
	struct nlm_file		*file = block->b_file;
	struct nlm_block	**bp;

	dprintk("lockd: deleting block %p...\n", block);

	/* Remove block from list */
	nlmsvc_remove_block(block);
	/* fl_next set means the lock is still blocked on a conflicting
	 * lock in the VFS; detach it */
	if (fl->fl_next)
		posix_unblock_lock(file->f_file, fl);
	if (unlock) {
		/* Undo a lock that may already have been granted */
		fl->fl_type = F_UNLCK;
		posix_lock_file(file->f_file, fl);
		block->b_granted = 0;
	}

	/* If the block is in the middle of a GRANT callback,
	 * don't kill it yet. */
	if (block->b_incall) {
		nlmsvc_insert_block(block, NLM_NEVER);
		block->b_done = 1;
		return;
	}

	/* Remove block from file's list of blocks */
	for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) {
		if (*bp == block) {
			*bp = block->b_fnext;
			break;
		}
	}

	if (block->b_host)
		nlm_release_host(block->b_host);
	nlmclnt_freegrantargs(&block->b_call);
	kfree(block);
}
269 | |||
/*
 * Loop over all blocks and perform the action specified.
 * (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
 *
 * NLM_ACT_MARK flags each block's host as in use (h_inuse);
 * NLM_ACT_UNLOCK deletes (with unlock) every block owned by @host,
 * or every block when @host is NULL.  Runs under file->f_sema.
 * Always returns 0.
 */
int
nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
{
	struct nlm_block	*block, *next;

	down(&file->f_sema);
	for (block = file->f_blocks; block; block = next) {
		/* Fetch the successor first: nlmsvc_delete_block may
		 * free the current block */
		next = block->b_fnext;
		if (action == NLM_ACT_MARK)
			block->b_host->h_inuse = 1;
		else if (action == NLM_ACT_UNLOCK) {
			if (host == NULL || host == block->b_host)
				nlmsvc_delete_block(block, 1);
		}
	}
	up(&file->f_sema);
	return 0;
}
292 | |||
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * @wait selects blocking behaviour: when clear, a conflict yields an
 * immediate nlm_lck_denied; when set, a block is created (or reused,
 * if the client is busy-waiting and retransmitted the request) and
 * nlm_lck_blocked is returned.  @cookie is copied into the block so
 * the eventual GRANTED_MSG carries the client's cookie.
 */
u32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
	struct file_lock	*conflock;
	struct nlm_block	*block;
	int			error;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file->f_dentry->d_inode->i_sb->s_id,
				file->f_file->f_dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);


	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock, 0);

	lock->fl.fl_flags |= FL_LOCKD;

again:
	/* Lock file against concurrent access */
	down(&file->f_sema);

	if (!(conflock = posix_test_lock(file->f_file, &lock->fl))) {
		/* No conflict visible: take the lock for real */
		error = posix_lock_file(file->f_file, &lock->fl);

		/* A leftover block from a busy-waiting client is now moot */
		if (block)
			nlmsvc_delete_block(block, 0);
		up(&file->f_sema);

		dprintk("lockd: posix_lock_file returned %d\n", -error);
		switch(-error) {
		case 0:
			return nlm_granted;
		case EDEADLK:
			return nlm_deadlock;
		case EAGAIN:
			return nlm_lck_denied;
		default:			/* includes ENOLCK */
			return nlm_lck_denied_nolocks;
		}
	}

	if (!wait) {
		up(&file->f_sema);
		return nlm_lck_denied;
	}

	if (posix_locks_deadlock(&lock->fl, conflock)) {
		up(&file->f_sema);
		return nlm_deadlock;
	}

	/* If we don't have a block, create and initialize it. Then
	 * retry because we may have slept in kmalloc. */
	/* We have to release f_sema as nlmsvc_create_block may try to
	 * to claim it while doing host garbage collection */
	if (block == NULL) {
		up(&file->f_sema);
		dprintk("lockd: blocking on this lock (allocating).\n");
		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
			return nlm_lck_denied_nolocks;
		goto again;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);

	if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) {
		/* Now add block to block list of the conflicting lock
		   if we haven't done so. */
		dprintk("lockd: blocking on this lock.\n");
		posix_block_lock(conflock, &block->b_call.a_args.lock.fl);
	}

	up(&file->f_sema);
	return nlm_lck_blocked;
}
378 | |||
/*
 * Test for presence of a conflicting lock.
 *
 * Returns nlm_granted when nothing conflicts.  Otherwise @conflock is
 * filled with the conflicting lock (owner handle deliberately left
 * empty) and nlm_lck_denied is returned.
 */
u32
nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
				       struct nlm_lock *conflock)
{
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				file->f_file->f_dentry->d_inode->i_sb->s_id,
				file->f_file->f_dentry->d_inode->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if ((fl = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
		dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
				fl->fl_type, (long long)fl->fl_start,
				(long long)fl->fl_end);
		conflock->caller = "somehost";	/* FIXME */
		conflock->oh.len = 0;		/* don't return OH info */
		conflock->fl = *fl;
		return nlm_lck_denied;
	}

	return nlm_granted;
}
407 | |||
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 *
 * Returns nlm_granted on success, nlm_lck_denied_nolocks when the
 * VFS unlock fails.
 */
u32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
	int	error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_dentry->d_inode->i_sb->s_id,
				file->f_file->f_dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = posix_lock_file(file->f_file, &lock->fl);

	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}
435 | |||
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 *
 * Looks up (and unlinks) the matching block under f_sema and deletes
 * it with unlock=1, undoing any lock that may already have been
 * granted.  Always returns nlm_granted.
 */
u32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_dentry->d_inode->i_sb->s_id,
				file->f_file->f_dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	down(&file->f_sema);
	if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL)
		nlmsvc_delete_block(block, 1);
	up(&file->f_sema);
	return nlm_granted;
}
461 | |||
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 * A warning is logged for a notification that matches no known block.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	**bp, *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) {
		if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) {
			/* when=0 => immediately eligible for retry */
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			return;
		}
	}

	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
485 | |||
486 | static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) | ||
487 | { | ||
488 | return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; | ||
489 | } | ||
490 | |||
/*
 * Lock-manager operations lockd attaches to the locks it holds on
 * behalf of clients: fl_compare_owner decides whether two locks have
 * the same owner, fl_notify (nlmsvc_notify_blocked) is called by the
 * VFS when a lock we are blocked on goes away.
 */
struct lock_manager_operations nlmsvc_lock_operations = {
	.fl_compare_owner = nlmsvc_same_owner,
	.fl_notify = nlmsvc_notify_blocked,
};
495 | |||
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 *
 * Called from nlmsvc_retry_blocked() under no lock; takes f_sema
 * itself.  Either re-queues the block (still conflicting, or VFS
 * error) or marks it granted/in-call and fires the async callback.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call.a_args.lock;
	struct file_lock	*conflock;
	int			error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* First thing is lock the file */
	down(&file->f_sema);

	/* Unlink block request from list */
	nlmsvc_remove_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	if ((conflock = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
		/* Bummer, we blocked again */
		dprintk("lockd: lock still blocked\n");
		nlmsvc_insert_block(block, NLM_NEVER);
		posix_block_lock(conflock, &lock->fl);
		up(&file->f_sema);
		return;
	}

	/* Alright, no conflicting lock. Now lock it for real. If the
	 * following yields an error, this is most probably due to low
	 * memory. Retry the lock in a few seconds.
	 */
	if ((error = posix_lock_file(file->f_file, &lock->fl)) < 0) {
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, 10 * HZ);
		up(&file->f_sema);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;
	block->b_incall = 1;

	/* Schedule next grant callback in 30 seconds */
	nlmsvc_insert_block(block, 30 * HZ);

	/* Call the client; the extra host ref is dropped either here on
	 * submission failure or by nlmsvc_grant_callback() */
	nlm_get_host(block->b_call.a_host);
	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
						nlmsvc_grant_callback) < 0)
		nlm_release_host(block->b_call.a_host);
	up(&file->f_sema);
}
569 | |||
/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void
nlmsvc_grant_callback(struct rpc_task *task)
{
	struct nlm_rqst		*call = (struct nlm_rqst *) task->tk_calldata;
	struct nlm_block	*block;
	unsigned long		timeout;
	struct sockaddr_in	*peer_addr = RPC_PEERADDR(task->tk_client);

	dprintk("lockd: GRANT_MSG RPC callback\n");
	dprintk("callback: looking for cookie %s, host (%u.%u.%u.%u)\n",
		nlmdbg_cookie2a(&call->a_args.cookie),
		NIPQUAD(peer_addr->sin_addr.s_addr));
	if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) {
		dprintk("lockd: no block for cookie %s, host (%u.%u.%u.%u)\n",
			nlmdbg_cookie2a(&call->a_args.cookie),
			NIPQUAD(peer_addr->sin_addr.s_addr));
		/* NOTE(review): this early return skips the
		 * nlm_release_host(call->a_host) below; if this path is
		 * ever reachable, the ref taken in nlmsvc_grant_blocked()
		 * appears to leak -- confirm reachability */
		return;
	}

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else if (block->b_done) {
		/* Block already removed, kill it for real */
		timeout = 0;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block(block, timeout);
	svc_wake_up(block->b_daemon);
	block->b_incall = 0;

	nlm_release_host(call->a_host);
}
616 | |||
/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 *
 * GRACE_PERIOD responses just re-schedule the block; GRANTED removes
 * it (the client now holds the lock), anything else removes it with
 * unlock.  The file is pinned via f_count across the operation, and
 * the block is looked up a second time under f_sema since it may
 * have gone away while we waited for the semaphore.
 */
void
nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status)
{
	struct nlm_block	*block;
	struct nlm_file		*file;

	dprintk("grant_reply: looking for cookie %x, host (%08x), s=%d \n",
		*(unsigned int *)(cookie->data),
		ntohl(rqstp->rq_addr.sin_addr.s_addr), status);
	if (!(block = nlmsvc_find_block(cookie, &rqstp->rq_addr)))
		return;
	file = block->b_file;

	file->f_count++;
	down(&file->f_sema);
	if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
		if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
			/* Try again in a couple of seconds */
			nlmsvc_insert_block(block, 10 * HZ);
			block = NULL;
		} else {
			/* Lock is now held by client, or has been rejected.
			 * In both cases, the block should be removed. */
			up(&file->f_sema);
			if (status == NLM_LCK_GRANTED)
				nlmsvc_delete_block(block, 0);
			else
				nlmsvc_delete_block(block, 1);
		}
	}
	/* block == NULL here means f_sema is still held */
	if (!block)
		up(&file->f_sema);
	nlm_release_file(file);
}
655 | |||
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 *
 * Processes the head of the (expiry-sorted) nlm_blocked list until it
 * hits an NLM_NEVER entry or one that is not yet due.  Returns the
 * number of jiffies until the next block comes due, or
 * MAX_SCHEDULE_TIMEOUT when nothing is pending.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	struct nlm_block	*block;

	dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			nlm_blocked,
			nlm_blocked? nlm_blocked->b_when : 0);
	while ((block = nlm_blocked) != 0) {
		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when,jiffies))
			break;
		dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
			block, block->b_when, block->b_done);
		/* b_done: deletion was deferred while a GRANT RPC was
		 * in flight -- finish it now */
		if (block->b_done)
			nlmsvc_delete_block(block, 0);
		else
			nlmsvc_grant_blocked(block);
	}

	if ((block = nlm_blocked) && block->b_when != NLM_NEVER)
		return (block->b_when - jiffies);

	return MAX_SCHEDULE_TIMEOUT;
}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c new file mode 100644 index 000000000000..757e344cf200 --- /dev/null +++ b/fs/lockd/svcproc.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svcproc.c | ||
3 | * | ||
4 | * Lockd server procedures. We don't implement the NLM_*_RES | ||
5 | * procedures because we don't use the async procedures. | ||
6 | * | ||
7 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/in.h> | ||
15 | #include <linux/sunrpc/svc.h> | ||
16 | #include <linux/sunrpc/clnt.h> | ||
17 | #include <linux/nfsd/nfsd.h> | ||
18 | #include <linux/lockd/lockd.h> | ||
19 | #include <linux/lockd/share.h> | ||
20 | #include <linux/lockd/sm_inter.h> | ||
21 | |||
22 | |||
23 | #define NLMDBG_FACILITY NLMDBG_CLIENT | ||
24 | |||
25 | static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *); | ||
26 | static void nlmsvc_callback_exit(struct rpc_task *); | ||
27 | |||
28 | #ifdef CONFIG_LOCKD_V4 | ||
29 | static u32 | ||
30 | cast_to_nlm(u32 status, u32 vers) | ||
31 | { | ||
32 | /* Note: status is assumed to be in network byte order !!! */ | ||
33 | if (vers != 4){ | ||
34 | switch (status) { | ||
35 | case nlm_granted: | ||
36 | case nlm_lck_denied: | ||
37 | case nlm_lck_denied_nolocks: | ||
38 | case nlm_lck_blocked: | ||
39 | case nlm_lck_denied_grace_period: | ||
40 | break; | ||
41 | case nlm4_deadlock: | ||
42 | status = nlm_lck_denied; | ||
43 | break; | ||
44 | default: | ||
45 | status = nlm_lck_denied_nolocks; | ||
46 | } | ||
47 | } | ||
48 | |||
49 | return (status); | ||
50 | } | ||
51 | #define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers)) | ||
52 | #else | ||
53 | #define cast_status(status) (status) | ||
54 | #endif | ||
55 | |||
/*
 * Obtain client and file from arguments
 *
 * On success, *hostp (and *filp when filp != NULL) carry references
 * the caller must drop with nlm_release_host()/nlm_release_file().
 * Returns 0 or nlm_lck_denied_nolocks; per the note in cast_to_nlm(),
 * these status values are assumed to be in network byte order.
 */
static u32
nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
			struct nlm_host **hostp, struct nlm_file **filp)
{
	struct nlm_host		*host = NULL;
	struct nlm_file		*file = NULL;
	struct nlm_lock		*lock = &argp->lock;
	u32			error;

	/* nfsd callbacks must have been installed for this procedure */
	if (!nlmsvc_ops)
		return nlm_lck_denied_nolocks;

	/* Obtain host handle; start NSM monitoring if the client asked
	 * for it and it is not monitored yet */
	if (!(host = nlmsvc_lookup_host(rqstp))
	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
		goto no_locks;
	*hostp = host;

	/* Obtain file pointer. Not used by FREE_ALL call. */
	if (filp != NULL) {
		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
			goto no_locks;
		*filp = file;

		/* Set up the missing parts of the file_lock structure */
		lock->fl.fl_file  = file->f_file;
		lock->fl.fl_owner = (fl_owner_t) host;
		lock->fl.fl_lmops = &nlmsvc_lock_operations;
	}

	return 0;

no_locks:
	/* Drop the host reference taken above; the file lookup either
	 * failed or was never attempted, so no file reference is held. */
	if (host)
		nlm_release_host(host);
	return nlm_lck_denied_nolocks;
}
97 | |||
/*
 * NULL: Test for presence of service
 *
 * Standard RPC ping procedure: takes no arguments, returns no result.
 */
static int
nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	dprintk("lockd: NULL          called\n");
	return rpc_success;
}
107 | |||
108 | /* | ||
109 | * TEST: Check for conflicting lock | ||
110 | */ | ||
111 | static int | ||
112 | nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
113 | struct nlm_res *resp) | ||
114 | { | ||
115 | struct nlm_host *host; | ||
116 | struct nlm_file *file; | ||
117 | |||
118 | dprintk("lockd: TEST called\n"); | ||
119 | resp->cookie = argp->cookie; | ||
120 | |||
121 | /* Don't accept test requests during grace period */ | ||
122 | if (nlmsvc_grace_period) { | ||
123 | resp->status = nlm_lck_denied_grace_period; | ||
124 | return rpc_success; | ||
125 | } | ||
126 | |||
127 | /* Obtain client and file */ | ||
128 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | ||
129 | return rpc_success; | ||
130 | |||
131 | /* Now check for conflicting locks */ | ||
132 | resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock)); | ||
133 | |||
134 | dprintk("lockd: TEST status %d vers %d\n", | ||
135 | ntohl(resp->status), rqstp->rq_vers); | ||
136 | nlm_release_host(host); | ||
137 | nlm_release_file(file); | ||
138 | return rpc_success; | ||
139 | } | ||
140 | |||
/*
 * LOCK: acquire (or, if argp->block, queue) a lock on behalf of a client.
 * The result status goes into resp; the RPC layer always sees rpc_success.
 */
static int
nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				         struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: LOCK called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period;
	 * reclaims of pre-crash locks are the exception */
	if (nlmsvc_grace_period && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

#if 0
	/* If supplied state doesn't match current state, we assume it's
	 * an old request that time-warped somehow. Any error return would
	 * do in this case because it's irrelevant anyway.
	 *
	 * NB: We don't retrieve the remote host's state yet.
	 */
	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
		resp->status = nlm_lck_denied_nolocks;
	} else
#endif

	/* Now try to lock the file */
	resp->status = cast_status(nlmsvc_lock(rqstp, file, &argp->lock,
					       argp->block, &argp->cookie));

	dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
183 | |||
184 | static int | ||
185 | nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
186 | struct nlm_res *resp) | ||
187 | { | ||
188 | struct nlm_host *host; | ||
189 | struct nlm_file *file; | ||
190 | |||
191 | dprintk("lockd: CANCEL called\n"); | ||
192 | |||
193 | resp->cookie = argp->cookie; | ||
194 | |||
195 | /* Don't accept requests during grace period */ | ||
196 | if (nlmsvc_grace_period) { | ||
197 | resp->status = nlm_lck_denied_grace_period; | ||
198 | return rpc_success; | ||
199 | } | ||
200 | |||
201 | /* Obtain client and file */ | ||
202 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | ||
203 | return rpc_success; | ||
204 | |||
205 | /* Try to cancel request. */ | ||
206 | resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); | ||
207 | |||
208 | dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); | ||
209 | nlm_release_host(host); | ||
210 | nlm_release_file(file); | ||
211 | return rpc_success; | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * UNLOCK: release a lock | ||
216 | */ | ||
217 | static int | ||
218 | nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
219 | struct nlm_res *resp) | ||
220 | { | ||
221 | struct nlm_host *host; | ||
222 | struct nlm_file *file; | ||
223 | |||
224 | dprintk("lockd: UNLOCK called\n"); | ||
225 | |||
226 | resp->cookie = argp->cookie; | ||
227 | |||
228 | /* Don't accept new lock requests during grace period */ | ||
229 | if (nlmsvc_grace_period) { | ||
230 | resp->status = nlm_lck_denied_grace_period; | ||
231 | return rpc_success; | ||
232 | } | ||
233 | |||
234 | /* Obtain client and file */ | ||
235 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | ||
236 | return rpc_success; | ||
237 | |||
238 | /* Now try to remove the lock */ | ||
239 | resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); | ||
240 | |||
241 | dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); | ||
242 | nlm_release_host(host); | ||
243 | nlm_release_file(file); | ||
244 | return rpc_success; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * GRANTED: A server calls us to tell that a process' lock request | ||
249 | * was granted | ||
250 | */ | ||
251 | static int | ||
252 | nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
253 | struct nlm_res *resp) | ||
254 | { | ||
255 | resp->cookie = argp->cookie; | ||
256 | |||
257 | dprintk("lockd: GRANTED called\n"); | ||
258 | resp->status = nlmclnt_grant(&argp->lock); | ||
259 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); | ||
260 | return rpc_success; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * `Async' versions of the above service routines. They aren't really, | ||
265 | * because we send the callback before the reply proper. I hope this | ||
266 | * doesn't break any clients. | ||
267 | */ | ||
268 | static int | ||
269 | nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
270 | void *resp) | ||
271 | { | ||
272 | struct nlm_res res; | ||
273 | u32 stat; | ||
274 | |||
275 | dprintk("lockd: TEST_MSG called\n"); | ||
276 | memset(&res, 0, sizeof(res)); | ||
277 | |||
278 | if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0) | ||
279 | stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res); | ||
280 | return stat; | ||
281 | } | ||
282 | |||
283 | static int | ||
284 | nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
285 | void *resp) | ||
286 | { | ||
287 | struct nlm_res res; | ||
288 | u32 stat; | ||
289 | |||
290 | dprintk("lockd: LOCK_MSG called\n"); | ||
291 | memset(&res, 0, sizeof(res)); | ||
292 | |||
293 | if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0) | ||
294 | stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res); | ||
295 | return stat; | ||
296 | } | ||
297 | |||
298 | static int | ||
299 | nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
300 | void *resp) | ||
301 | { | ||
302 | struct nlm_res res; | ||
303 | u32 stat; | ||
304 | |||
305 | dprintk("lockd: CANCEL_MSG called\n"); | ||
306 | memset(&res, 0, sizeof(res)); | ||
307 | |||
308 | if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0) | ||
309 | stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res); | ||
310 | return stat; | ||
311 | } | ||
312 | |||
313 | static int | ||
314 | nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
315 | void *resp) | ||
316 | { | ||
317 | struct nlm_res res; | ||
318 | u32 stat; | ||
319 | |||
320 | dprintk("lockd: UNLOCK_MSG called\n"); | ||
321 | memset(&res, 0, sizeof(res)); | ||
322 | |||
323 | if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0) | ||
324 | stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res); | ||
325 | return stat; | ||
326 | } | ||
327 | |||
328 | static int | ||
329 | nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
330 | void *resp) | ||
331 | { | ||
332 | struct nlm_res res; | ||
333 | u32 stat; | ||
334 | |||
335 | dprintk("lockd: GRANTED_MSG called\n"); | ||
336 | memset(&res, 0, sizeof(res)); | ||
337 | |||
338 | if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0) | ||
339 | stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res); | ||
340 | return stat; | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * SHARE: create a DOS share or alter existing share. | ||
345 | */ | ||
346 | static int | ||
347 | nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
348 | struct nlm_res *resp) | ||
349 | { | ||
350 | struct nlm_host *host; | ||
351 | struct nlm_file *file; | ||
352 | |||
353 | dprintk("lockd: SHARE called\n"); | ||
354 | |||
355 | resp->cookie = argp->cookie; | ||
356 | |||
357 | /* Don't accept new lock requests during grace period */ | ||
358 | if (nlmsvc_grace_period && !argp->reclaim) { | ||
359 | resp->status = nlm_lck_denied_grace_period; | ||
360 | return rpc_success; | ||
361 | } | ||
362 | |||
363 | /* Obtain client and file */ | ||
364 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | ||
365 | return rpc_success; | ||
366 | |||
367 | /* Now try to create the share */ | ||
368 | resp->status = cast_status(nlmsvc_share_file(host, file, argp)); | ||
369 | |||
370 | dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); | ||
371 | nlm_release_host(host); | ||
372 | nlm_release_file(file); | ||
373 | return rpc_success; | ||
374 | } | ||
375 | |||
376 | /* | ||
377 | * UNSHARE: Release a DOS share. | ||
378 | */ | ||
379 | static int | ||
380 | nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
381 | struct nlm_res *resp) | ||
382 | { | ||
383 | struct nlm_host *host; | ||
384 | struct nlm_file *file; | ||
385 | |||
386 | dprintk("lockd: UNSHARE called\n"); | ||
387 | |||
388 | resp->cookie = argp->cookie; | ||
389 | |||
390 | /* Don't accept requests during grace period */ | ||
391 | if (nlmsvc_grace_period) { | ||
392 | resp->status = nlm_lck_denied_grace_period; | ||
393 | return rpc_success; | ||
394 | } | ||
395 | |||
396 | /* Obtain client and file */ | ||
397 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | ||
398 | return rpc_success; | ||
399 | |||
400 | /* Now try to unshare the file */ | ||
401 | resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); | ||
402 | |||
403 | dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); | ||
404 | nlm_release_host(host); | ||
405 | nlm_release_file(file); | ||
406 | return rpc_success; | ||
407 | } | ||
408 | |||
/*
 * NM_LOCK: Create an unmonitored lock
 *
 * Identical to LOCK except that the client will not be monitored by
 * statd, so the lock is not reclaimed after a client reboot.
 */
static int
nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res *resp)
{
	dprintk("lockd: NM_LOCK called\n");

	argp->monitor = 0;		/* just clean the monitor flag */
	return nlmsvc_proc_lock(rqstp, argp, resp);
}
421 | |||
422 | /* | ||
423 | * FREE_ALL: Release all locks and shares held by client | ||
424 | */ | ||
425 | static int | ||
426 | nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
427 | void *resp) | ||
428 | { | ||
429 | struct nlm_host *host; | ||
430 | |||
431 | /* Obtain client */ | ||
432 | if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) | ||
433 | return rpc_success; | ||
434 | |||
435 | nlmsvc_free_host_resources(host); | ||
436 | nlm_release_host(host); | ||
437 | return rpc_success; | ||
438 | } | ||
439 | |||
/*
 * SM_NOTIFY: private callback from statd (not part of official NLM proto)
 *
 * statd tells us that a peer rebooted.  argp->proto packs two things:
 * bit 0 selects which side we act as (0 = we are the NLM client and
 * must reclaim our locks; 1 = we are the server and must drop the
 * peer's locks), and the remaining bits (proto >> 1) carry the RPC
 * transport protocol for the host lookup.
 */
static int
nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
					      void *resp)
{
	struct sockaddr_in	saddr = rqstp->rq_addr;
	int			vers = argp->vers;
	int			prot = argp->proto >> 1;
	struct nlm_host		*host;

	dprintk("lockd: SM_NOTIFY called\n");
	/* Only accept the callback from a privileged port on loopback,
	 * i.e. from the local statd; reject anything else. */
	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
	 || ntohs(saddr.sin_port) >= 1024) {
		printk(KERN_WARNING
			"lockd: rejected NSM callback from %08x:%d\n",
			ntohl(rqstp->rq_addr.sin_addr.s_addr),
			ntohs(rqstp->rq_addr.sin_port));
		return rpc_system_err;
	}

	/* Obtain the host pointer for this NFS server and try to
	 * reclaim all locks we hold on this server.
	 */
	saddr.sin_addr.s_addr = argp->addr;
	if ((argp->proto & 1)==0) {
		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
			nlmclnt_recovery(host, argp->state);
			nlm_release_host(host);
		}
	} else {
		/* If we run on an NFS server, delete all locks held by the client */
		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
			nlmsvc_free_host_resources(host);
			nlm_release_host(host);
		}
	}

	return rpc_success;
}
481 | |||
482 | /* | ||
483 | * client sent a GRANTED_RES, let's remove the associated block | ||
484 | */ | ||
485 | static int | ||
486 | nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, | ||
487 | void *resp) | ||
488 | { | ||
489 | if (!nlmsvc_ops) | ||
490 | return rpc_success; | ||
491 | |||
492 | dprintk("lockd: GRANTED_RES called\n"); | ||
493 | |||
494 | nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status); | ||
495 | return rpc_success; | ||
496 | } | ||
497 | |||
/*
 * This is the generic lockd callback for async RPC calls
 *
 * Allocates an nlm_rqst, references the peer host, and fires an async
 * RPC to it; ownership of both passes to nlmsvc_callback_exit(), which
 * frees them when the RPC completes.  On failure everything is released
 * here and rpc_system_err is returned.
 */
static u32
nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_rqst	*call;

	if (!(call = nlmclnt_alloc_call()))
		return rpc_system_err;

	host = nlmclnt_lookup_host(&rqstp->rq_addr,
				rqstp->rq_prot, rqstp->rq_vers);
	if (!host) {
		kfree(call);
		return rpc_system_err;
	}

	call->a_flags = RPC_TASK_ASYNC;
	call->a_host  = host;
	/* NOTE(review): copies an nlm_res over the nlm_args field —
	 * presumably the *_RES XDR encoders read it back as an nlm_res;
	 * confirm the two layouts overlap as intended. */
	memcpy(&call->a_args, resp, sizeof(*resp));

	if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
		goto error;

	return rpc_success;
 error:
	nlm_release_host(host);
	kfree(call);
	return rpc_system_err;
}
530 | |||
531 | static void | ||
532 | nlmsvc_callback_exit(struct rpc_task *task) | ||
533 | { | ||
534 | struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata; | ||
535 | |||
536 | if (task->tk_status < 0) { | ||
537 | dprintk("lockd: %4d callback failed (errno = %d)\n", | ||
538 | task->tk_pid, -task->tk_status); | ||
539 | } | ||
540 | nlm_release_host(call->a_host); | ||
541 | kfree(call); | ||
542 | } | ||
543 | |||
/*
 * NLM Server procedures.
 */

/* The *_RES procedures carry no reply body, and the *_MSG procedures
 * decode no result — alias them all to the void XDR routines. */
#define nlmsvc_encode_norep	nlmsvc_encode_void
#define nlmsvc_decode_norep	nlmsvc_decode_void
#define nlmsvc_decode_testres	nlmsvc_decode_void
#define nlmsvc_decode_lockres	nlmsvc_decode_void
#define nlmsvc_decode_unlockres	nlmsvc_decode_void
#define nlmsvc_decode_cancelres	nlmsvc_decode_void
#define nlmsvc_decode_grantedres	nlmsvc_decode_void

/* Procedures we do not implement are served by the NULL handler. */
#define nlmsvc_proc_none	nlmsvc_proc_null
#define nlmsvc_proc_test_res	nlmsvc_proc_null
#define nlmsvc_proc_lock_res	nlmsvc_proc_null
#define nlmsvc_proc_cancel_res	nlmsvc_proc_null
#define nlmsvc_proc_unlock_res	nlmsvc_proc_null

/* Placeholder argument/result type for procedures that take/return nothing. */
struct nlm_void			{ int dummy; };

#define PROC(name, xargt, xrest, argt, rest, respsize)	\
 { .pc_func	= (svc_procfunc) nlmsvc_proc_##name,	\
   .pc_decode	= (kxdrproc_t) nlmsvc_decode_##xargt,	\
   .pc_encode	= (kxdrproc_t) nlmsvc_encode_##xrest,	\
   .pc_release	= NULL,					\
   .pc_argsize	= sizeof(struct nlm_##argt),		\
   .pc_ressize	= sizeof(struct nlm_##rest),		\
   .pc_xdrressize = respsize,				\
 }

/* Reply-size building blocks, in XDR words */
#define Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
#define St	1				/* status */
#define	No	(1+1024/4)			/* Net Obj */
#define	Rg	2				/* range - offset + size */

/* Indexed by NLM procedure number; entries must stay in protocol order. */
struct svc_procedure		nlmsvc_procedures[] = {
  PROC(null,		void,		void,		void,	void, 1),
  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
  PROC(granted,		testargs,	res,		args,	res, Ck+St),
  PROC(test_msg,	testargs,	norep,		args,	void, 1),
  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
  PROC(test_res,	testres,	norep,		res,	void, 1),
  PROC(lock_res,	lockres,	norep,		res,	void, 1),
  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
  PROC(granted_res,	res,		norep,		res,	void, 1),
  /* statd callback */
  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
  PROC(none,		void,		void,		void,	void, 1),
  PROC(none,		void,		void,		void,	void, 1),
  PROC(none,		void,		void,		void,	void, 1),
  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(free_all,	notify,		void,		args,	void, 0),

};
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c new file mode 100644 index 000000000000..4943fb7836ce --- /dev/null +++ b/fs/lockd/svcshare.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svcshare.c | ||
3 | * | ||
4 | * Management of DOS shares. | ||
5 | * | ||
6 | * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/time.h> | ||
10 | #include <linux/unistd.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/slab.h> | ||
13 | |||
14 | #include <linux/sunrpc/clnt.h> | ||
15 | #include <linux/sunrpc/svc.h> | ||
16 | #include <linux/lockd/lockd.h> | ||
17 | #include <linux/lockd/share.h> | ||
18 | |||
19 | static inline int | ||
20 | nlm_cmp_owner(struct nlm_share *share, struct xdr_netobj *oh) | ||
21 | { | ||
22 | return share->s_owner.len == oh->len | ||
23 | && !memcmp(share->s_owner.data, oh->data, oh->len); | ||
24 | } | ||
25 | |||
/*
 * Create a DOS share reservation on @file for @host, or update an
 * existing one with the same owner handle.  Returns nlm_granted,
 * nlm_lck_denied on a mode/access conflict with another share, or
 * nlm_lck_denied_nolocks on allocation failure.
 */
u32
nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
			struct nlm_args *argp)
{
	struct nlm_share	*share;
	struct xdr_netobj	*oh = &argp->lock.oh;
	u8			*ohdata;

	for (share = file->f_shares; share; share = share->s_next) {
		/* Same host and owner: just refresh the existing share. */
		if (share->s_host == host && nlm_cmp_owner(share, oh))
			goto update;
		/* Conflict: our access vs their deny mode, or vice versa. */
		if ((argp->fsm_access & share->s_mode)
		 || (argp->fsm_mode   & share->s_access ))
			return nlm_lck_denied;
	}

	/* One allocation holds the share and its owner-handle copy. */
	share = (struct nlm_share *) kmalloc(sizeof(*share) + oh->len,
						GFP_KERNEL);
	if (share == NULL)
		return nlm_lck_denied_nolocks;

	/* Copy owner handle */
	ohdata = (u8 *) (share + 1);
	memcpy(ohdata, oh->data, oh->len);

	share->s_file	    = file;
	share->s_host       = host;
	share->s_owner.data = ohdata;
	share->s_owner.len  = oh->len;
	share->s_next	    = file->f_shares;
	file->f_shares	    = share;

update:
	/* Reached both for a fresh share and an existing one. */
	share->s_access = argp->fsm_access;
	share->s_mode   = argp->fsm_mode;
	return nlm_granted;
}
63 | |||
64 | /* | ||
65 | * Delete a share. | ||
66 | */ | ||
67 | u32 | ||
68 | nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file, | ||
69 | struct nlm_args *argp) | ||
70 | { | ||
71 | struct nlm_share *share, **shpp; | ||
72 | struct xdr_netobj *oh = &argp->lock.oh; | ||
73 | |||
74 | for (shpp = &file->f_shares; (share = *shpp) != 0; shpp = &share->s_next) { | ||
75 | if (share->s_host == host && nlm_cmp_owner(share, oh)) { | ||
76 | *shpp = share->s_next; | ||
77 | kfree(share); | ||
78 | return nlm_granted; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /* X/Open spec says return success even if there was no | ||
83 | * corresponding share. */ | ||
84 | return nlm_granted; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Traverse all shares for a given file (and host). | ||
89 | * NLM_ACT_CHECK is handled by nlmsvc_inspect_file. | ||
90 | */ | ||
91 | int | ||
92 | nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) | ||
93 | { | ||
94 | struct nlm_share *share, **shpp; | ||
95 | |||
96 | shpp = &file->f_shares; | ||
97 | while ((share = *shpp) != NULL) { | ||
98 | if (action == NLM_ACT_MARK) | ||
99 | share->s_host->h_inuse = 1; | ||
100 | else if (action == NLM_ACT_UNLOCK) { | ||
101 | if (host == NULL || host == share->s_host) { | ||
102 | *shpp = share->s_next; | ||
103 | kfree(share); | ||
104 | continue; | ||
105 | } | ||
106 | } | ||
107 | shpp = &share->s_next; | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c new file mode 100644 index 000000000000..de7536358c7c --- /dev/null +++ b/fs/lockd/svcsubs.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/svcsubs.c | ||
3 | * | ||
4 | * Various support routines for the NLM server. | ||
5 | * | ||
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <linux/in.h> | ||
14 | #include <linux/sunrpc/svc.h> | ||
15 | #include <linux/sunrpc/clnt.h> | ||
16 | #include <linux/nfsd/nfsfh.h> | ||
17 | #include <linux/nfsd/export.h> | ||
18 | #include <linux/lockd/lockd.h> | ||
19 | #include <linux/lockd/share.h> | ||
20 | #include <linux/lockd/sm_inter.h> | ||
21 | |||
22 | #define NLMDBG_FACILITY NLMDBG_SVCSUBS | ||
23 | |||
24 | |||
25 | /* | ||
26 | * Global file hash table | ||
27 | */ | ||
28 | #define FILE_HASH_BITS 5 | ||
29 | #define FILE_NRHASH (1<<FILE_HASH_BITS) | ||
30 | static struct nlm_file * nlm_files[FILE_NRHASH]; | ||
31 | static DECLARE_MUTEX(nlm_file_sema); | ||
32 | |||
33 | static inline unsigned int file_hash(struct nfs_fh *f) | ||
34 | { | ||
35 | unsigned int tmp=0; | ||
36 | int i; | ||
37 | for (i=0; i<NFS2_FHSIZE;i++) | ||
38 | tmp += f->data[i]; | ||
39 | return tmp & (FILE_NRHASH - 1); | ||
40 | } | ||
41 | |||
/*
 * Lookup file info. If it doesn't exist, create a file info struct
 * and open a (VFS) file for the given inode.
 *
 * On success, *result holds a referenced nlm_file; drop it with
 * nlm_release_file().  Returns 0 or an NLM status code.
 *
 * FIXME:
 * Note that we open the file O_RDONLY even when creating write locks.
 * This is not quite right, but for now, we assume the client performs
 * the proper R/W checking.
 */
u32
nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
					struct nfs_fh *f)
{
	struct nlm_file	*file;
	unsigned int	hash;
	u32		nfserr;
	u32		*fhp = (u32*)f->data;

	dprintk("lockd: nlm_file_lookup(%08x %08x %08x %08x %08x %08x)\n",
		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);


	hash = file_hash(f);

	/* Lock file table */
	down(&nlm_file_sema);

	/* Reuse an existing entry with the same file handle. */
	for (file = nlm_files[hash]; file; file = file->f_next)
		if (!nfs_compare_fh(&file->f_handle, f))
			goto found;

	dprintk("lockd: creating file for (%08x %08x %08x %08x %08x %08x)\n",
		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);

	nfserr = nlm_lck_denied_nolocks;
	file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out_unlock;

	memset(file, 0, sizeof(*file));
	memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
	file->f_hash = hash;
	init_MUTEX(&file->f_sema);

	/* Open the file. Note that this must not sleep for too long, else
	 * we would lock up lockd:-) So no NFS re-exports, folks.
	 *
	 * We have to make sure we have the right credential to open
	 * the file.
	 */
	if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
		dprintk("lockd: open failed (nfserr %d)\n", ntohl(nfserr));
		goto out_free;
	}

	file->f_next	= nlm_files[hash];
	nlm_files[hash] = file;

found:
	dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
	*result	= file;
	file->f_count++;
	nfserr	= 0;

out_unlock:
	up(&nlm_file_sema);
	return nfserr;

out_free:
	kfree(file);
	/* NOTE(review): nfserr == 1 appears to be fopen's stale-filehandle
	 * indicator — confirm against the nlmsvc_ops->fopen implementation. */
#ifdef CONFIG_LOCKD_V4
	if (nfserr == 1)
		nfserr = nlm4_stale_fh;
	else
#endif
	nfserr = nlm_lck_denied;
	goto out_unlock;
}
120 | |||
121 | /* | ||
122 | * Delete a file after having released all locks, blocks and shares | ||
123 | */ | ||
124 | static inline void | ||
125 | nlm_delete_file(struct nlm_file *file) | ||
126 | { | ||
127 | struct inode *inode = file->f_file->f_dentry->d_inode; | ||
128 | struct nlm_file **fp, *f; | ||
129 | |||
130 | dprintk("lockd: closing file %s/%ld\n", | ||
131 | inode->i_sb->s_id, inode->i_ino); | ||
132 | fp = nlm_files + file->f_hash; | ||
133 | while ((f = *fp) != NULL) { | ||
134 | if (f == file) { | ||
135 | *fp = file->f_next; | ||
136 | nlmsvc_ops->fclose(file->f_file); | ||
137 | kfree(file); | ||
138 | return; | ||
139 | } | ||
140 | fp = &f->f_next; | ||
141 | } | ||
142 | |||
143 | printk(KERN_WARNING "lockd: attempt to release unknown file!\n"); | ||
144 | } | ||
145 | |||
/*
 * Loop over all locks on the given file and perform the specified
 * action.
 *
 * Also recomputes file->f_locks as a side effect.  Returns 1 if a
 * lock was found (NLM_ACT_CHECK) or an unlock failed; 0 otherwise.
 */
static int
nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action)
{
	struct inode	 *inode = nlmsvc_file_inode(file);
	struct file_lock *fl;
	struct nlm_host	 *lockhost;

again:
	file->f_locks = 0;
	for (fl = inode->i_flock; fl; fl = fl->fl_next) {
		/* Only consider locks created by lockd itself. */
		if (!(fl->fl_flags & FL_LOCKD))
			continue;

		/* update current lock count */
		file->f_locks++;
		lockhost = (struct nlm_host *) fl->fl_owner;
		if (action == NLM_ACT_MARK)
			lockhost->h_inuse = 1;
		else if (action == NLM_ACT_CHECK)
			return 1;
		else if (action == NLM_ACT_UNLOCK) {
			struct file_lock lock = *fl;

			/* host == NULL means "unlock for all hosts" */
			if (host && lockhost != host)
				continue;

			lock.fl_type  = F_UNLCK;
			lock.fl_start = 0;
			lock.fl_end   = OFFSET_MAX;
			if (posix_lock_file(file->f_file, &lock) < 0) {
				printk("lockd: unlock failure in %s:%d\n",
						__FILE__, __LINE__);
				return 1;
			}
			/* posix_lock_file may have merged/split/removed
			 * entries, invalidating fl — restart the scan. */
			goto again;
		}
	}

	return 0;
}
190 | |||
191 | /* | ||
192 | * Operate on a single file | ||
193 | */ | ||
194 | static inline int | ||
195 | nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action) | ||
196 | { | ||
197 | if (action == NLM_ACT_CHECK) { | ||
198 | /* Fast path for mark and sweep garbage collection */ | ||
199 | if (file->f_count || file->f_blocks || file->f_shares) | ||
200 | return 1; | ||
201 | } else { | ||
202 | if (nlmsvc_traverse_blocks(host, file, action) | ||
203 | || nlmsvc_traverse_shares(host, file, action)) | ||
204 | return 1; | ||
205 | } | ||
206 | return nlm_traverse_locks(host, file, action); | ||
207 | } | ||
208 | |||
/*
 * Loop over all files in the file table.
 *
 * Frees entries that end up with no locks, blocks, shares or
 * references.  Returns 1 (with the table already unlocked) as soon
 * as nlm_inspect_file() reports a hit, 0 after a full sweep.
 */
static int
nlm_traverse_files(struct nlm_host *host, int action)
{
	struct nlm_file	*file, **fp;
	int		i;

	down(&nlm_file_sema);
	for (i = 0; i < FILE_NRHASH; i++) {
		fp = nlm_files + i;
		while ((file = *fp) != NULL) {
			/* Traverse locks, blocks and shares of this file
			 * and update file->f_locks count */
			if (nlm_inspect_file(host, file, action)) {
				up(&nlm_file_sema);
				return 1;
			}

			/* No more references to this file. Let go of it. */
			if (!file->f_blocks && !file->f_locks
			 && !file->f_shares && !file->f_count) {
				*fp = file->f_next;
				nlmsvc_ops->fclose(file->f_file);
				kfree(file);
			} else {
				/* Keep the entry; advance past it. */
				fp = &file->f_next;
			}
		}
	}
	up(&nlm_file_sema);
	return 0;
}
243 | |||
244 | /* | ||
245 | * Release file. If there are no more remote locks on this file, | ||
246 | * close it and free the handle. | ||
247 | * | ||
248 | * Note that we can't do proper reference counting without major | ||
249 | * contortions because the code in fs/locks.c creates, deletes and | ||
250 | * splits locks without notification. Our only way is to walk the | ||
251 | * entire lock list each time we remove a lock. | ||
252 | */ | ||
253 | void | ||
254 | nlm_release_file(struct nlm_file *file) | ||
255 | { | ||
256 | dprintk("lockd: nlm_release_file(%p, ct = %d)\n", | ||
257 | file, file->f_count); | ||
258 | |||
259 | /* Lock file table */ | ||
260 | down(&nlm_file_sema); | ||
261 | |||
262 | /* If there are no more locks etc, delete the file */ | ||
263 | if(--file->f_count == 0) { | ||
264 | if(!nlm_inspect_file(NULL, file, NLM_ACT_CHECK)) | ||
265 | nlm_delete_file(file); | ||
266 | } | ||
267 | |||
268 | up(&nlm_file_sema); | ||
269 | } | ||
270 | |||
/*
 * Mark all hosts that still hold resources
 *
 * Mark phase of host garbage collection: sets h_inuse on every host
 * that still owns a lock, block or share in the file table.
 */
void
nlmsvc_mark_resources(void)
{
	dprintk("lockd: nlmsvc_mark_resources\n");

	nlm_traverse_files(NULL, NLM_ACT_MARK);
}
281 | |||
282 | /* | ||
283 | * Release all resources held by the given client | ||
284 | */ | ||
285 | void | ||
286 | nlmsvc_free_host_resources(struct nlm_host *host) | ||
287 | { | ||
288 | dprintk("lockd: nlmsvc_free_host_resources\n"); | ||
289 | |||
290 | if (nlm_traverse_files(host, NLM_ACT_UNLOCK)) | ||
291 | printk(KERN_WARNING | ||
292 | "lockd: couldn't remove all locks held by %s", | ||
293 | host->h_name); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * delete all hosts structs for clients | ||
298 | */ | ||
299 | void | ||
300 | nlmsvc_invalidate_all(void) | ||
301 | { | ||
302 | struct nlm_host *host; | ||
303 | while ((host = nlm_find_client()) != NULL) { | ||
304 | nlmsvc_free_host_resources(host); | ||
305 | host->h_expires = 0; | ||
306 | host->h_killed = 1; | ||
307 | nlm_release_host(host); | ||
308 | } | ||
309 | } | ||
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c new file mode 100644 index 000000000000..f01e9c0d2677 --- /dev/null +++ b/fs/lockd/xdr.c | |||
@@ -0,0 +1,635 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/xdr.c | ||
3 | * | ||
4 | * XDR support for lockd and the lock client. | ||
5 | * | ||
6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/utsname.h> | ||
13 | #include <linux/nfs.h> | ||
14 | |||
15 | #include <linux/sunrpc/xdr.h> | ||
16 | #include <linux/sunrpc/clnt.h> | ||
17 | #include <linux/sunrpc/svc.h> | ||
18 | #include <linux/sunrpc/stats.h> | ||
19 | #include <linux/lockd/lockd.h> | ||
20 | #include <linux/lockd/sm_inter.h> | ||
21 | |||
22 | #define NLMDBG_FACILITY NLMDBG_XDR | ||
23 | |||
24 | |||
static inline loff_t
s32_to_loff_t(__s32 offset)
{
	/* Sign-extending widen from the 32-bit wire offset; cannot
	 * overflow. */
	return (loff_t)offset;
}
30 | |||
31 | static inline __s32 | ||
32 | loff_t_to_s32(loff_t offset) | ||
33 | { | ||
34 | __s32 res; | ||
35 | if (offset >= NLM_OFFSET_MAX) | ||
36 | res = NLM_OFFSET_MAX; | ||
37 | else if (offset <= -NLM_OFFSET_MAX) | ||
38 | res = -NLM_OFFSET_MAX; | ||
39 | else | ||
40 | res = offset; | ||
41 | return res; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * XDR functions for basic NLM types | ||
46 | */ | ||
47 | static inline u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c) | ||
48 | { | ||
49 | unsigned int len; | ||
50 | |||
51 | len = ntohl(*p++); | ||
52 | |||
53 | if(len==0) | ||
54 | { | ||
55 | c->len=4; | ||
56 | memset(c->data, 0, 4); /* hockeypux brain damage */ | ||
57 | } | ||
58 | else if(len<=NLM_MAXCOOKIELEN) | ||
59 | { | ||
60 | c->len=len; | ||
61 | memcpy(c->data, p, len); | ||
62 | p+=XDR_QUADLEN(len); | ||
63 | } | ||
64 | else | ||
65 | { | ||
66 | printk(KERN_NOTICE | ||
67 | "lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n", len, NLM_MAXCOOKIELEN); | ||
68 | return NULL; | ||
69 | } | ||
70 | return p; | ||
71 | } | ||
72 | |||
73 | static inline u32 * | ||
74 | nlm_encode_cookie(u32 *p, struct nlm_cookie *c) | ||
75 | { | ||
76 | *p++ = htonl(c->len); | ||
77 | memcpy(p, c->data, c->len); | ||
78 | p+=XDR_QUADLEN(c->len); | ||
79 | return p; | ||
80 | } | ||
81 | |||
static inline u32 *
nlm_decode_fh(u32 *p, struct nfs_fh *f)
{
	unsigned int len;

	/* NLM v1/v3 always carries a fixed-size NFSv2 file handle */
	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
		printk(KERN_NOTICE
			"lockd: bad fhandle size %d (should be %d)\n",
			len, NFS2_FHSIZE);
		return NULL;
	}
	f->size = NFS2_FHSIZE;
	/* zero the full buffer first so no stale tail bytes remain */
	memset(f->data, 0, sizeof(f->data));
	memcpy(f->data, p, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}
98 | |||
static inline u32 *
nlm_encode_fh(u32 *p, struct nfs_fh *f)
{
	/* fixed NFSv2 handle size on the wire, regardless of f->size */
	*p++ = htonl(NFS2_FHSIZE);
	memcpy(p, f->data, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}
106 | |||
/*
 * Encode and decode owner handle
 */
static inline u32 *
nlm_decode_oh(u32 *p, struct xdr_netobj *oh)
{
	/* the owner handle is an opaque netobj */
	return xdr_decode_netobj(p, oh);
}
115 | |||
static inline u32 *
nlm_encode_oh(u32 *p, struct xdr_netobj *oh)
{
	/* the owner handle is an opaque netobj */
	return xdr_encode_netobj(p, oh);
}
121 | |||
static inline u32 *
nlm_decode_lock(u32 *p, struct nlm_lock *lock)
{
	struct file_lock *fl = &lock->fl;
	s32 start, len, end;

	/* caller name, file handle, owner handle */
	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len,
					    NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return NULL;

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid = ntohl(*p++);
	fl->fl_flags = FL_POSIX;
	fl->fl_type = F_RDLCK; /* as good as anything else */
	start = ntohl(*p++);
	len = ntohl(*p++);
	end = start + len - 1;

	fl->fl_start = s32_to_loff_t(start);

	/* len == 0 means "lock to end of file"; a negative end means
	 * the 32-bit start+len arithmetic wrapped, treated the same */
	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s32_to_loff_t(end);
	return p;
}
152 | |||
/*
 * Encode a lock as part of an NLM call
 */
static u32 *
nlm_encode_lock(u32 *p, struct nlm_lock *lock)
{
	struct file_lock *fl = &lock->fl;
	__s32 start, len;

	/* caller name, file handle, owner handle */
	if (!(p = xdr_encode_string(p, lock->caller))
	 || !(p = nlm_encode_fh(p, &lock->fh))
	 || !(p = nlm_encode_oh(p, &lock->oh)))
		return NULL;

	/* Refuse ranges the 32-bit wire format cannot express;
	 * fl_end == OFFSET_MAX itself means "to end of file" and is
	 * allowed. */
	if (fl->fl_start > NLM_OFFSET_MAX
	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
		return NULL;

	start = loff_t_to_s32(fl->fl_start);
	if (fl->fl_end == OFFSET_MAX)
		len = 0;	/* zero length encodes "to end of file" */
	else
		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

	*p++ = htonl(fl->fl_pid);
	*p++ = htonl(start);
	*p++ = htonl(len);

	return p;
}
183 | |||
/*
 * Encode result of a TEST/TEST_MSG call
 */
static u32 *
nlm_encode_testres(u32 *p, struct nlm_res *resp)
{
	s32 start, len;

	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return NULL;
	/* NOTE(review): status is written without htonl, so it appears
	 * to be kept in wire byte order in struct nlm_res — confirm
	 * against the callers that set it. */
	*p++ = resp->status;

	if (resp->status == nlm_lck_denied) {
		/* a conflicting holder exists: describe its lock */
		struct file_lock *fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
		*p++ = htonl(fl->fl_pid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return NULL;

		start = loff_t_to_s32(fl->fl_start);
		if (fl->fl_end == OFFSET_MAX)
			len = 0;	/* zero length == to end of file */
		else
			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

		*p++ = htonl(start);
		*p++ = htonl(len);
	}

	return p;
}
218 | |||
219 | |||
/*
 * First, the server side XDR functions
 */
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	u32 exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;

	/* the lock decodes as F_RDLCK by default and is upgraded
	 * below when the exclusive flag is set */
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;

	return xdr_argsize_check(rqstp, p);
}
239 | |||
240 | int | ||
241 | nlmsvc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
242 | { | ||
243 | if (!(p = nlm_encode_testres(p, resp))) | ||
244 | return 0; | ||
245 | return xdr_ressize_check(rqstp, p); | ||
246 | } | ||
247 | |||
int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	u32 exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);	/* caller willing to block */
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	argp->reclaim = ntohl(*p++);	/* reclaim after server reboot */
	argp->state = ntohl(*p++);	/* caller's state counter */
	argp->monitor = 1; /* monitor client by default */

	return xdr_argsize_check(rqstp, p);
}
267 | |||
268 | int | ||
269 | nlmsvc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
270 | { | ||
271 | u32 exclusive; | ||
272 | |||
273 | if (!(p = nlm_decode_cookie(p, &argp->cookie))) | ||
274 | return 0; | ||
275 | argp->block = ntohl(*p++); | ||
276 | exclusive = ntohl(*p++); | ||
277 | if (!(p = nlm_decode_lock(p, &argp->lock))) | ||
278 | return 0; | ||
279 | if (exclusive) | ||
280 | argp->lock.fl.fl_type = F_WRLCK; | ||
281 | return xdr_argsize_check(rqstp, p); | ||
282 | } | ||
283 | |||
284 | int | ||
285 | nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
286 | { | ||
287 | if (!(p = nlm_decode_cookie(p, &argp->cookie)) | ||
288 | || !(p = nlm_decode_lock(p, &argp->lock))) | ||
289 | return 0; | ||
290 | argp->lock.fl.fl_type = F_UNLCK; | ||
291 | return xdr_argsize_check(rqstp, p); | ||
292 | } | ||
293 | |||
294 | int | ||
295 | nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
296 | { | ||
297 | struct nlm_lock *lock = &argp->lock; | ||
298 | |||
299 | memset(lock, 0, sizeof(*lock)); | ||
300 | locks_init_lock(&lock->fl); | ||
301 | lock->fl.fl_pid = ~(u32) 0; | ||
302 | |||
303 | if (!(p = nlm_decode_cookie(p, &argp->cookie)) | ||
304 | || !(p = xdr_decode_string_inplace(p, &lock->caller, | ||
305 | &lock->len, NLM_MAXSTRLEN)) | ||
306 | || !(p = nlm_decode_fh(p, &lock->fh)) | ||
307 | || !(p = nlm_decode_oh(p, &lock->oh))) | ||
308 | return 0; | ||
309 | argp->fsm_mode = ntohl(*p++); | ||
310 | argp->fsm_access = ntohl(*p++); | ||
311 | return xdr_argsize_check(rqstp, p); | ||
312 | } | ||
313 | |||
314 | int | ||
315 | nlmsvc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
316 | { | ||
317 | if (!(p = nlm_encode_cookie(p, &resp->cookie))) | ||
318 | return 0; | ||
319 | *p++ = resp->status; | ||
320 | *p++ = xdr_zero; /* sequence argument */ | ||
321 | return xdr_ressize_check(rqstp, p); | ||
322 | } | ||
323 | |||
324 | int | ||
325 | nlmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
326 | { | ||
327 | if (!(p = nlm_encode_cookie(p, &resp->cookie))) | ||
328 | return 0; | ||
329 | *p++ = resp->status; | ||
330 | return xdr_ressize_check(rqstp, p); | ||
331 | } | ||
332 | |||
333 | int | ||
334 | nlmsvc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp) | ||
335 | { | ||
336 | struct nlm_lock *lock = &argp->lock; | ||
337 | |||
338 | if (!(p = xdr_decode_string_inplace(p, &lock->caller, | ||
339 | &lock->len, NLM_MAXSTRLEN))) | ||
340 | return 0; | ||
341 | argp->state = ntohl(*p++); | ||
342 | return xdr_argsize_check(rqstp, p); | ||
343 | } | ||
344 | |||
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
	/* SM_NOTIFY callback arguments: monitored host name, its new
	 * state counter, then per-host private data. */
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	/* Preserve the address in network byte order */
	argp->addr = *p++;
	/* NOTE(review): the v4 variant in xdr4.c decodes only state and
	 * addr, not vers/proto — confirm both match what mon.c encodes */
	argp->vers = *p++;
	argp->proto = *p++;
	return xdr_argsize_check(rqstp, p);
}
357 | |||
358 | int | ||
359 | nlmsvc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
360 | { | ||
361 | if (!(p = nlm_decode_cookie(p, &resp->cookie))) | ||
362 | return 0; | ||
363 | resp->status = ntohl(*p++); | ||
364 | return xdr_argsize_check(rqstp, p); | ||
365 | } | ||
366 | |||
367 | int | ||
368 | nlmsvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy) | ||
369 | { | ||
370 | return xdr_argsize_check(rqstp, p); | ||
371 | } | ||
372 | |||
373 | int | ||
374 | nlmsvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy) | ||
375 | { | ||
376 | return xdr_ressize_check(rqstp, p); | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | * Now, the client side XDR functions | ||
381 | */ | ||
382 | #ifdef NLMCLNT_SUPPORT_SHARES | ||
383 | static int | ||
384 | nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr) | ||
385 | { | ||
386 | return 0; | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | static int | ||
391 | nlmclt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
392 | { | ||
393 | struct nlm_lock *lock = &argp->lock; | ||
394 | |||
395 | if (!(p = nlm_encode_cookie(p, &argp->cookie))) | ||
396 | return -EIO; | ||
397 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
398 | if (!(p = nlm_encode_lock(p, lock))) | ||
399 | return -EIO; | ||
400 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static int | ||
405 | nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
406 | { | ||
407 | if (!(p = nlm_decode_cookie(p, &resp->cookie))) | ||
408 | return -EIO; | ||
409 | resp->status = ntohl(*p++); | ||
410 | if (resp->status == NLM_LCK_DENIED) { | ||
411 | struct file_lock *fl = &resp->lock.fl; | ||
412 | u32 excl; | ||
413 | s32 start, len, end; | ||
414 | |||
415 | memset(&resp->lock, 0, sizeof(resp->lock)); | ||
416 | locks_init_lock(fl); | ||
417 | excl = ntohl(*p++); | ||
418 | fl->fl_pid = ntohl(*p++); | ||
419 | if (!(p = nlm_decode_oh(p, &resp->lock.oh))) | ||
420 | return -EIO; | ||
421 | |||
422 | fl->fl_flags = FL_POSIX; | ||
423 | fl->fl_type = excl? F_WRLCK : F_RDLCK; | ||
424 | start = ntohl(*p++); | ||
425 | len = ntohl(*p++); | ||
426 | end = start + len - 1; | ||
427 | |||
428 | fl->fl_start = s32_to_loff_t(start); | ||
429 | if (len == 0 || end < 0) | ||
430 | fl->fl_end = OFFSET_MAX; | ||
431 | else | ||
432 | fl->fl_end = s32_to_loff_t(end); | ||
433 | } | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | |||
438 | static int | ||
439 | nlmclt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
440 | { | ||
441 | struct nlm_lock *lock = &argp->lock; | ||
442 | |||
443 | if (!(p = nlm_encode_cookie(p, &argp->cookie))) | ||
444 | return -EIO; | ||
445 | *p++ = argp->block? xdr_one : xdr_zero; | ||
446 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
447 | if (!(p = nlm_encode_lock(p, lock))) | ||
448 | return -EIO; | ||
449 | *p++ = argp->reclaim? xdr_one : xdr_zero; | ||
450 | *p++ = htonl(argp->state); | ||
451 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static int | ||
456 | nlmclt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
457 | { | ||
458 | struct nlm_lock *lock = &argp->lock; | ||
459 | |||
460 | if (!(p = nlm_encode_cookie(p, &argp->cookie))) | ||
461 | return -EIO; | ||
462 | *p++ = argp->block? xdr_one : xdr_zero; | ||
463 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
464 | if (!(p = nlm_encode_lock(p, lock))) | ||
465 | return -EIO; | ||
466 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
467 | return 0; | ||
468 | } | ||
469 | |||
470 | static int | ||
471 | nlmclt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
472 | { | ||
473 | struct nlm_lock *lock = &argp->lock; | ||
474 | |||
475 | if (!(p = nlm_encode_cookie(p, &argp->cookie))) | ||
476 | return -EIO; | ||
477 | if (!(p = nlm_encode_lock(p, lock))) | ||
478 | return -EIO; | ||
479 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int | ||
484 | nlmclt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
485 | { | ||
486 | if (!(p = nlm_encode_cookie(p, &resp->cookie))) | ||
487 | return -EIO; | ||
488 | *p++ = resp->status; | ||
489 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static int | ||
494 | nlmclt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
495 | { | ||
496 | if (!(p = nlm_encode_testres(p, resp))) | ||
497 | return -EIO; | ||
498 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static int | ||
503 | nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
504 | { | ||
505 | if (!(p = nlm_decode_cookie(p, &resp->cookie))) | ||
506 | return -EIO; | ||
507 | resp->status = ntohl(*p++); | ||
508 | return 0; | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * Buffer requirements for NLM | ||
513 | */ | ||
514 | #define NLM_void_sz 0 | ||
515 | #define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) | ||
516 | #define NLM_caller_sz 1+XDR_QUADLEN(sizeof(system_utsname.nodename)) | ||
517 | #define NLM_netobj_sz 1+XDR_QUADLEN(XDR_MAX_NETOBJ) | ||
518 | /* #define NLM_owner_sz 1+XDR_QUADLEN(NLM_MAXOWNER) */ | ||
519 | #define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE) | ||
520 | #define NLM_lock_sz 3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz | ||
521 | #define NLM_holder_sz 4+NLM_netobj_sz | ||
522 | |||
523 | #define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz | ||
524 | #define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz | ||
525 | #define NLM_cancargs_sz NLM_cookie_sz+2+NLM_lock_sz | ||
526 | #define NLM_unlockargs_sz NLM_cookie_sz+NLM_lock_sz | ||
527 | |||
528 | #define NLM_testres_sz NLM_cookie_sz+1+NLM_holder_sz | ||
529 | #define NLM_res_sz NLM_cookie_sz+1 | ||
530 | #define NLM_norep_sz 0 | ||
531 | |||
532 | #ifndef MAX | ||
533 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
534 | #endif | ||
535 | |||
536 | /* | ||
537 | * For NLM, a void procedure really returns nothing | ||
538 | */ | ||
539 | #define nlmclt_decode_norep NULL | ||
540 | |||
541 | #define PROC(proc, argtype, restype) \ | ||
542 | [NLMPROC_##proc] = { \ | ||
543 | .p_proc = NLMPROC_##proc, \ | ||
544 | .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ | ||
545 | .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ | ||
546 | .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2 \ | ||
547 | } | ||
548 | |||
549 | static struct rpc_procinfo nlm_procedures[] = { | ||
550 | PROC(TEST, testargs, testres), | ||
551 | PROC(LOCK, lockargs, res), | ||
552 | PROC(CANCEL, cancargs, res), | ||
553 | PROC(UNLOCK, unlockargs, res), | ||
554 | PROC(GRANTED, testargs, res), | ||
555 | PROC(TEST_MSG, testargs, norep), | ||
556 | PROC(LOCK_MSG, lockargs, norep), | ||
557 | PROC(CANCEL_MSG, cancargs, norep), | ||
558 | PROC(UNLOCK_MSG, unlockargs, norep), | ||
559 | PROC(GRANTED_MSG, testargs, norep), | ||
560 | PROC(TEST_RES, testres, norep), | ||
561 | PROC(LOCK_RES, res, norep), | ||
562 | PROC(CANCEL_RES, res, norep), | ||
563 | PROC(UNLOCK_RES, res, norep), | ||
564 | PROC(GRANTED_RES, res, norep), | ||
565 | #ifdef NLMCLNT_SUPPORT_SHARES | ||
566 | PROC(SHARE, shareargs, shareres), | ||
567 | PROC(UNSHARE, shareargs, shareres), | ||
568 | PROC(NM_LOCK, lockargs, res), | ||
569 | PROC(FREE_ALL, notify, void), | ||
570 | #endif | ||
571 | }; | ||
572 | |||
573 | static struct rpc_version nlm_version1 = { | ||
574 | .number = 1, | ||
575 | .nrprocs = 16, | ||
576 | .procs = nlm_procedures, | ||
577 | }; | ||
578 | |||
579 | static struct rpc_version nlm_version3 = { | ||
580 | .number = 3, | ||
581 | .nrprocs = 24, | ||
582 | .procs = nlm_procedures, | ||
583 | }; | ||
584 | |||
585 | #ifdef CONFIG_LOCKD_V4 | ||
586 | extern struct rpc_version nlm_version4; | ||
587 | #endif | ||
588 | |||
589 | static struct rpc_version * nlm_versions[] = { | ||
590 | [1] = &nlm_version1, | ||
591 | [3] = &nlm_version3, | ||
592 | #ifdef CONFIG_LOCKD_V4 | ||
593 | [4] = &nlm_version4, | ||
594 | #endif | ||
595 | }; | ||
596 | |||
597 | static struct rpc_stat nlm_stats; | ||
598 | |||
599 | struct rpc_program nlm_program = { | ||
600 | .name = "lockd", | ||
601 | .number = NLM_PROGRAM, | ||
602 | .nrvers = sizeof(nlm_versions) / sizeof(nlm_versions[0]), | ||
603 | .version = nlm_versions, | ||
604 | .stats = &nlm_stats, | ||
605 | }; | ||
606 | |||
607 | #ifdef RPC_DEBUG | ||
/* Render a cookie as a hex string for debug logging. */
const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because we're only
	 * called with BKL held.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	int i;
	int len = sizeof(buf);
	char *p = buf;

	len--; /* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			/* out of room: overwrite the last emitted hex
			 * pair (and one more byte) with an ellipsis */
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
635 | #endif | ||
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c new file mode 100644 index 000000000000..ae4d6b426c62 --- /dev/null +++ b/fs/lockd/xdr4.c | |||
@@ -0,0 +1,580 @@ | |||
1 | /* | ||
2 | * linux/fs/lockd/xdr4.c | ||
3 | * | ||
4 | * XDR support for lockd and the lock client. | ||
5 | * | ||
6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
7 | * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no> | ||
8 | */ | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/utsname.h> | ||
13 | #include <linux/nfs.h> | ||
14 | |||
15 | #include <linux/sunrpc/xdr.h> | ||
16 | #include <linux/sunrpc/clnt.h> | ||
17 | #include <linux/sunrpc/svc.h> | ||
18 | #include <linux/sunrpc/stats.h> | ||
19 | #include <linux/lockd/lockd.h> | ||
20 | #include <linux/lockd/sm_inter.h> | ||
21 | |||
22 | #define NLMDBG_FACILITY NLMDBG_XDR | ||
23 | |||
24 | static inline loff_t | ||
25 | s64_to_loff_t(__s64 offset) | ||
26 | { | ||
27 | return (loff_t)offset; | ||
28 | } | ||
29 | |||
30 | |||
31 | static inline s64 | ||
32 | loff_t_to_s64(loff_t offset) | ||
33 | { | ||
34 | s64 res; | ||
35 | if (offset > NLM4_OFFSET_MAX) | ||
36 | res = NLM4_OFFSET_MAX; | ||
37 | else if (offset < -NLM4_OFFSET_MAX) | ||
38 | res = -NLM4_OFFSET_MAX; | ||
39 | else | ||
40 | res = offset; | ||
41 | return res; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * XDR functions for basic NLM types | ||
46 | */ | ||
47 | static u32 * | ||
48 | nlm4_decode_cookie(u32 *p, struct nlm_cookie *c) | ||
49 | { | ||
50 | unsigned int len; | ||
51 | |||
52 | len = ntohl(*p++); | ||
53 | |||
54 | if(len==0) | ||
55 | { | ||
56 | c->len=4; | ||
57 | memset(c->data, 0, 4); /* hockeypux brain damage */ | ||
58 | } | ||
59 | else if(len<=NLM_MAXCOOKIELEN) | ||
60 | { | ||
61 | c->len=len; | ||
62 | memcpy(c->data, p, len); | ||
63 | p+=XDR_QUADLEN(len); | ||
64 | } | ||
65 | else | ||
66 | { | ||
67 | printk(KERN_NOTICE | ||
68 | "lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n", len, NLM_MAXCOOKIELEN); | ||
69 | return NULL; | ||
70 | } | ||
71 | return p; | ||
72 | } | ||
73 | |||
74 | static u32 * | ||
75 | nlm4_encode_cookie(u32 *p, struct nlm_cookie *c) | ||
76 | { | ||
77 | *p++ = htonl(c->len); | ||
78 | memcpy(p, c->data, c->len); | ||
79 | p+=XDR_QUADLEN(c->len); | ||
80 | return p; | ||
81 | } | ||
82 | |||
static u32 *
nlm4_decode_fh(u32 *p, struct nfs_fh *f)
{
	/* NLM v4 carries a variable-length (NFSv3-style) file handle;
	 * clear the whole buffer first so no stale tail bytes remain */
	memset(f->data, 0, sizeof(f->data));
	f->size = ntohl(*p++);
	if (f->size > NFS_MAXFHSIZE) {
		printk(KERN_NOTICE
			"lockd: bad fhandle size %d (should be <=%d)\n",
			f->size, NFS_MAXFHSIZE);
		return NULL;
	}
	memcpy(f->data, p, f->size);
	return p + XDR_QUADLEN(f->size);
}
97 | |||
static u32 *
nlm4_encode_fh(u32 *p, struct nfs_fh *f)
{
	*p++ = htonl(f->size);
	/* zero the final pad word before copying so uninitialized
	 * kernel memory is never sent when size isn't quad-aligned */
	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
	memcpy(p, f->data, f->size);
	return p + XDR_QUADLEN(f->size);
}
106 | |||
107 | /* | ||
108 | * Encode and decode owner handle | ||
109 | */ | ||
110 | static u32 * | ||
111 | nlm4_decode_oh(u32 *p, struct xdr_netobj *oh) | ||
112 | { | ||
113 | return xdr_decode_netobj(p, oh); | ||
114 | } | ||
115 | |||
116 | static u32 * | ||
117 | nlm4_encode_oh(u32 *p, struct xdr_netobj *oh) | ||
118 | { | ||
119 | return xdr_encode_netobj(p, oh); | ||
120 | } | ||
121 | |||
static u32 *
nlm4_decode_lock(u32 *p, struct nlm_lock *lock)
{
	struct file_lock *fl = &lock->fl;
	__s64 len, start, end;

	/* caller name, file handle, owner handle */
	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm4_decode_fh(p, &lock->fh))
	 || !(p = nlm4_decode_oh(p, &lock->oh)))
		return NULL;

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid = ntohl(*p++);
	fl->fl_flags = FL_POSIX;
	fl->fl_type = F_RDLCK; /* as good as anything else */
	/* v4 uses 64-bit offsets and lengths */
	p = xdr_decode_hyper(p, &start);
	p = xdr_decode_hyper(p, &len);
	end = start + len - 1;

	fl->fl_start = s64_to_loff_t(start);

	/* len == 0 means "lock to end of file"; a negative end means
	 * the 64-bit start+len arithmetic wrapped, treated the same */
	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s64_to_loff_t(end);
	return p;
}
151 | |||
152 | /* | ||
153 | * Encode a lock as part of an NLM call | ||
154 | */ | ||
155 | static u32 * | ||
156 | nlm4_encode_lock(u32 *p, struct nlm_lock *lock) | ||
157 | { | ||
158 | struct file_lock *fl = &lock->fl; | ||
159 | __s64 start, len; | ||
160 | |||
161 | if (!(p = xdr_encode_string(p, lock->caller)) | ||
162 | || !(p = nlm4_encode_fh(p, &lock->fh)) | ||
163 | || !(p = nlm4_encode_oh(p, &lock->oh))) | ||
164 | return NULL; | ||
165 | |||
166 | if (fl->fl_start > NLM4_OFFSET_MAX | ||
167 | || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) | ||
168 | return NULL; | ||
169 | |||
170 | *p++ = htonl(fl->fl_pid); | ||
171 | |||
172 | start = loff_t_to_s64(fl->fl_start); | ||
173 | if (fl->fl_end == OFFSET_MAX) | ||
174 | len = 0; | ||
175 | else | ||
176 | len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); | ||
177 | |||
178 | p = xdr_encode_hyper(p, start); | ||
179 | p = xdr_encode_hyper(p, len); | ||
180 | |||
181 | return p; | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Encode result of a TEST/TEST_MSG call | ||
186 | */ | ||
187 | static u32 * | ||
188 | nlm4_encode_testres(u32 *p, struct nlm_res *resp) | ||
189 | { | ||
190 | s64 start, len; | ||
191 | |||
192 | dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp); | ||
193 | if (!(p = nlm4_encode_cookie(p, &resp->cookie))) | ||
194 | return NULL; | ||
195 | *p++ = resp->status; | ||
196 | |||
197 | if (resp->status == nlm_lck_denied) { | ||
198 | struct file_lock *fl = &resp->lock.fl; | ||
199 | |||
200 | *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; | ||
201 | *p++ = htonl(fl->fl_pid); | ||
202 | |||
203 | /* Encode owner handle. */ | ||
204 | if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) | ||
205 | return NULL; | ||
206 | |||
207 | start = loff_t_to_s64(fl->fl_start); | ||
208 | if (fl->fl_end == OFFSET_MAX) | ||
209 | len = 0; | ||
210 | else | ||
211 | len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); | ||
212 | |||
213 | p = xdr_encode_hyper(p, start); | ||
214 | p = xdr_encode_hyper(p, len); | ||
215 | dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n", | ||
216 | resp->status, fl->fl_pid, fl->fl_type, | ||
217 | (long long)fl->fl_start, (long long)fl->fl_end); | ||
218 | } | ||
219 | |||
220 | dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp); | ||
221 | return p; | ||
222 | } | ||
223 | |||
224 | |||
225 | /* | ||
226 | * First, the server side XDR functions | ||
227 | */ | ||
228 | int | ||
229 | nlm4svc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
230 | { | ||
231 | u32 exclusive; | ||
232 | |||
233 | if (!(p = nlm4_decode_cookie(p, &argp->cookie))) | ||
234 | return 0; | ||
235 | |||
236 | exclusive = ntohl(*p++); | ||
237 | if (!(p = nlm4_decode_lock(p, &argp->lock))) | ||
238 | return 0; | ||
239 | if (exclusive) | ||
240 | argp->lock.fl.fl_type = F_WRLCK; | ||
241 | |||
242 | return xdr_argsize_check(rqstp, p); | ||
243 | } | ||
244 | |||
245 | int | ||
246 | nlm4svc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
247 | { | ||
248 | if (!(p = nlm4_encode_testres(p, resp))) | ||
249 | return 0; | ||
250 | return xdr_ressize_check(rqstp, p); | ||
251 | } | ||
252 | |||
253 | int | ||
254 | nlm4svc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
255 | { | ||
256 | u32 exclusive; | ||
257 | |||
258 | if (!(p = nlm4_decode_cookie(p, &argp->cookie))) | ||
259 | return 0; | ||
260 | argp->block = ntohl(*p++); | ||
261 | exclusive = ntohl(*p++); | ||
262 | if (!(p = nlm4_decode_lock(p, &argp->lock))) | ||
263 | return 0; | ||
264 | if (exclusive) | ||
265 | argp->lock.fl.fl_type = F_WRLCK; | ||
266 | argp->reclaim = ntohl(*p++); | ||
267 | argp->state = ntohl(*p++); | ||
268 | argp->monitor = 1; /* monitor client by default */ | ||
269 | |||
270 | return xdr_argsize_check(rqstp, p); | ||
271 | } | ||
272 | |||
273 | int | ||
274 | nlm4svc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
275 | { | ||
276 | u32 exclusive; | ||
277 | |||
278 | if (!(p = nlm4_decode_cookie(p, &argp->cookie))) | ||
279 | return 0; | ||
280 | argp->block = ntohl(*p++); | ||
281 | exclusive = ntohl(*p++); | ||
282 | if (!(p = nlm4_decode_lock(p, &argp->lock))) | ||
283 | return 0; | ||
284 | if (exclusive) | ||
285 | argp->lock.fl.fl_type = F_WRLCK; | ||
286 | return xdr_argsize_check(rqstp, p); | ||
287 | } | ||
288 | |||
289 | int | ||
290 | nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
291 | { | ||
292 | if (!(p = nlm4_decode_cookie(p, &argp->cookie)) | ||
293 | || !(p = nlm4_decode_lock(p, &argp->lock))) | ||
294 | return 0; | ||
295 | argp->lock.fl.fl_type = F_UNLCK; | ||
296 | return xdr_argsize_check(rqstp, p); | ||
297 | } | ||
298 | |||
299 | int | ||
300 | nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | ||
301 | { | ||
302 | struct nlm_lock *lock = &argp->lock; | ||
303 | |||
304 | memset(lock, 0, sizeof(*lock)); | ||
305 | locks_init_lock(&lock->fl); | ||
306 | lock->fl.fl_pid = ~(u32) 0; | ||
307 | |||
308 | if (!(p = nlm4_decode_cookie(p, &argp->cookie)) | ||
309 | || !(p = xdr_decode_string_inplace(p, &lock->caller, | ||
310 | &lock->len, NLM_MAXSTRLEN)) | ||
311 | || !(p = nlm4_decode_fh(p, &lock->fh)) | ||
312 | || !(p = nlm4_decode_oh(p, &lock->oh))) | ||
313 | return 0; | ||
314 | argp->fsm_mode = ntohl(*p++); | ||
315 | argp->fsm_access = ntohl(*p++); | ||
316 | return xdr_argsize_check(rqstp, p); | ||
317 | } | ||
318 | |||
319 | int | ||
320 | nlm4svc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
321 | { | ||
322 | if (!(p = nlm4_encode_cookie(p, &resp->cookie))) | ||
323 | return 0; | ||
324 | *p++ = resp->status; | ||
325 | *p++ = xdr_zero; /* sequence argument */ | ||
326 | return xdr_ressize_check(rqstp, p); | ||
327 | } | ||
328 | |||
329 | int | ||
330 | nlm4svc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
331 | { | ||
332 | if (!(p = nlm4_encode_cookie(p, &resp->cookie))) | ||
333 | return 0; | ||
334 | *p++ = resp->status; | ||
335 | return xdr_ressize_check(rqstp, p); | ||
336 | } | ||
337 | |||
338 | int | ||
339 | nlm4svc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp) | ||
340 | { | ||
341 | struct nlm_lock *lock = &argp->lock; | ||
342 | |||
343 | if (!(p = xdr_decode_string_inplace(p, &lock->caller, | ||
344 | &lock->len, NLM_MAXSTRLEN))) | ||
345 | return 0; | ||
346 | argp->state = ntohl(*p++); | ||
347 | return xdr_argsize_check(rqstp, p); | ||
348 | } | ||
349 | |||
int
nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
	/* SM_NOTIFY callback arguments: monitored host name, its new
	 * state counter, then the address. */
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	/* Preserve the address in network byte order */
	argp->addr = *p++;
	/* NOTE(review): unlike the v1/v3 variant in xdr.c, this does
	 * not consume vers/proto words — confirm against mon.c */
	return xdr_argsize_check(rqstp, p);
}
360 | |||
361 | int | ||
362 | nlm4svc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp) | ||
363 | { | ||
364 | if (!(p = nlm4_decode_cookie(p, &resp->cookie))) | ||
365 | return 0; | ||
366 | resp->status = ntohl(*p++); | ||
367 | return xdr_argsize_check(rqstp, p); | ||
368 | } | ||
369 | |||
/* Void argument: nothing on the wire; just verify the request size. */
int
nlm4svc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}
375 | |||
/* Void result: nothing to encode; just verify the reply size. */
int
nlm4svc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}
381 | |||
/*
 * Now, the client side XDR functions
 */
#ifdef NLMCLNT_SUPPORT_SHARES
/* Void reply: nothing on the wire, decoding always succeeds. */
static int
nlm4clt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
{
	return 0;
}
#endif
392 | |||
393 | static int | ||
394 | nlm4clt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
395 | { | ||
396 | struct nlm_lock *lock = &argp->lock; | ||
397 | |||
398 | if (!(p = nlm4_encode_cookie(p, &argp->cookie))) | ||
399 | return -EIO; | ||
400 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
401 | if (!(p = nlm4_encode_lock(p, lock))) | ||
402 | return -EIO; | ||
403 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
404 | return 0; | ||
405 | } | ||
406 | |||
/*
 * Decode the result of an NLMv4 TEST call.  Always pulls out the
 * cookie and status; when status is NLM_LCK_DENIED the reply also
 * carries a description of the conflicting lock, which is unpacked
 * into resp->lock.
 */
static int
nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
		return -EIO;
	resp->status = ntohl(*p++);
	if (resp->status == NLM_LCK_DENIED) {
		struct file_lock *fl = &resp->lock.fl;
		u32 excl;
		s64 start, end, len;

		/* start from a clean lock before filling in wire data */
		memset(&resp->lock, 0, sizeof(resp->lock));
		locks_init_lock(fl);
		excl = ntohl(*p++);		/* exclusive flag */
		fl->fl_pid = ntohl(*p++);	/* holder's pid */
		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
			return -EIO;

		fl->fl_flags = FL_POSIX;
		fl->fl_type = excl? F_WRLCK : F_RDLCK;
		/* 64-bit offset and length of the conflicting region */
		p = xdr_decode_hyper(p, &start);
		p = xdr_decode_hyper(p, &len);
		end = start + len - 1;

		fl->fl_start = s64_to_loff_t(start);
		/* len == 0 means "to end of file"; a wrapped end is
		 * also clamped to OFFSET_MAX */
		if (len == 0 || end < 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = s64_to_loff_t(end);
	}
	return 0;
}
439 | |||
440 | |||
441 | static int | ||
442 | nlm4clt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
443 | { | ||
444 | struct nlm_lock *lock = &argp->lock; | ||
445 | |||
446 | if (!(p = nlm4_encode_cookie(p, &argp->cookie))) | ||
447 | return -EIO; | ||
448 | *p++ = argp->block? xdr_one : xdr_zero; | ||
449 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
450 | if (!(p = nlm4_encode_lock(p, lock))) | ||
451 | return -EIO; | ||
452 | *p++ = argp->reclaim? xdr_one : xdr_zero; | ||
453 | *p++ = htonl(argp->state); | ||
454 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static int | ||
459 | nlm4clt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
460 | { | ||
461 | struct nlm_lock *lock = &argp->lock; | ||
462 | |||
463 | if (!(p = nlm4_encode_cookie(p, &argp->cookie))) | ||
464 | return -EIO; | ||
465 | *p++ = argp->block? xdr_one : xdr_zero; | ||
466 | *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; | ||
467 | if (!(p = nlm4_encode_lock(p, lock))) | ||
468 | return -EIO; | ||
469 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | static int | ||
474 | nlm4clt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp) | ||
475 | { | ||
476 | struct nlm_lock *lock = &argp->lock; | ||
477 | |||
478 | if (!(p = nlm4_encode_cookie(p, &argp->cookie))) | ||
479 | return -EIO; | ||
480 | if (!(p = nlm4_encode_lock(p, lock))) | ||
481 | return -EIO; | ||
482 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | static int | ||
487 | nlm4clt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
488 | { | ||
489 | if (!(p = nlm4_encode_cookie(p, &resp->cookie))) | ||
490 | return -EIO; | ||
491 | *p++ = resp->status; | ||
492 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | static int | ||
497 | nlm4clt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
498 | { | ||
499 | if (!(p = nlm4_encode_testres(p, resp))) | ||
500 | return -EIO; | ||
501 | req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); | ||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static int | ||
506 | nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | ||
507 | { | ||
508 | if (!(p = nlm4_decode_cookie(p, &resp->cookie))) | ||
509 | return -EIO; | ||
510 | resp->status = ntohl(*p++); | ||
511 | return 0; | ||
512 | } | ||
513 | |||
/*
 * Buffer requirements for NLM, in 32-bit XDR words.
 * Each *_sz constant is the worst-case on-the-wire size of the
 * corresponding object.  All expansions are parenthesized so the
 * constants remain safe inside larger expressions (e.g. the << 2
 * applied in the PROC() macro below).
 */
#define NLM4_void_sz		(0)
#define NLM4_cookie_sz		(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))
#define NLM4_caller_sz		(1+XDR_QUADLEN(NLM_MAXSTRLEN))
#define NLM4_netobj_sz		(1+XDR_QUADLEN(XDR_MAX_NETOBJ))
/* #define NLM4_owner_sz	(1+XDR_QUADLEN(NLM4_MAXOWNER)) */
#define NLM4_fhandle_sz		(1+XDR_QUADLEN(NFS3_FHSIZE))
#define NLM4_lock_sz		(5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz)
#define NLM4_holder_sz		(6+NLM4_netobj_sz)

#define NLM4_testargs_sz	(NLM4_cookie_sz+1+NLM4_lock_sz)
#define NLM4_lockargs_sz	(NLM4_cookie_sz+4+NLM4_lock_sz)
#define NLM4_cancargs_sz	(NLM4_cookie_sz+2+NLM4_lock_sz)
#define NLM4_unlockargs_sz	(NLM4_cookie_sz+NLM4_lock_sz)

#define NLM4_testres_sz		(NLM4_cookie_sz+1+NLM4_holder_sz)
#define NLM4_res_sz		(NLM4_cookie_sz+1)
#define NLM4_norep_sz		(0)

#ifndef MAX
# define MAX(a,b) (((a) > (b))? (a) : (b))
#endif

/*
 * For NLM, a void procedure really returns nothing
 */
#define nlm4clt_decode_norep	NULL
543 | |||
/*
 * Table of NLMv4 client RPC procedures.  PROC() fills in one
 * rpc_procinfo entry: the procedure number, the XDR encode/decode
 * routines for its argument and result types, and a buffer size
 * large enough for the bigger of the two (word counts shifted
 * left by 2 to get bytes).
 */
#define PROC(proc, argtype, restype)	\
[NLMPROC_##proc] = {	\
	.p_proc      = NLMPROC_##proc,	\
	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,	\
	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,	\
	.p_bufsiz    = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2	\
	}

static struct rpc_procinfo	nlm4_procedures[] = {
    PROC(TEST,		testargs,	testres),
    PROC(LOCK,		lockargs,	res),
    PROC(CANCEL,	cancargs,	res),
    PROC(UNLOCK,	unlockargs,	res),
    PROC(GRANTED,	testargs,	res),
    /* _MSG variants are asynchronous: no reply body is decoded */
    PROC(TEST_MSG,	testargs,	norep),
    PROC(LOCK_MSG,	lockargs,	norep),
    PROC(CANCEL_MSG,	cancargs,	norep),
    PROC(UNLOCK_MSG,	unlockargs,	norep),
    PROC(GRANTED_MSG,	testargs,	norep),
    /* _RES variants are callbacks carrying a result to the peer */
    PROC(TEST_RES,	testres,	norep),
    PROC(LOCK_RES,	res,		norep),
    PROC(CANCEL_RES,	res,		norep),
    PROC(UNLOCK_RES,	res,		norep),
    PROC(GRANTED_RES,	res,		norep),
#ifdef NLMCLNT_SUPPORT_SHARES
    PROC(SHARE,		shareargs,	shareres),
    PROC(UNSHARE,	shareargs,	shareres),
    PROC(NM_LOCK,	lockargs,	res),
    PROC(FREE_ALL,	notify,		void),
#endif
};
575 | |||
/*
 * RPC program descriptor for NLM version 4.
 * NOTE(review): nrprocs is 24 even when NLMCLNT_SUPPORT_SHARES is
 * undefined and nlm4_procedures[] only has entries up to
 * GRANTED_RES — confirm the RPC layer tolerates the shorter table.
 */
struct rpc_version	nlm_version4 = {
	.number		= 4,
	.nrprocs	= 24,
	.procs		= nlm4_procedures,
};