author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-25 12:18:27 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-25 12:18:27 -0500
commit | 53846a21c1766326bb14ce8ab6e997a0c120675d (patch)
tree | 37b04485e29844b4e734479181276a2f4d2447e4 /fs/lockd
parent | 2e9abdd9bad485970b37cd53a82f92702054984c (diff)
parent | 1ebbe2b20091d306453a5cf480a87e6cd28ae76f (diff)
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (103 commits)
SUNRPC,RPCSEC_GSS: spkm3--fix config dependencies
SUNRPC,RPCSEC_GSS: spkm3: import contexts using NID_cast5_cbc
LOCKD: Make nlmsvc_traverse_shares return void
LOCKD: nlmsvc_traverse_blocks return is unused
SUNRPC,RPCSEC_GSS: fix krb5 sequence numbers.
NFSv4: Dont list system.nfs4_acl for filesystems that don't support it.
SUNRPC,RPCSEC_GSS: remove unnecessary kmalloc of a checksum
SUNRPC: Ensure rpc_call_async() always calls tk_ops->rpc_release()
SUNRPC: Fix memory barriers for req->rq_received
NFS: Fix a race in nfs_sync_inode()
NFS: Clean up nfs_flush_list()
NFS: Fix a race with PG_private and nfs_release_page()
NFSv4: Ensure the callback daemon flushes signals
SUNRPC: Fix a 'Busy inodes' error in rpc_pipefs
NFS, NLM: Allow blocking locks to respect signals
NFS: Make nfs_fhget() return appropriate error values
NFSv4: Fix an oops in nfs4_fill_super
lockd: blocks should hold a reference to the nlm_file
NFSv4: SETCLIENTID_CONFIRM should handle NFS4ERR_DELAY/NFS4ERR_RESOURCE
NFSv4: Send the delegation stateid for SETATTR calls
...
Diffstat (limited to 'fs/lockd')
-rw-r--r-- | fs/lockd/clntlock.c | 112
-rw-r--r-- | fs/lockd/clntproc.c | 317
-rw-r--r-- | fs/lockd/host.c | 12
-rw-r--r-- | fs/lockd/mon.c | 11
-rw-r--r-- | fs/lockd/svc4proc.c | 157
-rw-r--r-- | fs/lockd/svclock.c | 349
-rw-r--r-- | fs/lockd/svcproc.c | 151
-rw-r--r-- | fs/lockd/svcshare.c | 4
-rw-r--r-- | fs/lockd/svcsubs.c | 7
-rw-r--r-- | fs/lockd/xdr.c | 17
-rw-r--r-- | fs/lockd/xdr4.c | 21
11 files changed, 505 insertions, 653 deletions
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index da6354baa0b8..bce744468708 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -44,32 +44,25 @@ static LIST_HEAD(nlm_blocked); | |||
44 | /* | 44 | /* |
45 | * Queue up a lock for blocking so that the GRANTED request can see it | 45 | * Queue up a lock for blocking so that the GRANTED request can see it |
46 | */ | 46 | */ |
47 | int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl) | 47 | struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) |
48 | { | 48 | { |
49 | struct nlm_wait *block; | 49 | struct nlm_wait *block; |
50 | 50 | ||
51 | BUG_ON(req->a_block != NULL); | ||
52 | block = kmalloc(sizeof(*block), GFP_KERNEL); | 51 | block = kmalloc(sizeof(*block), GFP_KERNEL); |
53 | if (block == NULL) | 52 | if (block != NULL) { |
54 | return -ENOMEM; | 53 | block->b_host = host; |
55 | block->b_host = host; | 54 | block->b_lock = fl; |
56 | block->b_lock = fl; | 55 | init_waitqueue_head(&block->b_wait); |
57 | init_waitqueue_head(&block->b_wait); | 56 | block->b_status = NLM_LCK_BLOCKED; |
58 | block->b_status = NLM_LCK_BLOCKED; | 57 | list_add(&block->b_list, &nlm_blocked); |
59 | 58 | } | |
60 | list_add(&block->b_list, &nlm_blocked); | 59 | return block; |
61 | req->a_block = block; | ||
62 | |||
63 | return 0; | ||
64 | } | 60 | } |
65 | 61 | ||
66 | void nlmclnt_finish_block(struct nlm_rqst *req) | 62 | void nlmclnt_finish_block(struct nlm_wait *block) |
67 | { | 63 | { |
68 | struct nlm_wait *block = req->a_block; | ||
69 | |||
70 | if (block == NULL) | 64 | if (block == NULL) |
71 | return; | 65 | return; |
72 | req->a_block = NULL; | ||
73 | list_del(&block->b_list); | 66 | list_del(&block->b_list); |
74 | kfree(block); | 67 | kfree(block); |
75 | } | 68 | } |
@@ -77,15 +70,14 @@ void nlmclnt_finish_block(struct nlm_rqst *req) | |||
77 | /* | 70 | /* |
78 | * Block on a lock | 71 | * Block on a lock |
79 | */ | 72 | */ |
80 | long nlmclnt_block(struct nlm_rqst *req, long timeout) | 73 | int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) |
81 | { | 74 | { |
82 | struct nlm_wait *block = req->a_block; | ||
83 | long ret; | 75 | long ret; |
84 | 76 | ||
85 | /* A borken server might ask us to block even if we didn't | 77 | /* A borken server might ask us to block even if we didn't |
86 | * request it. Just say no! | 78 | * request it. Just say no! |
87 | */ | 79 | */ |
88 | if (!req->a_args.block) | 80 | if (block == NULL) |
89 | return -EAGAIN; | 81 | return -EAGAIN; |
90 | 82 | ||
91 | /* Go to sleep waiting for GRANT callback. Some servers seem | 83 | /* Go to sleep waiting for GRANT callback. Some servers seem |
@@ -99,13 +91,10 @@ long nlmclnt_block(struct nlm_rqst *req, long timeout) | |||
99 | ret = wait_event_interruptible_timeout(block->b_wait, | 91 | ret = wait_event_interruptible_timeout(block->b_wait, |
100 | block->b_status != NLM_LCK_BLOCKED, | 92 | block->b_status != NLM_LCK_BLOCKED, |
101 | timeout); | 93 | timeout); |
102 | 94 | if (ret < 0) | |
103 | if (block->b_status != NLM_LCK_BLOCKED) { | 95 | return -ERESTARTSYS; |
104 | req->a_res.status = block->b_status; | 96 | req->a_res.status = block->b_status; |
105 | block->b_status = NLM_LCK_BLOCKED; | 97 | return 0; |
106 | } | ||
107 | |||
108 | return ret; | ||
109 | } | 98 | } |
110 | 99 | ||
111 | /* | 100 | /* |
@@ -125,7 +114,15 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) | |||
125 | list_for_each_entry(block, &nlm_blocked, b_list) { | 114 | list_for_each_entry(block, &nlm_blocked, b_list) { |
126 | struct file_lock *fl_blocked = block->b_lock; | 115 | struct file_lock *fl_blocked = block->b_lock; |
127 | 116 | ||
128 | if (!nlm_compare_locks(fl_blocked, fl)) | 117 | if (fl_blocked->fl_start != fl->fl_start) |
118 | continue; | ||
119 | if (fl_blocked->fl_end != fl->fl_end) | ||
120 | continue; | ||
121 | /* | ||
122 | * Careful! The NLM server will return the 32-bit "pid" that | ||
123 | * we put on the wire: in this case the lockowner "pid". | ||
124 | */ | ||
125 | if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) | ||
129 | continue; | 126 | continue; |
130 | if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) | 127 | if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) |
131 | continue; | 128 | continue; |
@@ -147,34 +144,6 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) | |||
147 | */ | 144 | */ |
148 | 145 | ||
149 | /* | 146 | /* |
150 | * Mark the locks for reclaiming. | ||
151 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | ||
152 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | ||
153 | */ | ||
154 | static | ||
155 | void nlmclnt_mark_reclaim(struct nlm_host *host) | ||
156 | { | ||
157 | struct file_lock *fl; | ||
158 | struct inode *inode; | ||
159 | struct list_head *tmp; | ||
160 | |||
161 | list_for_each(tmp, &file_lock_list) { | ||
162 | fl = list_entry(tmp, struct file_lock, fl_link); | ||
163 | |||
164 | inode = fl->fl_file->f_dentry->d_inode; | ||
165 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | ||
166 | continue; | ||
167 | if (fl->fl_u.nfs_fl.owner == NULL) | ||
168 | continue; | ||
169 | if (fl->fl_u.nfs_fl.owner->host != host) | ||
170 | continue; | ||
171 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | ||
172 | continue; | ||
173 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | 147 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, |
179 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | 148 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. |
180 | */ | 149 | */ |
@@ -186,7 +155,12 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | |||
186 | host->h_state++; | 155 | host->h_state++; |
187 | host->h_nextrebind = 0; | 156 | host->h_nextrebind = 0; |
188 | nlm_rebind_host(host); | 157 | nlm_rebind_host(host); |
189 | nlmclnt_mark_reclaim(host); | 158 | |
159 | /* | ||
160 | * Mark the locks for reclaiming. | ||
161 | */ | ||
162 | list_splice_init(&host->h_granted, &host->h_reclaim); | ||
163 | |||
190 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | 164 | dprintk("NLM: reclaiming locks for host %s", host->h_name); |
191 | } | 165 | } |
192 | 166 | ||
@@ -215,9 +189,7 @@ reclaimer(void *ptr) | |||
215 | { | 189 | { |
216 | struct nlm_host *host = (struct nlm_host *) ptr; | 190 | struct nlm_host *host = (struct nlm_host *) ptr; |
217 | struct nlm_wait *block; | 191 | struct nlm_wait *block; |
218 | struct list_head *tmp; | 192 | struct file_lock *fl, *next; |
219 | struct file_lock *fl; | ||
220 | struct inode *inode; | ||
221 | 193 | ||
222 | daemonize("%s-reclaim", host->h_name); | 194 | daemonize("%s-reclaim", host->h_name); |
223 | allow_signal(SIGKILL); | 195 | allow_signal(SIGKILL); |
@@ -229,23 +201,13 @@ reclaimer(void *ptr) | |||
229 | 201 | ||
230 | /* First, reclaim all locks that have been marked. */ | 202 | /* First, reclaim all locks that have been marked. */ |
231 | restart: | 203 | restart: |
232 | list_for_each(tmp, &file_lock_list) { | 204 | list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { |
233 | fl = list_entry(tmp, struct file_lock, fl_link); | 205 | list_del_init(&fl->fl_u.nfs_fl.list); |
234 | 206 | ||
235 | inode = fl->fl_file->f_dentry->d_inode; | ||
236 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | ||
237 | continue; | ||
238 | if (fl->fl_u.nfs_fl.owner == NULL) | ||
239 | continue; | ||
240 | if (fl->fl_u.nfs_fl.owner->host != host) | ||
241 | continue; | ||
242 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM)) | ||
243 | continue; | ||
244 | |||
245 | fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM; | ||
246 | nlmclnt_reclaim(host, fl); | ||
247 | if (signalled()) | 207 | if (signalled()) |
248 | break; | 208 | continue; |
209 | if (nlmclnt_reclaim(host, fl) == 0) | ||
210 | list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); | ||
249 | goto restart; | 211 | goto restart; |
250 | } | 212 | } |
251 | 213 | ||
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 970b6a6aa337..f96e38155b5c 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -132,59 +132,18 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) | |||
132 | memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh)); | 132 | memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh)); |
133 | lock->caller = system_utsname.nodename; | 133 | lock->caller = system_utsname.nodename; |
134 | lock->oh.data = req->a_owner; | 134 | lock->oh.data = req->a_owner; |
135 | lock->oh.len = sprintf(req->a_owner, "%d@%s", | 135 | lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", |
136 | current->pid, system_utsname.nodename); | 136 | (unsigned int)fl->fl_u.nfs_fl.owner->pid, |
137 | locks_copy_lock(&lock->fl, fl); | 137 | system_utsname.nodename); |
138 | lock->svid = fl->fl_u.nfs_fl.owner->pid; | ||
139 | lock->fl.fl_start = fl->fl_start; | ||
140 | lock->fl.fl_end = fl->fl_end; | ||
141 | lock->fl.fl_type = fl->fl_type; | ||
138 | } | 142 | } |
139 | 143 | ||
140 | static void nlmclnt_release_lockargs(struct nlm_rqst *req) | 144 | static void nlmclnt_release_lockargs(struct nlm_rqst *req) |
141 | { | 145 | { |
142 | struct file_lock *fl = &req->a_args.lock.fl; | 146 | BUG_ON(req->a_args.lock.fl.fl_ops != NULL); |
143 | |||
144 | if (fl->fl_ops && fl->fl_ops->fl_release_private) | ||
145 | fl->fl_ops->fl_release_private(fl); | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Initialize arguments for GRANTED call. The nlm_rqst structure | ||
150 | * has been cleared already. | ||
151 | */ | ||
152 | int | ||
153 | nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) | ||
154 | { | ||
155 | locks_copy_lock(&call->a_args.lock.fl, &lock->fl); | ||
156 | memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); | ||
157 | call->a_args.lock.caller = system_utsname.nodename; | ||
158 | call->a_args.lock.oh.len = lock->oh.len; | ||
159 | |||
160 | /* set default data area */ | ||
161 | call->a_args.lock.oh.data = call->a_owner; | ||
162 | |||
163 | if (lock->oh.len > NLMCLNT_OHSIZE) { | ||
164 | void *data = kmalloc(lock->oh.len, GFP_KERNEL); | ||
165 | if (!data) { | ||
166 | nlmclnt_freegrantargs(call); | ||
167 | return 0; | ||
168 | } | ||
169 | call->a_args.lock.oh.data = (u8 *) data; | ||
170 | } | ||
171 | |||
172 | memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); | ||
173 | return 1; | ||
174 | } | ||
175 | |||
176 | void | ||
177 | nlmclnt_freegrantargs(struct nlm_rqst *call) | ||
178 | { | ||
179 | struct file_lock *fl = &call->a_args.lock.fl; | ||
180 | /* | ||
181 | * Check whether we allocated memory for the owner. | ||
182 | */ | ||
183 | if (call->a_args.lock.oh.data != (u8 *) call->a_owner) { | ||
184 | kfree(call->a_args.lock.oh.data); | ||
185 | } | ||
186 | if (fl->fl_ops && fl->fl_ops->fl_release_private) | ||
187 | fl->fl_ops->fl_release_private(fl); | ||
188 | } | 147 | } |
189 | 148 | ||
190 | /* | 149 | /* |
@@ -193,9 +152,8 @@ nlmclnt_freegrantargs(struct nlm_rqst *call) | |||
193 | int | 152 | int |
194 | nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) | 153 | nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) |
195 | { | 154 | { |
196 | struct nfs_server *nfssrv = NFS_SERVER(inode); | ||
197 | struct nlm_host *host; | 155 | struct nlm_host *host; |
198 | struct nlm_rqst reqst, *call = &reqst; | 156 | struct nlm_rqst *call; |
199 | sigset_t oldset; | 157 | sigset_t oldset; |
200 | unsigned long flags; | 158 | unsigned long flags; |
201 | int status, proto, vers; | 159 | int status, proto, vers; |
@@ -209,23 +167,17 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) | |||
209 | /* Retrieve transport protocol from NFS client */ | 167 | /* Retrieve transport protocol from NFS client */ |
210 | proto = NFS_CLIENT(inode)->cl_xprt->prot; | 168 | proto = NFS_CLIENT(inode)->cl_xprt->prot; |
211 | 169 | ||
212 | if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers))) | 170 | host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers); |
171 | if (host == NULL) | ||
213 | return -ENOLCK; | 172 | return -ENOLCK; |
214 | 173 | ||
215 | /* Create RPC client handle if not there, and copy soft | 174 | call = nlm_alloc_call(host); |
216 | * and intr flags from NFS client. */ | 175 | if (call == NULL) |
217 | if (host->h_rpcclnt == NULL) { | 176 | return -ENOMEM; |
218 | struct rpc_clnt *clnt; | ||
219 | 177 | ||
220 | /* Bind an rpc client to this host handle (does not | 178 | nlmclnt_locks_init_private(fl, host); |
221 | * perform a portmapper lookup) */ | 179 | /* Set up the argument struct */ |
222 | if (!(clnt = nlm_bind_host(host))) { | 180 | nlmclnt_setlockargs(call, fl); |
223 | status = -ENOLCK; | ||
224 | goto done; | ||
225 | } | ||
226 | clnt->cl_softrtry = nfssrv->client->cl_softrtry; | ||
227 | clnt->cl_intr = nfssrv->client->cl_intr; | ||
228 | } | ||
229 | 181 | ||
230 | /* Keep the old signal mask */ | 182 | /* Keep the old signal mask */ |
231 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 183 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
@@ -238,26 +190,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) | |||
238 | && (current->flags & PF_EXITING)) { | 190 | && (current->flags & PF_EXITING)) { |
239 | sigfillset(¤t->blocked); /* Mask all signals */ | 191 | sigfillset(¤t->blocked); /* Mask all signals */ |
240 | recalc_sigpending(); | 192 | recalc_sigpending(); |
241 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
242 | 193 | ||
243 | call = nlmclnt_alloc_call(); | ||
244 | if (!call) { | ||
245 | status = -ENOMEM; | ||
246 | goto out_restore; | ||
247 | } | ||
248 | call->a_flags = RPC_TASK_ASYNC; | 194 | call->a_flags = RPC_TASK_ASYNC; |
249 | } else { | ||
250 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
251 | memset(call, 0, sizeof(*call)); | ||
252 | locks_init_lock(&call->a_args.lock.fl); | ||
253 | locks_init_lock(&call->a_res.lock.fl); | ||
254 | } | 195 | } |
255 | call->a_host = host; | 196 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
256 | |||
257 | nlmclnt_locks_init_private(fl, host); | ||
258 | |||
259 | /* Set up the argument struct */ | ||
260 | nlmclnt_setlockargs(call, fl); | ||
261 | 197 | ||
262 | if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { | 198 | if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { |
263 | if (fl->fl_type != F_UNLCK) { | 199 | if (fl->fl_type != F_UNLCK) { |
@@ -270,41 +206,58 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) | |||
270 | else | 206 | else |
271 | status = -EINVAL; | 207 | status = -EINVAL; |
272 | 208 | ||
273 | out_restore: | 209 | fl->fl_ops->fl_release_private(fl); |
210 | fl->fl_ops = NULL; | ||
211 | |||
274 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 212 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
275 | current->blocked = oldset; | 213 | current->blocked = oldset; |
276 | recalc_sigpending(); | 214 | recalc_sigpending(); |
277 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 215 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
278 | 216 | ||
279 | done: | ||
280 | dprintk("lockd: clnt proc returns %d\n", status); | 217 | dprintk("lockd: clnt proc returns %d\n", status); |
281 | nlm_release_host(host); | ||
282 | return status; | 218 | return status; |
283 | } | 219 | } |
284 | EXPORT_SYMBOL(nlmclnt_proc); | 220 | EXPORT_SYMBOL(nlmclnt_proc); |
285 | 221 | ||
286 | /* | 222 | /* |
287 | * Allocate an NLM RPC call struct | 223 | * Allocate an NLM RPC call struct |
224 | * | ||
225 | * Note: the caller must hold a reference to host. In case of failure, | ||
226 | * this reference will be released. | ||
288 | */ | 227 | */ |
289 | struct nlm_rqst * | 228 | struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) |
290 | nlmclnt_alloc_call(void) | ||
291 | { | 229 | { |
292 | struct nlm_rqst *call; | 230 | struct nlm_rqst *call; |
293 | 231 | ||
294 | while (!signalled()) { | 232 | for(;;) { |
295 | call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL); | 233 | call = kzalloc(sizeof(*call), GFP_KERNEL); |
296 | if (call) { | 234 | if (call != NULL) { |
297 | memset(call, 0, sizeof(*call)); | ||
298 | locks_init_lock(&call->a_args.lock.fl); | 235 | locks_init_lock(&call->a_args.lock.fl); |
299 | locks_init_lock(&call->a_res.lock.fl); | 236 | locks_init_lock(&call->a_res.lock.fl); |
237 | call->a_host = host; | ||
300 | return call; | 238 | return call; |
301 | } | 239 | } |
302 | printk("nlmclnt_alloc_call: failed, waiting for memory\n"); | 240 | if (signalled()) |
241 | break; | ||
242 | printk("nlm_alloc_call: failed, waiting for memory\n"); | ||
303 | schedule_timeout_interruptible(5*HZ); | 243 | schedule_timeout_interruptible(5*HZ); |
304 | } | 244 | } |
245 | nlm_release_host(host); | ||
305 | return NULL; | 246 | return NULL; |
306 | } | 247 | } |
307 | 248 | ||
249 | void nlm_release_call(struct nlm_rqst *call) | ||
250 | { | ||
251 | nlm_release_host(call->a_host); | ||
252 | nlmclnt_release_lockargs(call); | ||
253 | kfree(call); | ||
254 | } | ||
255 | |||
256 | static void nlmclnt_rpc_release(void *data) | ||
257 | { | ||
258 | return nlm_release_call(data); | ||
259 | } | ||
260 | |||
308 | static int nlm_wait_on_grace(wait_queue_head_t *queue) | 261 | static int nlm_wait_on_grace(wait_queue_head_t *queue) |
309 | { | 262 | { |
310 | DEFINE_WAIT(wait); | 263 | DEFINE_WAIT(wait); |
@@ -401,57 +354,45 @@ in_grace_period: | |||
401 | /* | 354 | /* |
402 | * Generic NLM call, async version. | 355 | * Generic NLM call, async version. |
403 | */ | 356 | */ |
404 | int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) | 357 | static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) |
405 | { | 358 | { |
406 | struct nlm_host *host = req->a_host; | 359 | struct nlm_host *host = req->a_host; |
407 | struct rpc_clnt *clnt; | 360 | struct rpc_clnt *clnt; |
408 | struct rpc_message msg = { | 361 | int status = -ENOLCK; |
409 | .rpc_argp = &req->a_args, | ||
410 | .rpc_resp = &req->a_res, | ||
411 | }; | ||
412 | int status; | ||
413 | 362 | ||
414 | dprintk("lockd: call procedure %d on %s (async)\n", | 363 | dprintk("lockd: call procedure %d on %s (async)\n", |
415 | (int)proc, host->h_name); | 364 | (int)proc, host->h_name); |
416 | 365 | ||
417 | /* If we have no RPC client yet, create one. */ | 366 | /* If we have no RPC client yet, create one. */ |
418 | if ((clnt = nlm_bind_host(host)) == NULL) | 367 | clnt = nlm_bind_host(host); |
419 | return -ENOLCK; | 368 | if (clnt == NULL) |
420 | msg.rpc_proc = &clnt->cl_procinfo[proc]; | 369 | goto out_err; |
370 | msg->rpc_proc = &clnt->cl_procinfo[proc]; | ||
421 | 371 | ||
422 | /* bootstrap and kick off the async RPC call */ | 372 | /* bootstrap and kick off the async RPC call */ |
423 | status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req); | 373 | status = rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req); |
424 | 374 | if (status == 0) | |
375 | return 0; | ||
376 | out_err: | ||
377 | nlm_release_call(req); | ||
425 | return status; | 378 | return status; |
426 | } | 379 | } |
427 | 380 | ||
428 | static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) | 381 | int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) |
429 | { | 382 | { |
430 | struct nlm_host *host = req->a_host; | ||
431 | struct rpc_clnt *clnt; | ||
432 | struct nlm_args *argp = &req->a_args; | ||
433 | struct nlm_res *resp = &req->a_res; | ||
434 | struct rpc_message msg = { | 383 | struct rpc_message msg = { |
435 | .rpc_argp = argp, | 384 | .rpc_argp = &req->a_args, |
436 | .rpc_resp = resp, | 385 | .rpc_resp = &req->a_res, |
437 | }; | 386 | }; |
438 | int status; | 387 | return __nlm_async_call(req, proc, &msg, tk_ops); |
439 | 388 | } | |
440 | dprintk("lockd: call procedure %d on %s (async)\n", | ||
441 | (int)proc, host->h_name); | ||
442 | |||
443 | /* If we have no RPC client yet, create one. */ | ||
444 | if ((clnt = nlm_bind_host(host)) == NULL) | ||
445 | return -ENOLCK; | ||
446 | msg.rpc_proc = &clnt->cl_procinfo[proc]; | ||
447 | 389 | ||
448 | /* Increment host refcount */ | 390 | int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) |
449 | nlm_get_host(host); | 391 | { |
450 | /* bootstrap and kick off the async RPC call */ | 392 | struct rpc_message msg = { |
451 | status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req); | 393 | .rpc_argp = &req->a_res, |
452 | if (status < 0) | 394 | }; |
453 | nlm_release_host(host); | 395 | return __nlm_async_call(req, proc, &msg, tk_ops); |
454 | return status; | ||
455 | } | 396 | } |
456 | 397 | ||
457 | /* | 398 | /* |
@@ -463,36 +404,41 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) | |||
463 | int status; | 404 | int status; |
464 | 405 | ||
465 | status = nlmclnt_call(req, NLMPROC_TEST); | 406 | status = nlmclnt_call(req, NLMPROC_TEST); |
466 | nlmclnt_release_lockargs(req); | ||
467 | if (status < 0) | 407 | if (status < 0) |
468 | return status; | 408 | goto out; |
469 | 409 | ||
470 | status = req->a_res.status; | 410 | switch (req->a_res.status) { |
471 | if (status == NLM_LCK_GRANTED) { | 411 | case NLM_LCK_GRANTED: |
472 | fl->fl_type = F_UNLCK; | 412 | fl->fl_type = F_UNLCK; |
473 | } if (status == NLM_LCK_DENIED) { | 413 | break; |
474 | /* | 414 | case NLM_LCK_DENIED: |
475 | * Report the conflicting lock back to the application. | 415 | /* |
476 | */ | 416 | * Report the conflicting lock back to the application. |
477 | locks_copy_lock(fl, &req->a_res.lock.fl); | 417 | */ |
478 | fl->fl_pid = 0; | 418 | fl->fl_start = req->a_res.lock.fl.fl_start; |
479 | } else { | 419 | fl->fl_end = req->a_res.lock.fl.fl_start; |
480 | return nlm_stat_to_errno(req->a_res.status); | 420 | fl->fl_type = req->a_res.lock.fl.fl_type; |
421 | fl->fl_pid = 0; | ||
422 | break; | ||
423 | default: | ||
424 | status = nlm_stat_to_errno(req->a_res.status); | ||
481 | } | 425 | } |
482 | 426 | out: | |
483 | return 0; | 427 | nlm_release_call(req); |
428 | return status; | ||
484 | } | 429 | } |
485 | 430 | ||
486 | static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) | 431 | static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) |
487 | { | 432 | { |
488 | memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl)); | 433 | new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; |
489 | nlm_get_lockowner(new->fl_u.nfs_fl.owner); | 434 | new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); |
435 | list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); | ||
490 | } | 436 | } |
491 | 437 | ||
492 | static void nlmclnt_locks_release_private(struct file_lock *fl) | 438 | static void nlmclnt_locks_release_private(struct file_lock *fl) |
493 | { | 439 | { |
440 | list_del(&fl->fl_u.nfs_fl.list); | ||
494 | nlm_put_lockowner(fl->fl_u.nfs_fl.owner); | 441 | nlm_put_lockowner(fl->fl_u.nfs_fl.owner); |
495 | fl->fl_ops = NULL; | ||
496 | } | 442 | } |
497 | 443 | ||
498 | static struct file_lock_operations nlmclnt_lock_ops = { | 444 | static struct file_lock_operations nlmclnt_lock_ops = { |
@@ -504,8 +450,8 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho | |||
504 | { | 450 | { |
505 | BUG_ON(fl->fl_ops != NULL); | 451 | BUG_ON(fl->fl_ops != NULL); |
506 | fl->fl_u.nfs_fl.state = 0; | 452 | fl->fl_u.nfs_fl.state = 0; |
507 | fl->fl_u.nfs_fl.flags = 0; | ||
508 | fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); | 453 | fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); |
454 | INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); | ||
509 | fl->fl_ops = &nlmclnt_lock_ops; | 455 | fl->fl_ops = &nlmclnt_lock_ops; |
510 | } | 456 | } |
511 | 457 | ||
@@ -552,57 +498,52 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) | |||
552 | { | 498 | { |
553 | struct nlm_host *host = req->a_host; | 499 | struct nlm_host *host = req->a_host; |
554 | struct nlm_res *resp = &req->a_res; | 500 | struct nlm_res *resp = &req->a_res; |
555 | long timeout; | 501 | struct nlm_wait *block = NULL; |
556 | int status; | 502 | int status = -ENOLCK; |
557 | 503 | ||
558 | if (!host->h_monitored && nsm_monitor(host) < 0) { | 504 | if (!host->h_monitored && nsm_monitor(host) < 0) { |
559 | printk(KERN_NOTICE "lockd: failed to monitor %s\n", | 505 | printk(KERN_NOTICE "lockd: failed to monitor %s\n", |
560 | host->h_name); | 506 | host->h_name); |
561 | status = -ENOLCK; | ||
562 | goto out; | 507 | goto out; |
563 | } | 508 | } |
564 | 509 | ||
565 | if (req->a_args.block) { | 510 | block = nlmclnt_prepare_block(host, fl); |
566 | status = nlmclnt_prepare_block(req, host, fl); | ||
567 | if (status < 0) | ||
568 | goto out; | ||
569 | } | ||
570 | for(;;) { | 511 | for(;;) { |
571 | status = nlmclnt_call(req, NLMPROC_LOCK); | 512 | status = nlmclnt_call(req, NLMPROC_LOCK); |
572 | if (status < 0) | 513 | if (status < 0) |
573 | goto out_unblock; | 514 | goto out_unblock; |
574 | if (resp->status != NLM_LCK_BLOCKED) | 515 | if (!req->a_args.block) |
575 | break; | 516 | break; |
576 | /* Wait on an NLM blocking lock */ | ||
577 | timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT); | ||
578 | /* Did a reclaimer thread notify us of a server reboot? */ | 517 | /* Did a reclaimer thread notify us of a server reboot? */ |
579 | if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) | 518 | if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) |
580 | continue; | 519 | continue; |
581 | if (resp->status != NLM_LCK_BLOCKED) | 520 | if (resp->status != NLM_LCK_BLOCKED) |
582 | break; | 521 | break; |
583 | if (timeout >= 0) | 522 | /* Wait on an NLM blocking lock */ |
584 | continue; | 523 | status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); |
585 | /* We were interrupted. Send a CANCEL request to the server | 524 | /* if we were interrupted. Send a CANCEL request to the server |
586 | * and exit | 525 | * and exit |
587 | */ | 526 | */ |
588 | status = (int)timeout; | 527 | if (status < 0) |
589 | goto out_unblock; | 528 | goto out_unblock; |
529 | if (resp->status != NLM_LCK_BLOCKED) | ||
530 | break; | ||
590 | } | 531 | } |
591 | 532 | ||
592 | if (resp->status == NLM_LCK_GRANTED) { | 533 | if (resp->status == NLM_LCK_GRANTED) { |
593 | fl->fl_u.nfs_fl.state = host->h_state; | 534 | fl->fl_u.nfs_fl.state = host->h_state; |
594 | fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED; | ||
595 | fl->fl_flags |= FL_SLEEP; | 535 | fl->fl_flags |= FL_SLEEP; |
536 | /* Ensure the resulting lock will get added to granted list */ | ||
596 | do_vfs_lock(fl); | 537 | do_vfs_lock(fl); |
597 | } | 538 | } |
598 | status = nlm_stat_to_errno(resp->status); | 539 | status = nlm_stat_to_errno(resp->status); |
599 | out_unblock: | 540 | out_unblock: |
600 | nlmclnt_finish_block(req); | 541 | nlmclnt_finish_block(block); |
601 | /* Cancel the blocked request if it is still pending */ | 542 | /* Cancel the blocked request if it is still pending */ |
602 | if (resp->status == NLM_LCK_BLOCKED) | 543 | if (resp->status == NLM_LCK_BLOCKED) |
603 | nlmclnt_cancel(host, req->a_args.block, fl); | 544 | nlmclnt_cancel(host, req->a_args.block, fl); |
604 | out: | 545 | out: |
605 | nlmclnt_release_lockargs(req); | 546 | nlm_release_call(req); |
606 | return status; | 547 | return status; |
607 | } | 548 | } |
608 | 549 | ||
@@ -658,10 +599,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) | |||
658 | struct nlm_res *resp = &req->a_res; | 599 | struct nlm_res *resp = &req->a_res; |
659 | int status; | 600 | int status; |
660 | 601 | ||
661 | /* Clean the GRANTED flag now so the lock doesn't get | ||
662 | * reclaimed while we're stuck in the unlock call. */ | ||
663 | fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED; | ||
664 | |||
665 | /* | 602 | /* |
666 | * Note: the server is supposed to either grant us the unlock | 603 | * Note: the server is supposed to either grant us the unlock |
667 | * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either | 604 | * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either |
@@ -669,32 +606,24 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) | |||
669 | */ | 606 | */ |
670 | do_vfs_lock(fl); | 607 | do_vfs_lock(fl); |
671 | 608 | ||
672 | if (req->a_flags & RPC_TASK_ASYNC) { | 609 | if (req->a_flags & RPC_TASK_ASYNC) |
673 | status = nlmclnt_async_call(req, NLMPROC_UNLOCK, | 610 | return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); |
674 | &nlmclnt_unlock_ops); | ||
675 | /* Hrmf... Do the unlock early since locks_remove_posix() | ||
676 | * really expects us to free the lock synchronously */ | ||
677 | if (status < 0) { | ||
678 | nlmclnt_release_lockargs(req); | ||
679 | kfree(req); | ||
680 | } | ||
681 | return status; | ||
682 | } | ||
683 | 611 | ||
684 | status = nlmclnt_call(req, NLMPROC_UNLOCK); | 612 | status = nlmclnt_call(req, NLMPROC_UNLOCK); |
685 | nlmclnt_release_lockargs(req); | ||
686 | if (status < 0) | 613 | if (status < 0) |
687 | return status; | 614 | goto out; |
688 | 615 | ||
616 | status = 0; | ||
689 | if (resp->status == NLM_LCK_GRANTED) | 617 | if (resp->status == NLM_LCK_GRANTED) |
690 | return 0; | 618 | goto out; |
691 | 619 | ||
692 | if (resp->status != NLM_LCK_DENIED_NOLOCKS) | 620 | if (resp->status != NLM_LCK_DENIED_NOLOCKS) |
693 | printk("lockd: unexpected unlock status: %d\n", resp->status); | 621 | printk("lockd: unexpected unlock status: %d\n", resp->status); |
694 | |||
695 | /* What to do now? I'm out of my depth... */ | 622 | /* What to do now? I'm out of my depth... */ |
696 | 623 | status = -ENOLCK; | |
697 | return -ENOLCK; | 624 | out: |
625 | nlm_release_call(req); | ||
626 | return status; | ||
698 | } | 627 | } |
699 | 628 | ||
700 | static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) | 629 | static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) |
@@ -716,9 +645,6 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) | |||
716 | if (status != NLM_LCK_GRANTED) | 645 | if (status != NLM_LCK_GRANTED) |
717 | printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); | 646 | printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); |
718 | die: | 647 | die: |
719 | nlm_release_host(req->a_host); | ||
720 | nlmclnt_release_lockargs(req); | ||
721 | kfree(req); | ||
722 | return; | 648 | return; |
723 | retry_rebind: | 649 | retry_rebind: |
724 | nlm_rebind_host(req->a_host); | 650 | nlm_rebind_host(req->a_host); |
@@ -728,6 +654,7 @@ die: | |||
728 | 654 | ||
729 | static const struct rpc_call_ops nlmclnt_unlock_ops = { | 655 | static const struct rpc_call_ops nlmclnt_unlock_ops = { |
730 | .rpc_call_done = nlmclnt_unlock_callback, | 656 | .rpc_call_done = nlmclnt_unlock_callback, |
657 | .rpc_release = nlmclnt_rpc_release, | ||
731 | }; | 658 | }; |
732 | 659 | ||
733 | /* | 660 | /* |
@@ -749,20 +676,15 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl | |||
749 | recalc_sigpending(); | 676 | recalc_sigpending(); |
750 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 677 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
751 | 678 | ||
752 | req = nlmclnt_alloc_call(); | 679 | req = nlm_alloc_call(nlm_get_host(host)); |
753 | if (!req) | 680 | if (!req) |
754 | return -ENOMEM; | 681 | return -ENOMEM; |
755 | req->a_host = host; | ||
756 | req->a_flags = RPC_TASK_ASYNC; | 682 | req->a_flags = RPC_TASK_ASYNC; |
757 | 683 | ||
758 | nlmclnt_setlockargs(req, fl); | 684 | nlmclnt_setlockargs(req, fl); |
759 | req->a_args.block = block; | 685 | req->a_args.block = block; |
760 | 686 | ||
761 | status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); | 687 | status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); |
762 | if (status < 0) { | ||
763 | nlmclnt_release_lockargs(req); | ||
764 | kfree(req); | ||
765 | } | ||
766 | 688 | ||
767 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 689 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
768 | current->blocked = oldset; | 690 | current->blocked = oldset; |
@@ -791,6 +713,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) | |||
791 | switch (req->a_res.status) { | 713 | switch (req->a_res.status) { |
792 | case NLM_LCK_GRANTED: | 714 | case NLM_LCK_GRANTED: |
793 | case NLM_LCK_DENIED_GRACE_PERIOD: | 715 | case NLM_LCK_DENIED_GRACE_PERIOD: |
716 | case NLM_LCK_DENIED: | ||
794 | /* Everything's good */ | 717 | /* Everything's good */ |
795 | break; | 718 | break; |
796 | case NLM_LCK_DENIED_NOLOCKS: | 719 | case NLM_LCK_DENIED_NOLOCKS: |
@@ -802,9 +725,6 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) | |||
802 | } | 725 | } |
803 | 726 | ||
804 | die: | 727 | die: |
805 | nlm_release_host(req->a_host); | ||
806 | nlmclnt_release_lockargs(req); | ||
807 | kfree(req); | ||
808 | return; | 728 | return; |
809 | 729 | ||
810 | retry_cancel: | 730 | retry_cancel: |
@@ -818,6 +738,7 @@ retry_cancel: | |||
818 | 738 | ||
819 | static const struct rpc_call_ops nlmclnt_cancel_ops = { | 739 | static const struct rpc_call_ops nlmclnt_cancel_ops = { |
820 | .rpc_call_done = nlmclnt_cancel_callback, | 740 | .rpc_call_done = nlmclnt_cancel_callback, |
741 | .rpc_release = nlmclnt_rpc_release, | ||
821 | }; | 742 | }; |
822 | 743 | ||
823 | /* | 744 | /* |
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 82f7a0b1d8ae..112ebf8b8dfe 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -123,6 +123,8 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, | |||
123 | nlm_hosts[hash] = host; | 123 | nlm_hosts[hash] = host; |
124 | INIT_LIST_HEAD(&host->h_lockowners); | 124 | INIT_LIST_HEAD(&host->h_lockowners); |
125 | spin_lock_init(&host->h_lock); | 125 | spin_lock_init(&host->h_lock); |
126 | INIT_LIST_HEAD(&host->h_granted); | ||
127 | INIT_LIST_HEAD(&host->h_reclaim); | ||
126 | 128 | ||
127 | if (++nrhosts > NLM_HOST_MAX) | 129 | if (++nrhosts > NLM_HOST_MAX) |
128 | next_gc = 0; | 130 | next_gc = 0; |
@@ -191,11 +193,12 @@ nlm_bind_host(struct nlm_host *host) | |||
191 | xprt->resvport = 1; /* NLM requires a reserved port */ | 193 | xprt->resvport = 1; /* NLM requires a reserved port */ |
192 | 194 | ||
193 | /* Existing NLM servers accept AUTH_UNIX only */ | 195 | /* Existing NLM servers accept AUTH_UNIX only */ |
194 | clnt = rpc_create_client(xprt, host->h_name, &nlm_program, | 196 | clnt = rpc_new_client(xprt, host->h_name, &nlm_program, |
195 | host->h_version, RPC_AUTH_UNIX); | 197 | host->h_version, RPC_AUTH_UNIX); |
196 | if (IS_ERR(clnt)) | 198 | if (IS_ERR(clnt)) |
197 | goto forgetit; | 199 | goto forgetit; |
198 | clnt->cl_autobind = 1; /* turn on pmap queries */ | 200 | clnt->cl_autobind = 1; /* turn on pmap queries */ |
201 | clnt->cl_softrtry = 1; /* All queries are soft */ | ||
199 | 202 | ||
200 | host->h_rpcclnt = clnt; | 203 | host->h_rpcclnt = clnt; |
201 | } | 204 | } |
@@ -242,8 +245,12 @@ void nlm_release_host(struct nlm_host *host) | |||
242 | { | 245 | { |
243 | if (host != NULL) { | 246 | if (host != NULL) { |
244 | dprintk("lockd: release host %s\n", host->h_name); | 247 | dprintk("lockd: release host %s\n", host->h_name); |
245 | atomic_dec(&host->h_count); | ||
246 | BUG_ON(atomic_read(&host->h_count) < 0); | 248 | BUG_ON(atomic_read(&host->h_count) < 0); |
249 | if (atomic_dec_and_test(&host->h_count)) { | ||
250 | BUG_ON(!list_empty(&host->h_lockowners)); | ||
251 | BUG_ON(!list_empty(&host->h_granted)); | ||
252 | BUG_ON(!list_empty(&host->h_reclaim)); | ||
253 | } | ||
247 | } | 254 | } |
248 | } | 255 | } |
249 | 256 | ||
@@ -331,7 +338,6 @@ nlm_gc_hosts(void) | |||
331 | rpc_destroy_client(host->h_rpcclnt); | 338 | rpc_destroy_client(host->h_rpcclnt); |
332 | } | 339 | } |
333 | } | 340 | } |
334 | BUG_ON(!list_empty(&host->h_lockowners)); | ||
335 | kfree(host); | 341 | kfree(host); |
336 | nrhosts--; | 342 | nrhosts--; |
337 | } | 343 | } |
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index a89cb8aa2c88..3fc683f46b3e 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -35,6 +35,10 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res) | |||
35 | struct rpc_clnt *clnt; | 35 | struct rpc_clnt *clnt; |
36 | int status; | 36 | int status; |
37 | struct nsm_args args; | 37 | struct nsm_args args; |
38 | struct rpc_message msg = { | ||
39 | .rpc_argp = &args, | ||
40 | .rpc_resp = res, | ||
41 | }; | ||
38 | 42 | ||
39 | clnt = nsm_create(); | 43 | clnt = nsm_create(); |
40 | if (IS_ERR(clnt)) { | 44 | if (IS_ERR(clnt)) { |
@@ -49,7 +53,8 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res) | |||
49 | args.proc = NLMPROC_NSM_NOTIFY; | 53 | args.proc = NLMPROC_NSM_NOTIFY; |
50 | memset(res, 0, sizeof(*res)); | 54 | memset(res, 0, sizeof(*res)); |
51 | 55 | ||
52 | status = rpc_call(clnt, proc, &args, res, 0); | 56 | msg.rpc_proc = &clnt->cl_procinfo[proc]; |
57 | status = rpc_call_sync(clnt, &msg, 0); | ||
53 | if (status < 0) | 58 | if (status < 0) |
54 | printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n", | 59 | printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n", |
55 | status); | 60 | status); |
@@ -214,12 +219,16 @@ static struct rpc_procinfo nsm_procedures[] = { | |||
214 | .p_encode = (kxdrproc_t) xdr_encode_mon, | 219 | .p_encode = (kxdrproc_t) xdr_encode_mon, |
215 | .p_decode = (kxdrproc_t) xdr_decode_stat_res, | 220 | .p_decode = (kxdrproc_t) xdr_decode_stat_res, |
216 | .p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2, | 221 | .p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2, |
222 | .p_statidx = SM_MON, | ||
223 | .p_name = "MONITOR", | ||
217 | }, | 224 | }, |
218 | [SM_UNMON] = { | 225 | [SM_UNMON] = { |
219 | .p_proc = SM_UNMON, | 226 | .p_proc = SM_UNMON, |
220 | .p_encode = (kxdrproc_t) xdr_encode_unmon, | 227 | .p_encode = (kxdrproc_t) xdr_encode_unmon, |
221 | .p_decode = (kxdrproc_t) xdr_decode_stat, | 228 | .p_decode = (kxdrproc_t) xdr_decode_stat, |
222 | .p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, | 229 | .p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, |
230 | .p_statidx = SM_UNMON, | ||
231 | .p_name = "UNMONITOR", | ||
223 | }, | 232 | }, |
224 | }; | 233 | }; |
225 | 234 | ||
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index b10f913aa06a..a2dd9ccb9b32 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -21,10 +21,6 @@ | |||
21 | 21 | ||
22 | #define NLMDBG_FACILITY NLMDBG_CLIENT | 22 | #define NLMDBG_FACILITY NLMDBG_CLIENT |
23 | 23 | ||
24 | static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *); | ||
25 | |||
26 | static const struct rpc_call_ops nlm4svc_callback_ops; | ||
27 | |||
28 | /* | 24 | /* |
29 | * Obtain client and file from arguments | 25 | * Obtain client and file from arguments |
30 | */ | 26 | */ |
@@ -234,83 +230,89 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
234 | } | 230 | } |
235 | 231 | ||
236 | /* | 232 | /* |
233 | * This is the generic lockd callback for async RPC calls | ||
234 | */ | ||
235 | static void nlm4svc_callback_exit(struct rpc_task *task, void *data) | ||
236 | { | ||
237 | dprintk("lockd: %4d callback returned %d\n", task->tk_pid, | ||
238 | -task->tk_status); | ||
239 | } | ||
240 | |||
241 | static void nlm4svc_callback_release(void *data) | ||
242 | { | ||
243 | nlm_release_call(data); | ||
244 | } | ||
245 | |||
246 | static const struct rpc_call_ops nlm4svc_callback_ops = { | ||
247 | .rpc_call_done = nlm4svc_callback_exit, | ||
248 | .rpc_release = nlm4svc_callback_release, | ||
249 | }; | ||
250 | |||
251 | /* | ||
237 | * `Async' versions of the above service routines. They aren't really, | 252 | * `Async' versions of the above service routines. They aren't really, |
238 | * because we send the callback before the reply proper. I hope this | 253 | * because we send the callback before the reply proper. I hope this |
239 | * doesn't break any clients. | 254 | * doesn't break any clients. |
240 | */ | 255 | */ |
241 | static int | 256 | static int nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, |
242 | nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | 257 | int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) |
243 | void *resp) | ||
244 | { | 258 | { |
245 | struct nlm_res res; | 259 | struct nlm_host *host; |
246 | u32 stat; | 260 | struct nlm_rqst *call; |
261 | int stat; | ||
247 | 262 | ||
248 | dprintk("lockd: TEST_MSG called\n"); | 263 | host = nlmsvc_lookup_host(rqstp); |
249 | memset(&res, 0, sizeof(res)); | 264 | if (host == NULL) |
265 | return rpc_system_err; | ||
266 | |||
267 | call = nlm_alloc_call(host); | ||
268 | if (call == NULL) | ||
269 | return rpc_system_err; | ||
250 | 270 | ||
251 | if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0) | 271 | stat = func(rqstp, argp, &call->a_res); |
252 | stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res); | 272 | if (stat != 0) { |
253 | return stat; | 273 | nlm_release_call(call); |
274 | return stat; | ||
275 | } | ||
276 | |||
277 | call->a_flags = RPC_TASK_ASYNC; | ||
278 | if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0) | ||
279 | return rpc_system_err; | ||
280 | return rpc_success; | ||
254 | } | 281 | } |
255 | 282 | ||
256 | static int | 283 | static int nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
257 | nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
258 | void *resp) | 284 | void *resp) |
259 | { | 285 | { |
260 | struct nlm_res res; | 286 | dprintk("lockd: TEST_MSG called\n"); |
261 | u32 stat; | 287 | return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test); |
288 | } | ||
262 | 289 | ||
290 | static int nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
291 | void *resp) | ||
292 | { | ||
263 | dprintk("lockd: LOCK_MSG called\n"); | 293 | dprintk("lockd: LOCK_MSG called\n"); |
264 | memset(&res, 0, sizeof(res)); | 294 | return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock); |
265 | |||
266 | if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0) | ||
267 | stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res); | ||
268 | return stat; | ||
269 | } | 295 | } |
270 | 296 | ||
271 | static int | 297 | static int nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
272 | nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
273 | void *resp) | 298 | void *resp) |
274 | { | 299 | { |
275 | struct nlm_res res; | ||
276 | u32 stat; | ||
277 | |||
278 | dprintk("lockd: CANCEL_MSG called\n"); | 300 | dprintk("lockd: CANCEL_MSG called\n"); |
279 | memset(&res, 0, sizeof(res)); | 301 | return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel); |
280 | |||
281 | if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0) | ||
282 | stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res); | ||
283 | return stat; | ||
284 | } | 302 | } |
285 | 303 | ||
286 | static int | 304 | static int nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
287 | nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
288 | void *resp) | 305 | void *resp) |
289 | { | 306 | { |
290 | struct nlm_res res; | ||
291 | u32 stat; | ||
292 | |||
293 | dprintk("lockd: UNLOCK_MSG called\n"); | 307 | dprintk("lockd: UNLOCK_MSG called\n"); |
294 | memset(&res, 0, sizeof(res)); | 308 | return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock); |
295 | |||
296 | if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0) | ||
297 | stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res); | ||
298 | return stat; | ||
299 | } | 309 | } |
300 | 310 | ||
301 | static int | 311 | static int nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
302 | nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
303 | void *resp) | 312 | void *resp) |
304 | { | 313 | { |
305 | struct nlm_res res; | ||
306 | u32 stat; | ||
307 | |||
308 | dprintk("lockd: GRANTED_MSG called\n"); | 314 | dprintk("lockd: GRANTED_MSG called\n"); |
309 | memset(&res, 0, sizeof(res)); | 315 | return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted); |
310 | |||
311 | if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0) | ||
312 | stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res); | ||
313 | return stat; | ||
314 | } | 316 | } |
315 | 317 | ||
316 | /* | 318 | /* |
@@ -472,55 +474,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, | |||
472 | 474 | ||
473 | 475 | ||
474 | /* | 476 | /* |
475 | * This is the generic lockd callback for async RPC calls | ||
476 | */ | ||
477 | static u32 | ||
478 | nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp) | ||
479 | { | ||
480 | struct nlm_host *host; | ||
481 | struct nlm_rqst *call; | ||
482 | |||
483 | if (!(call = nlmclnt_alloc_call())) | ||
484 | return rpc_system_err; | ||
485 | |||
486 | host = nlmclnt_lookup_host(&rqstp->rq_addr, | ||
487 | rqstp->rq_prot, rqstp->rq_vers); | ||
488 | if (!host) { | ||
489 | kfree(call); | ||
490 | return rpc_system_err; | ||
491 | } | ||
492 | |||
493 | call->a_flags = RPC_TASK_ASYNC; | ||
494 | call->a_host = host; | ||
495 | memcpy(&call->a_args, resp, sizeof(*resp)); | ||
496 | |||
497 | if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0) | ||
498 | goto error; | ||
499 | |||
500 | return rpc_success; | ||
501 | error: | ||
502 | kfree(call); | ||
503 | nlm_release_host(host); | ||
504 | return rpc_system_err; | ||
505 | } | ||
506 | |||
507 | static void nlm4svc_callback_exit(struct rpc_task *task, void *data) | ||
508 | { | ||
509 | struct nlm_rqst *call = data; | ||
510 | |||
511 | if (task->tk_status < 0) { | ||
512 | dprintk("lockd: %4d callback failed (errno = %d)\n", | ||
513 | task->tk_pid, -task->tk_status); | ||
514 | } | ||
515 | nlm_release_host(call->a_host); | ||
516 | kfree(call); | ||
517 | } | ||
518 | |||
519 | static const struct rpc_call_ops nlm4svc_callback_ops = { | ||
520 | .rpc_call_done = nlm4svc_callback_exit, | ||
521 | }; | ||
522 | |||
523 | /* | ||
524 | * NLM Server procedures. | 477 | * NLM Server procedures. |
525 | */ | 478 | */ |
526 | 479 | ||
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 9cfced65d4a2..d2b66bad7d50 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -39,9 +39,12 @@ | |||
39 | #define nlm_deadlock nlm_lck_denied | 39 | #define nlm_deadlock nlm_lck_denied |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | static void nlmsvc_release_block(struct nlm_block *block); | ||
42 | static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); | 43 | static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); |
43 | static int nlmsvc_remove_block(struct nlm_block *block); | 44 | static int nlmsvc_remove_block(struct nlm_block *block); |
44 | 45 | ||
46 | static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); | ||
47 | static void nlmsvc_freegrantargs(struct nlm_rqst *call); | ||
45 | static const struct rpc_call_ops nlmsvc_grant_ops; | 48 | static const struct rpc_call_ops nlmsvc_grant_ops; |
46 | 49 | ||
47 | /* | 50 | /* |
@@ -58,6 +61,7 @@ nlmsvc_insert_block(struct nlm_block *block, unsigned long when) | |||
58 | struct nlm_block **bp, *b; | 61 | struct nlm_block **bp, *b; |
59 | 62 | ||
60 | dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); | 63 | dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); |
64 | kref_get(&block->b_count); | ||
61 | if (block->b_queued) | 65 | if (block->b_queued) |
62 | nlmsvc_remove_block(block); | 66 | nlmsvc_remove_block(block); |
63 | bp = &nlm_blocked; | 67 | bp = &nlm_blocked; |
@@ -90,6 +94,7 @@ nlmsvc_remove_block(struct nlm_block *block) | |||
90 | if (b == block) { | 94 | if (b == block) { |
91 | *bp = block->b_next; | 95 | *bp = block->b_next; |
92 | block->b_queued = 0; | 96 | block->b_queued = 0; |
97 | nlmsvc_release_block(block); | ||
93 | return 1; | 98 | return 1; |
94 | } | 99 | } |
95 | } | 100 | } |
@@ -98,11 +103,10 @@ nlmsvc_remove_block(struct nlm_block *block) | |||
98 | } | 103 | } |
99 | 104 | ||
100 | /* | 105 | /* |
101 | * Find a block for a given lock and optionally remove it from | 106 | * Find a block for a given lock |
102 | * the list. | ||
103 | */ | 107 | */ |
104 | static struct nlm_block * | 108 | static struct nlm_block * |
105 | nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove) | 109 | nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) |
106 | { | 110 | { |
107 | struct nlm_block **head, *block; | 111 | struct nlm_block **head, *block; |
108 | struct file_lock *fl; | 112 | struct file_lock *fl; |
@@ -112,17 +116,14 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove) | |||
112 | (long long)lock->fl.fl_start, | 116 | (long long)lock->fl.fl_start, |
113 | (long long)lock->fl.fl_end, lock->fl.fl_type); | 117 | (long long)lock->fl.fl_end, lock->fl.fl_type); |
114 | for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) { | 118 | for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) { |
115 | fl = &block->b_call.a_args.lock.fl; | 119 | fl = &block->b_call->a_args.lock.fl; |
116 | dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", | 120 | dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", |
117 | block->b_file, fl->fl_pid, | 121 | block->b_file, fl->fl_pid, |
118 | (long long)fl->fl_start, | 122 | (long long)fl->fl_start, |
119 | (long long)fl->fl_end, fl->fl_type, | 123 | (long long)fl->fl_end, fl->fl_type, |
120 | nlmdbg_cookie2a(&block->b_call.a_args.cookie)); | 124 | nlmdbg_cookie2a(&block->b_call->a_args.cookie)); |
121 | if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { | 125 | if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { |
122 | if (remove) { | 126 | kref_get(&block->b_count); |
123 | *head = block->b_next; | ||
124 | block->b_queued = 0; | ||
125 | } | ||
126 | return block; | 127 | return block; |
127 | } | 128 | } |
128 | } | 129 | } |
@@ -150,11 +151,13 @@ nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin) | |||
150 | for (block = nlm_blocked; block; block = block->b_next) { | 151 | for (block = nlm_blocked; block; block = block->b_next) { |
151 | dprintk("cookie: head of blocked queue %p, block %p\n", | 152 | dprintk("cookie: head of blocked queue %p, block %p\n", |
152 | nlm_blocked, block); | 153 | nlm_blocked, block); |
153 | if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie) | 154 | if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie) |
154 | && nlm_cmp_addr(sin, &block->b_host->h_addr)) | 155 | && nlm_cmp_addr(sin, &block->b_host->h_addr)) |
155 | break; | 156 | break; |
156 | } | 157 | } |
157 | 158 | ||
159 | if (block != NULL) | ||
160 | kref_get(&block->b_count); | ||
158 | return block; | 161 | return block; |
159 | } | 162 | } |
160 | 163 | ||
@@ -174,27 +177,30 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, | |||
174 | { | 177 | { |
175 | struct nlm_block *block; | 178 | struct nlm_block *block; |
176 | struct nlm_host *host; | 179 | struct nlm_host *host; |
177 | struct nlm_rqst *call; | 180 | struct nlm_rqst *call = NULL; |
178 | 181 | ||
179 | /* Create host handle for callback */ | 182 | /* Create host handle for callback */ |
180 | host = nlmclnt_lookup_host(&rqstp->rq_addr, | 183 | host = nlmsvc_lookup_host(rqstp); |
181 | rqstp->rq_prot, rqstp->rq_vers); | ||
182 | if (host == NULL) | 184 | if (host == NULL) |
183 | return NULL; | 185 | return NULL; |
184 | 186 | ||
187 | call = nlm_alloc_call(host); | ||
188 | if (call == NULL) | ||
189 | return NULL; | ||
190 | |||
185 | /* Allocate memory for block, and initialize arguments */ | 191 | /* Allocate memory for block, and initialize arguments */ |
186 | if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL))) | 192 | block = kzalloc(sizeof(*block), GFP_KERNEL); |
193 | if (block == NULL) | ||
187 | goto failed; | 194 | goto failed; |
188 | memset(block, 0, sizeof(*block)); | 195 | kref_init(&block->b_count); |
189 | locks_init_lock(&block->b_call.a_args.lock.fl); | ||
190 | locks_init_lock(&block->b_call.a_res.lock.fl); | ||
191 | 196 | ||
192 | if (!nlmclnt_setgrantargs(&block->b_call, lock)) | 197 | if (!nlmsvc_setgrantargs(call, lock)) |
193 | goto failed_free; | 198 | goto failed_free; |
194 | 199 | ||
195 | /* Set notifier function for VFS, and init args */ | 200 | /* Set notifier function for VFS, and init args */ |
196 | block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; | 201 | call->a_args.lock.fl.fl_flags |= FL_SLEEP; |
197 | block->b_call.a_args.cookie = *cookie; /* see above */ | 202 | call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; |
203 | call->a_args.cookie = *cookie; /* see above */ | ||
198 | 204 | ||
199 | dprintk("lockd: created block %p...\n", block); | 205 | dprintk("lockd: created block %p...\n", block); |
200 | 206 | ||
@@ -202,22 +208,23 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, | |||
202 | block->b_daemon = rqstp->rq_server; | 208 | block->b_daemon = rqstp->rq_server; |
203 | block->b_host = host; | 209 | block->b_host = host; |
204 | block->b_file = file; | 210 | block->b_file = file; |
211 | file->f_count++; | ||
205 | 212 | ||
206 | /* Add to file's list of blocks */ | 213 | /* Add to file's list of blocks */ |
207 | block->b_fnext = file->f_blocks; | 214 | block->b_fnext = file->f_blocks; |
208 | file->f_blocks = block; | 215 | file->f_blocks = block; |
209 | 216 | ||
210 | /* Set up RPC arguments for callback */ | 217 | /* Set up RPC arguments for callback */ |
211 | call = &block->b_call; | 218 | block->b_call = call; |
212 | call->a_host = host; | ||
213 | call->a_flags = RPC_TASK_ASYNC; | 219 | call->a_flags = RPC_TASK_ASYNC; |
220 | call->a_block = block; | ||
214 | 221 | ||
215 | return block; | 222 | return block; |
216 | 223 | ||
217 | failed_free: | 224 | failed_free: |
218 | kfree(block); | 225 | kfree(block); |
219 | failed: | 226 | failed: |
220 | nlm_release_host(host); | 227 | nlm_release_call(call); |
221 | return NULL; | 228 | return NULL; |
222 | } | 229 | } |
223 | 230 | ||
@@ -227,29 +234,26 @@ failed: | |||
227 | * It is the caller's responsibility to check whether the file | 234 | * It is the caller's responsibility to check whether the file |
228 | * can be closed hereafter. | 235 | * can be closed hereafter. |
229 | */ | 236 | */ |
230 | static int | 237 | static int nlmsvc_unlink_block(struct nlm_block *block) |
231 | nlmsvc_delete_block(struct nlm_block *block, int unlock) | ||
232 | { | 238 | { |
233 | struct file_lock *fl = &block->b_call.a_args.lock.fl; | 239 | int status; |
234 | struct nlm_file *file = block->b_file; | 240 | dprintk("lockd: unlinking block %p...\n", block); |
235 | struct nlm_block **bp; | ||
236 | int status = 0; | ||
237 | |||
238 | dprintk("lockd: deleting block %p...\n", block); | ||
239 | 241 | ||
240 | /* Remove block from list */ | 242 | /* Remove block from list */ |
243 | status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); | ||
241 | nlmsvc_remove_block(block); | 244 | nlmsvc_remove_block(block); |
242 | if (unlock) | 245 | return status; |
243 | status = posix_unblock_lock(file->f_file, fl); | 246 | } |
244 | 247 | ||
245 | /* If the block is in the middle of a GRANT callback, | 248 | static void nlmsvc_free_block(struct kref *kref) |
246 | * don't kill it yet. */ | 249 | { |
247 | if (block->b_incall) { | 250 | struct nlm_block *block = container_of(kref, struct nlm_block, b_count); |
248 | nlmsvc_insert_block(block, NLM_NEVER); | 251 | struct nlm_file *file = block->b_file; |
249 | block->b_done = 1; | 252 | struct nlm_block **bp; |
250 | return status; | ||
251 | } | ||
252 | 253 | ||
254 | dprintk("lockd: freeing block %p...\n", block); | ||
255 | |||
256 | down(&file->f_sema); | ||
253 | /* Remove block from file's list of blocks */ | 257 | /* Remove block from file's list of blocks */ |
254 | for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) { | 258 | for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) { |
255 | if (*bp == block) { | 259 | if (*bp == block) { |
@@ -257,36 +261,93 @@ nlmsvc_delete_block(struct nlm_block *block, int unlock) | |||
257 | break; | 261 | break; |
258 | } | 262 | } |
259 | } | 263 | } |
264 | up(&file->f_sema); | ||
260 | 265 | ||
261 | if (block->b_host) | 266 | nlmsvc_freegrantargs(block->b_call); |
262 | nlm_release_host(block->b_host); | 267 | nlm_release_call(block->b_call); |
263 | nlmclnt_freegrantargs(&block->b_call); | 268 | nlm_release_file(block->b_file); |
264 | kfree(block); | 269 | kfree(block); |
265 | return status; | 270 | } |
271 | |||
272 | static void nlmsvc_release_block(struct nlm_block *block) | ||
273 | { | ||
274 | if (block != NULL) | ||
275 | kref_put(&block->b_count, nlmsvc_free_block); | ||
276 | } | ||
277 | |||
278 | static void nlmsvc_act_mark(struct nlm_host *host, struct nlm_file *file) | ||
279 | { | ||
280 | struct nlm_block *block; | ||
281 | |||
282 | down(&file->f_sema); | ||
283 | for (block = file->f_blocks; block != NULL; block = block->b_fnext) | ||
284 | block->b_host->h_inuse = 1; | ||
285 | up(&file->f_sema); | ||
286 | } | ||
287 | |||
288 | static void nlmsvc_act_unlock(struct nlm_host *host, struct nlm_file *file) | ||
289 | { | ||
290 | struct nlm_block *block; | ||
291 | |||
292 | restart: | ||
293 | down(&file->f_sema); | ||
294 | for (block = file->f_blocks; block != NULL; block = block->b_fnext) { | ||
295 | if (host != NULL && host != block->b_host) | ||
296 | continue; | ||
297 | if (!block->b_queued) | ||
298 | continue; | ||
299 | kref_get(&block->b_count); | ||
300 | up(&file->f_sema); | ||
301 | nlmsvc_unlink_block(block); | ||
302 | nlmsvc_release_block(block); | ||
303 | goto restart; | ||
304 | } | ||
305 | up(&file->f_sema); | ||
266 | } | 306 | } |
267 | 307 | ||
268 | /* | 308 | /* |
269 | * Loop over all blocks and perform the action specified. | 309 | * Loop over all blocks and perform the action specified. |
270 | * (NLM_ACT_CHECK handled by nlmsvc_inspect_file). | 310 | * (NLM_ACT_CHECK handled by nlmsvc_inspect_file). |
271 | */ | 311 | */ |
272 | int | 312 | void |
273 | nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action) | 313 | nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action) |
274 | { | 314 | { |
275 | struct nlm_block *block, *next; | 315 | if (action == NLM_ACT_MARK) |
276 | /* XXX: Will everything get cleaned up if we don't unlock here? */ | 316 | nlmsvc_act_mark(host, file); |
317 | else | ||
318 | nlmsvc_act_unlock(host, file); | ||
319 | } | ||
277 | 320 | ||
278 | down(&file->f_sema); | 321 | /* |
279 | for (block = file->f_blocks; block; block = next) { | 322 | * Initialize arguments for GRANTED call. The nlm_rqst structure |
280 | next = block->b_fnext; | 323 | * has been cleared already. |
281 | if (action == NLM_ACT_MARK) | 324 | */ |
282 | block->b_host->h_inuse = 1; | 325 | static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) |
283 | else if (action == NLM_ACT_UNLOCK) { | 326 | { |
284 | if (host == NULL || host == block->b_host) | 327 | locks_copy_lock(&call->a_args.lock.fl, &lock->fl); |
285 | nlmsvc_delete_block(block, 1); | 328 | memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); |
286 | } | 329 | call->a_args.lock.caller = system_utsname.nodename; |
330 | call->a_args.lock.oh.len = lock->oh.len; | ||
331 | |||
332 | /* set default data area */ | ||
333 | call->a_args.lock.oh.data = call->a_owner; | ||
334 | call->a_args.lock.svid = lock->fl.fl_pid; | ||
335 | |||
336 | if (lock->oh.len > NLMCLNT_OHSIZE) { | ||
337 | void *data = kmalloc(lock->oh.len, GFP_KERNEL); | ||
338 | if (!data) | ||
339 | return 0; | ||
340 | call->a_args.lock.oh.data = (u8 *) data; | ||
287 | } | 341 | } |
288 | up(&file->f_sema); | 342 | |
289 | return 0; | 343 | memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); |
344 | return 1; | ||
345 | } | ||
346 | |||
347 | static void nlmsvc_freegrantargs(struct nlm_rqst *call) | ||
348 | { | ||
349 | if (call->a_args.lock.oh.data != call->a_owner) | ||
350 | kfree(call->a_args.lock.oh.data); | ||
290 | } | 351 | } |
291 | 352 | ||
292 | /* | 353 | /* |
@@ -297,9 +358,9 @@ u32 | |||
297 | nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | 358 | nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, |
298 | struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) | 359 | struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) |
299 | { | 360 | { |
300 | struct file_lock *conflock; | 361 | struct nlm_block *block, *newblock = NULL; |
301 | struct nlm_block *block; | ||
302 | int error; | 362 | int error; |
363 | u32 ret; | ||
303 | 364 | ||
304 | dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", | 365 | dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", |
305 | file->f_file->f_dentry->d_inode->i_sb->s_id, | 366 | file->f_file->f_dentry->d_inode->i_sb->s_id, |
@@ -310,69 +371,65 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
310 | wait); | 371 | wait); |
311 | 372 | ||
312 | 373 | ||
313 | /* Get existing block (in case client is busy-waiting) */ | 374 | lock->fl.fl_flags &= ~FL_SLEEP; |
314 | block = nlmsvc_lookup_block(file, lock, 0); | ||
315 | |||
316 | lock->fl.fl_flags |= FL_LOCKD; | ||
317 | |||
318 | again: | 375 | again: |
319 | /* Lock file against concurrent access */ | 376 | /* Lock file against concurrent access */ |
320 | down(&file->f_sema); | 377 | down(&file->f_sema); |
378 | /* Get existing block (in case client is busy-waiting) */ | ||
379 | block = nlmsvc_lookup_block(file, lock); | ||
380 | if (block == NULL) { | ||
381 | if (newblock != NULL) | ||
382 | lock = &newblock->b_call->a_args.lock; | ||
383 | } else | ||
384 | lock = &block->b_call->a_args.lock; | ||
321 | 385 | ||
322 | if (!(conflock = posix_test_lock(file->f_file, &lock->fl))) { | 386 | error = posix_lock_file(file->f_file, &lock->fl); |
323 | error = posix_lock_file(file->f_file, &lock->fl); | 387 | lock->fl.fl_flags &= ~FL_SLEEP; |
324 | 388 | ||
325 | if (block) | 389 | dprintk("lockd: posix_lock_file returned %d\n", error); |
326 | nlmsvc_delete_block(block, 0); | ||
327 | up(&file->f_sema); | ||
328 | 390 | ||
329 | dprintk("lockd: posix_lock_file returned %d\n", -error); | 391 | switch(error) { |
330 | switch(-error) { | ||
331 | case 0: | 392 | case 0: |
332 | return nlm_granted; | 393 | ret = nlm_granted; |
333 | case EDEADLK: | 394 | goto out; |
334 | return nlm_deadlock; | 395 | case -EAGAIN: |
335 | case EAGAIN: | 396 | break; |
336 | return nlm_lck_denied; | 397 | case -EDEADLK: |
398 | ret = nlm_deadlock; | ||
399 | goto out; | ||
337 | default: /* includes ENOLCK */ | 400 | default: /* includes ENOLCK */ |
338 | return nlm_lck_denied_nolocks; | 401 | ret = nlm_lck_denied_nolocks; |
339 | } | 402 | goto out; |
340 | } | 403 | } |
341 | 404 | ||
342 | if (!wait) { | 405 | ret = nlm_lck_denied; |
343 | up(&file->f_sema); | 406 | if (!wait) |
344 | return nlm_lck_denied; | 407 | goto out; |
345 | } | ||
346 | 408 | ||
347 | if (posix_locks_deadlock(&lock->fl, conflock)) { | 409 | ret = nlm_lck_blocked; |
348 | up(&file->f_sema); | 410 | if (block != NULL) |
349 | return nlm_deadlock; | 411 | goto out; |
350 | } | ||
351 | 412 | ||
352 | /* If we don't have a block, create and initialize it. Then | 413 | /* If we don't have a block, create and initialize it. Then |
353 | * retry because we may have slept in kmalloc. */ | 414 | * retry because we may have slept in kmalloc. */ |
354 | /* We have to release f_sema as nlmsvc_create_block may try to | 415 | /* We have to release f_sema as nlmsvc_create_block may try to |
355 | * to claim it while doing host garbage collection */ | 416 | * to claim it while doing host garbage collection */ |
356 | if (block == NULL) { | 417 | if (newblock == NULL) { |
357 | up(&file->f_sema); | 418 | up(&file->f_sema); |
358 | dprintk("lockd: blocking on this lock (allocating).\n"); | 419 | dprintk("lockd: blocking on this lock (allocating).\n"); |
359 | if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie))) | 420 | if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie))) |
360 | return nlm_lck_denied_nolocks; | 421 | return nlm_lck_denied_nolocks; |
361 | goto again; | 422 | goto again; |
362 | } | 423 | } |
363 | 424 | ||
364 | /* Append to list of blocked */ | 425 | /* Append to list of blocked */ |
365 | nlmsvc_insert_block(block, NLM_NEVER); | 426 | nlmsvc_insert_block(newblock, NLM_NEVER); |
366 | 427 | out: | |
367 | if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) { | ||
368 | /* Now add block to block list of the conflicting lock | ||
369 | if we haven't done so. */ | ||
370 | dprintk("lockd: blocking on this lock.\n"); | ||
371 | posix_block_lock(conflock, &block->b_call.a_args.lock.fl); | ||
372 | } | ||
373 | |||
374 | up(&file->f_sema); | 428 | up(&file->f_sema); |
375 | return nlm_lck_blocked; | 429 | nlmsvc_release_block(newblock); |
430 | nlmsvc_release_block(block); | ||
431 | dprintk("lockd: nlmsvc_lock returned %u\n", ret); | ||
432 | return ret; | ||
376 | } | 433 | } |
377 | 434 | ||
378 | /* | 435 | /* |
@@ -382,8 +439,6 @@ u32 | |||
382 | nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, | 439 | nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, |
383 | struct nlm_lock *conflock) | 440 | struct nlm_lock *conflock) |
384 | { | 441 | { |
385 | struct file_lock *fl; | ||
386 | |||
387 | dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", | 442 | dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", |
388 | file->f_file->f_dentry->d_inode->i_sb->s_id, | 443 | file->f_file->f_dentry->d_inode->i_sb->s_id, |
389 | file->f_file->f_dentry->d_inode->i_ino, | 444 | file->f_file->f_dentry->d_inode->i_ino, |
@@ -391,13 +446,14 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, | |||
391 | (long long)lock->fl.fl_start, | 446 | (long long)lock->fl.fl_start, |
392 | (long long)lock->fl.fl_end); | 447 | (long long)lock->fl.fl_end); |
393 | 448 | ||
394 | if ((fl = posix_test_lock(file->f_file, &lock->fl)) != NULL) { | 449 | if (posix_test_lock(file->f_file, &lock->fl, &conflock->fl)) { |
395 | dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", | 450 | dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", |
396 | fl->fl_type, (long long)fl->fl_start, | 451 | conflock->fl.fl_type, |
397 | (long long)fl->fl_end); | 452 | (long long)conflock->fl.fl_start, |
453 | (long long)conflock->fl.fl_end); | ||
398 | conflock->caller = "somehost"; /* FIXME */ | 454 | conflock->caller = "somehost"; /* FIXME */ |
399 | conflock->oh.len = 0; /* don't return OH info */ | 455 | conflock->oh.len = 0; /* don't return OH info */ |
400 | conflock->fl = *fl; | 456 | conflock->svid = conflock->fl.fl_pid; |
401 | return nlm_lck_denied; | 457 | return nlm_lck_denied; |
402 | } | 458 | } |
403 | 459 | ||
@@ -453,9 +509,12 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock) | |||
453 | (long long)lock->fl.fl_end); | 509 | (long long)lock->fl.fl_end); |
454 | 510 | ||
455 | down(&file->f_sema); | 511 | down(&file->f_sema); |
456 | if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL) | 512 | block = nlmsvc_lookup_block(file, lock); |
457 | status = nlmsvc_delete_block(block, 1); | ||
458 | up(&file->f_sema); | 513 | up(&file->f_sema); |
514 | if (block != NULL) { | ||
515 | status = nlmsvc_unlink_block(block); | ||
516 | nlmsvc_release_block(block); | ||
517 | } | ||
459 | return status ? nlm_lck_denied : nlm_granted; | 518 | return status ? nlm_lck_denied : nlm_granted; |
460 | } | 519 | } |
461 | 520 | ||
@@ -473,7 +532,7 @@ nlmsvc_notify_blocked(struct file_lock *fl) | |||
473 | 532 | ||
474 | dprintk("lockd: VFS unblock notification for block %p\n", fl); | 533 | dprintk("lockd: VFS unblock notification for block %p\n", fl); |
475 | for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) { | 534 | for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) { |
476 | if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) { | 535 | if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { |
477 | nlmsvc_insert_block(block, 0); | 536 | nlmsvc_insert_block(block, 0); |
478 | svc_wake_up(block->b_daemon); | 537 | svc_wake_up(block->b_daemon); |
479 | return; | 538 | return; |
@@ -508,17 +567,13 @@ static void | |||
508 | nlmsvc_grant_blocked(struct nlm_block *block) | 567 | nlmsvc_grant_blocked(struct nlm_block *block) |
509 | { | 568 | { |
510 | struct nlm_file *file = block->b_file; | 569 | struct nlm_file *file = block->b_file; |
511 | struct nlm_lock *lock = &block->b_call.a_args.lock; | 570 | struct nlm_lock *lock = &block->b_call->a_args.lock; |
512 | struct file_lock *conflock; | ||
513 | int error; | 571 | int error; |
514 | 572 | ||
515 | dprintk("lockd: grant blocked lock %p\n", block); | 573 | dprintk("lockd: grant blocked lock %p\n", block); |
516 | 574 | ||
517 | /* First thing is lock the file */ | ||
518 | down(&file->f_sema); | ||
519 | |||
520 | /* Unlink block request from list */ | 575 | /* Unlink block request from list */ |
521 | nlmsvc_remove_block(block); | 576 | nlmsvc_unlink_block(block); |
522 | 577 | ||
523 | /* If b_granted is true this means we've been here before. | 578 | /* If b_granted is true this means we've been here before. |
524 | * Just retry the grant callback, possibly refreshing the RPC | 579 | * Just retry the grant callback, possibly refreshing the RPC |
@@ -529,24 +584,21 @@ nlmsvc_grant_blocked(struct nlm_block *block) | |||
529 | } | 584 | } |
530 | 585 | ||
531 | /* Try the lock operation again */ | 586 | /* Try the lock operation again */ |
532 | if ((conflock = posix_test_lock(file->f_file, &lock->fl)) != NULL) { | 587 | lock->fl.fl_flags |= FL_SLEEP; |
533 | /* Bummer, we blocked again */ | 588 | error = posix_lock_file(file->f_file, &lock->fl); |
589 | lock->fl.fl_flags &= ~FL_SLEEP; | ||
590 | |||
591 | switch (error) { | ||
592 | case 0: | ||
593 | break; | ||
594 | case -EAGAIN: | ||
534 | dprintk("lockd: lock still blocked\n"); | 595 | dprintk("lockd: lock still blocked\n"); |
535 | nlmsvc_insert_block(block, NLM_NEVER); | 596 | nlmsvc_insert_block(block, NLM_NEVER); |
536 | posix_block_lock(conflock, &lock->fl); | ||
537 | up(&file->f_sema); | ||
538 | return; | 597 | return; |
539 | } | 598 | default: |
540 | |||
541 | /* Alright, no conflicting lock. Now lock it for real. If the | ||
542 | * following yields an error, this is most probably due to low | ||
543 | * memory. Retry the lock in a few seconds. | ||
544 | */ | ||
545 | if ((error = posix_lock_file(file->f_file, &lock->fl)) < 0) { | ||
546 | printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", | 599 | printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", |
547 | -error, __FUNCTION__); | 600 | -error, __FUNCTION__); |
548 | nlmsvc_insert_block(block, 10 * HZ); | 601 | nlmsvc_insert_block(block, 10 * HZ); |
549 | up(&file->f_sema); | ||
550 | return; | 602 | return; |
551 | } | 603 | } |
552 | 604 | ||
@@ -554,17 +606,15 @@ callback: | |||
554 | /* Lock was granted by VFS. */ | 606 | /* Lock was granted by VFS. */ |
555 | dprintk("lockd: GRANTing blocked lock.\n"); | 607 | dprintk("lockd: GRANTing blocked lock.\n"); |
556 | block->b_granted = 1; | 608 | block->b_granted = 1; |
557 | block->b_incall = 1; | ||
558 | 609 | ||
559 | /* Schedule next grant callback in 30 seconds */ | 610 | /* Schedule next grant callback in 30 seconds */ |
560 | nlmsvc_insert_block(block, 30 * HZ); | 611 | nlmsvc_insert_block(block, 30 * HZ); |
561 | 612 | ||
562 | /* Call the client */ | 613 | /* Call the client */ |
563 | nlm_get_host(block->b_call.a_host); | 614 | kref_get(&block->b_count); |
564 | if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG, | 615 | if (nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, |
565 | &nlmsvc_grant_ops) < 0) | 616 | &nlmsvc_grant_ops) < 0) |
566 | nlm_release_host(block->b_call.a_host); | 617 | nlmsvc_release_block(block); |
567 | up(&file->f_sema); | ||
568 | } | 618 | } |
569 | 619 | ||
570 | /* | 620 | /* |
@@ -578,20 +628,10 @@ callback: | |||
578 | static void nlmsvc_grant_callback(struct rpc_task *task, void *data) | 628 | static void nlmsvc_grant_callback(struct rpc_task *task, void *data) |
579 | { | 629 | { |
580 | struct nlm_rqst *call = data; | 630 | struct nlm_rqst *call = data; |
581 | struct nlm_block *block; | 631 | struct nlm_block *block = call->a_block; |
582 | unsigned long timeout; | 632 | unsigned long timeout; |
583 | struct sockaddr_in *peer_addr = RPC_PEERADDR(task->tk_client); | ||
584 | 633 | ||
585 | dprintk("lockd: GRANT_MSG RPC callback\n"); | 634 | dprintk("lockd: GRANT_MSG RPC callback\n"); |
586 | dprintk("callback: looking for cookie %s, host (%u.%u.%u.%u)\n", | ||
587 | nlmdbg_cookie2a(&call->a_args.cookie), | ||
588 | NIPQUAD(peer_addr->sin_addr.s_addr)); | ||
589 | if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) { | ||
590 | dprintk("lockd: no block for cookie %s, host (%u.%u.%u.%u)\n", | ||
591 | nlmdbg_cookie2a(&call->a_args.cookie), | ||
592 | NIPQUAD(peer_addr->sin_addr.s_addr)); | ||
593 | return; | ||
594 | } | ||
595 | 635 | ||
596 | /* Technically, we should down the file semaphore here. Since we | 636 | /* Technically, we should down the file semaphore here. Since we |
597 | * move the block towards the head of the queue only, no harm | 637 | * move the block towards the head of the queue only, no harm |
@@ -608,13 +648,18 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) | |||
608 | } | 648 | } |
609 | nlmsvc_insert_block(block, timeout); | 649 | nlmsvc_insert_block(block, timeout); |
610 | svc_wake_up(block->b_daemon); | 650 | svc_wake_up(block->b_daemon); |
611 | block->b_incall = 0; | 651 | } |
612 | 652 | ||
613 | nlm_release_host(call->a_host); | 653 | void nlmsvc_grant_release(void *data) |
654 | { | ||
655 | struct nlm_rqst *call = data; | ||
656 | |||
657 | nlmsvc_release_block(call->a_block); | ||
614 | } | 658 | } |
615 | 659 | ||
616 | static const struct rpc_call_ops nlmsvc_grant_ops = { | 660 | static const struct rpc_call_ops nlmsvc_grant_ops = { |
617 | .rpc_call_done = nlmsvc_grant_callback, | 661 | .rpc_call_done = nlmsvc_grant_callback, |
662 | .rpc_release = nlmsvc_grant_release, | ||
618 | }; | 663 | }; |
619 | 664 | ||
620 | /* | 665 | /* |
@@ -634,25 +679,17 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status | |||
634 | return; | 679 | return; |
635 | file = block->b_file; | 680 | file = block->b_file; |
636 | 681 | ||
637 | file->f_count++; | ||
638 | down(&file->f_sema); | ||
639 | block = nlmsvc_find_block(cookie, &rqstp->rq_addr); | ||
640 | if (block) { | 682 | if (block) { |
641 | if (status == NLM_LCK_DENIED_GRACE_PERIOD) { | 683 | if (status == NLM_LCK_DENIED_GRACE_PERIOD) { |
642 | /* Try again in a couple of seconds */ | 684 | /* Try again in a couple of seconds */ |
643 | nlmsvc_insert_block(block, 10 * HZ); | 685 | nlmsvc_insert_block(block, 10 * HZ); |
644 | up(&file->f_sema); | ||
645 | } else { | 686 | } else { |
646 | /* Lock is now held by client, or has been rejected. | 687 | /* Lock is now held by client, or has been rejected. |
647 | * In both cases, the block should be removed. */ | 688 | * In both cases, the block should be removed. */ |
648 | up(&file->f_sema); | 689 | nlmsvc_unlink_block(block); |
649 | if (status == NLM_LCK_GRANTED) | ||
650 | nlmsvc_delete_block(block, 0); | ||
651 | else | ||
652 | nlmsvc_delete_block(block, 1); | ||
653 | } | 690 | } |
654 | } | 691 | } |
655 | nlm_release_file(file); | 692 | nlmsvc_release_block(block); |
656 | } | 693 | } |
657 | 694 | ||
658 | /* | 695 | /* |
@@ -675,10 +712,12 @@ nlmsvc_retry_blocked(void) | |||
675 | break; | 712 | break; |
676 | dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", | 713 | dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", |
677 | block, block->b_when, block->b_done); | 714 | block, block->b_when, block->b_done); |
715 | kref_get(&block->b_count); | ||
678 | if (block->b_done) | 716 | if (block->b_done) |
679 | nlmsvc_delete_block(block, 0); | 717 | nlmsvc_unlink_block(block); |
680 | else | 718 | else |
681 | nlmsvc_grant_blocked(block); | 719 | nlmsvc_grant_blocked(block); |
720 | nlmsvc_release_block(block); | ||
682 | } | 721 | } |
683 | 722 | ||
684 | if ((block = nlm_blocked) && block->b_when != NLM_NEVER) | 723 | if ((block = nlm_blocked) && block->b_when != NLM_NEVER) |
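[Editor's note: a minimal userspace sketch, not the kernel code itself, of the reference-counting scheme the svclock.c hunks above introduce. Every lookup or create path takes a reference on the nlm_block, and nlmsvc_release_block() frees it only when the last holder drops its count. The names below mirror the patch; the C11 atomics are stand-ins for the kernel's struct kref, and the struct layout is invented for illustration.]

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct nlm_block_sketch {
	atomic_int b_count;	/* stands in for the patch's kref b_count */
	int b_granted;
};

static struct nlm_block_sketch *block_create(void)
{
	struct nlm_block_sketch *b = calloc(1, sizeof(*b));

	if (b)
		atomic_init(&b->b_count, 1);	/* kref_init(&block->b_count) */
	return b;
}

static void block_get(struct nlm_block_sketch *b)
{
	atomic_fetch_add(&b->b_count, 1);	/* kref_get(&block->b_count) */
}

static void block_release(struct nlm_block_sketch *b)
{
	/* kref_put(&block->b_count, nlmsvc_free_block) */
	if (b && atomic_fetch_sub(&b->b_count, 1) == 1) {
		printf("freeing block %p\n", (void *)b);
		free(b);
	}
}

int main(void)
{
	struct nlm_block_sketch *b = block_create();

	block_get(b);		/* e.g. a lookup path takes its own reference */
	block_release(b);	/* lookup caller done, block survives */
	block_release(b);	/* original owner done: last put frees it */
	return 0;
}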
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 35681d9cf1fc..d210cf304e92 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c | |||
@@ -22,10 +22,6 @@ | |||
22 | 22 | ||
23 | #define NLMDBG_FACILITY NLMDBG_CLIENT | 23 | #define NLMDBG_FACILITY NLMDBG_CLIENT |
24 | 24 | ||
25 | static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *); | ||
26 | |||
27 | static const struct rpc_call_ops nlmsvc_callback_ops; | ||
28 | |||
29 | #ifdef CONFIG_LOCKD_V4 | 25 | #ifdef CONFIG_LOCKD_V4 |
30 | static u32 | 26 | static u32 |
31 | cast_to_nlm(u32 status, u32 vers) | 27 | cast_to_nlm(u32 status, u32 vers) |
@@ -262,83 +258,91 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
262 | } | 258 | } |
263 | 259 | ||
264 | /* | 260 | /* |
261 | * This is the generic lockd callback for async RPC calls | ||
262 | */ | ||
263 | static void nlmsvc_callback_exit(struct rpc_task *task, void *data) | ||
264 | { | ||
265 | dprintk("lockd: %4d callback returned %d\n", task->tk_pid, | ||
266 | -task->tk_status); | ||
267 | } | ||
268 | |||
269 | static void nlmsvc_callback_release(void *data) | ||
270 | { | ||
271 | nlm_release_call(data); | ||
272 | } | ||
273 | |||
274 | static const struct rpc_call_ops nlmsvc_callback_ops = { | ||
275 | .rpc_call_done = nlmsvc_callback_exit, | ||
276 | .rpc_release = nlmsvc_callback_release, | ||
277 | }; | ||
278 | |||
279 | /* | ||
265 | * `Async' versions of the above service routines. They aren't really, | 280 | * `Async' versions of the above service routines. They aren't really, |
266 | * because we send the callback before the reply proper. I hope this | 281 | * because we send the callback before the reply proper. I hope this |
267 | * doesn't break any clients. | 282 | * doesn't break any clients. |
268 | */ | 283 | */ |
269 | static int | 284 | static int nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, |
270 | nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | 285 | int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) |
271 | void *resp) | ||
272 | { | 286 | { |
273 | struct nlm_res res; | 287 | struct nlm_host *host; |
274 | u32 stat; | 288 | struct nlm_rqst *call; |
289 | int stat; | ||
275 | 290 | ||
276 | dprintk("lockd: TEST_MSG called\n"); | 291 | host = nlmsvc_lookup_host(rqstp); |
277 | memset(&res, 0, sizeof(res)); | 292 | if (host == NULL) |
293 | return rpc_system_err; | ||
278 | 294 | ||
279 | if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0) | 295 | call = nlm_alloc_call(host); |
280 | stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res); | 296 | if (call == NULL) |
281 | return stat; | 297 | return rpc_system_err; |
298 | |||
299 | stat = func(rqstp, argp, &call->a_res); | ||
300 | if (stat != 0) { | ||
301 | nlm_release_call(call); | ||
302 | return stat; | ||
303 | } | ||
304 | |||
305 | call->a_flags = RPC_TASK_ASYNC; | ||
306 | if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0) | ||
307 | return rpc_system_err; | ||
308 | return rpc_success; | ||
282 | } | 309 | } |
283 | 310 | ||
284 | static int | 311 | static int nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
285 | nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
286 | void *resp) | 312 | void *resp) |
287 | { | 313 | { |
288 | struct nlm_res res; | 314 | dprintk("lockd: TEST_MSG called\n"); |
289 | u32 stat; | 315 | return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test); |
316 | } | ||
290 | 317 | ||
318 | static int nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
319 | void *resp) | ||
320 | { | ||
291 | dprintk("lockd: LOCK_MSG called\n"); | 321 | dprintk("lockd: LOCK_MSG called\n"); |
292 | memset(&res, 0, sizeof(res)); | 322 | return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock); |
293 | |||
294 | if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0) | ||
295 | stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res); | ||
296 | return stat; | ||
297 | } | 323 | } |
298 | 324 | ||
299 | static int | 325 | static int nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
300 | nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | ||
301 | void *resp) | 326 | void *resp) |
302 | { | 327 | { |
303 | struct nlm_res res; | ||
304 | u32 stat; | ||
305 | |||
306 | dprintk("lockd: CANCEL_MSG called\n"); | 328 | dprintk("lockd: CANCEL_MSG called\n"); |
307 | memset(&res, 0, sizeof(res)); | 329 | return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel); |
308 | |||
309 | if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0) | ||
310 | stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res); | ||
311 | return stat; | ||
312 | } | 330 | } |
313 | 331 | ||
314 | static int | 332 | static int |
315 | nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | 333 | nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
316 | void *resp) | 334 | void *resp) |
317 | { | 335 | { |
318 | struct nlm_res res; | ||
319 | u32 stat; | ||
320 | |||
321 | dprintk("lockd: UNLOCK_MSG called\n"); | 336 | dprintk("lockd: UNLOCK_MSG called\n"); |
322 | memset(&res, 0, sizeof(res)); | 337 | return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock); |
323 | |||
324 | if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0) | ||
325 | stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res); | ||
326 | return stat; | ||
327 | } | 338 | } |
328 | 339 | ||
329 | static int | 340 | static int |
330 | nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, | 341 | nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, |
331 | void *resp) | 342 | void *resp) |
332 | { | 343 | { |
333 | struct nlm_res res; | ||
334 | u32 stat; | ||
335 | |||
336 | dprintk("lockd: GRANTED_MSG called\n"); | 344 | dprintk("lockd: GRANTED_MSG called\n"); |
337 | memset(&res, 0, sizeof(res)); | 345 | return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted); |
338 | |||
339 | if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0) | ||
340 | stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res); | ||
341 | return stat; | ||
342 | } | 346 | } |
343 | 347 | ||
344 | /* | 348 | /* |
@@ -497,55 +501,6 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, | |||
497 | } | 501 | } |
498 | 502 | ||
499 | /* | 503 | /* |
500 | * This is the generic lockd callback for async RPC calls | ||
501 | */ | ||
502 | static u32 | ||
503 | nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp) | ||
504 | { | ||
505 | struct nlm_host *host; | ||
506 | struct nlm_rqst *call; | ||
507 | |||
508 | if (!(call = nlmclnt_alloc_call())) | ||
509 | return rpc_system_err; | ||
510 | |||
511 | host = nlmclnt_lookup_host(&rqstp->rq_addr, | ||
512 | rqstp->rq_prot, rqstp->rq_vers); | ||
513 | if (!host) { | ||
514 | kfree(call); | ||
515 | return rpc_system_err; | ||
516 | } | ||
517 | |||
518 | call->a_flags = RPC_TASK_ASYNC; | ||
519 | call->a_host = host; | ||
520 | memcpy(&call->a_args, resp, sizeof(*resp)); | ||
521 | |||
522 | if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0) | ||
523 | goto error; | ||
524 | |||
525 | return rpc_success; | ||
526 | error: | ||
527 | nlm_release_host(host); | ||
528 | kfree(call); | ||
529 | return rpc_system_err; | ||
530 | } | ||
531 | |||
532 | static void nlmsvc_callback_exit(struct rpc_task *task, void *data) | ||
533 | { | ||
534 | struct nlm_rqst *call = data; | ||
535 | |||
536 | if (task->tk_status < 0) { | ||
537 | dprintk("lockd: %4d callback failed (errno = %d)\n", | ||
538 | task->tk_pid, -task->tk_status); | ||
539 | } | ||
540 | nlm_release_host(call->a_host); | ||
541 | kfree(call); | ||
542 | } | ||
543 | |||
544 | static const struct rpc_call_ops nlmsvc_callback_ops = { | ||
545 | .rpc_call_done = nlmsvc_callback_exit, | ||
546 | }; | ||
547 | |||
548 | /* | ||
549 | * NLM Server procedures. | 504 | * NLM Server procedures. |
550 | */ | 505 | */ |
551 | 506 | ||
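[Editor's note: an illustrative sketch of the svcproc.c refactor above, in which the per-procedure *_MSG handlers collapse into one generic helper that runs the real procedure and then issues the asynchronous result callback. The types, return codes, and helper names here are simplified stand-ins, not the kernel's.]

#include <stdio.h>

typedef int (*nlm_proc_fn)(int arg, int *res);

static int proc_test(int arg, int *res)  { *res = arg + 1; return 0; }
static int proc_lock(int arg, int *res)  { *res = arg + 2; return 0; }

/* Common "async" path: run func(), then send the result callback. */
static int generic_callback(int arg, unsigned proc, nlm_proc_fn func)
{
	int res;
	int stat = func(arg, &res);

	if (stat != 0)
		return stat;	/* procedure refused; no callback is sent */
	printf("async callback proc=%u res=%d\n", proc, res);
	return 0;
}

/* Each *_MSG wrapper is now a one-liner dispatching through the helper. */
static int proc_test_msg(int arg) { return generic_callback(arg, 1, proc_test); }
static int proc_lock_msg(int arg) { return generic_callback(arg, 2, proc_lock); }

int main(void)
{
	proc_test_msg(10);
	proc_lock_msg(10);
	return 0;
}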
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c index 4943fb7836ce..27288c83da96 100644 --- a/fs/lockd/svcshare.c +++ b/fs/lockd/svcshare.c | |||
@@ -88,7 +88,7 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file, | |||
88 | * Traverse all shares for a given file (and host). | 88 | * Traverse all shares for a given file (and host). |
89 | * NLM_ACT_CHECK is handled by nlmsvc_inspect_file. | 89 | * NLM_ACT_CHECK is handled by nlmsvc_inspect_file. |
90 | */ | 90 | */ |
91 | int | 91 | void |
92 | nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) | 92 | nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) |
93 | { | 93 | { |
94 | struct nlm_share *share, **shpp; | 94 | struct nlm_share *share, **shpp; |
@@ -106,6 +106,4 @@ nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) | |||
106 | } | 106 | } |
107 | shpp = &share->s_next; | 107 | shpp = &share->s_next; |
108 | } | 108 | } |
109 | |||
110 | return 0; | ||
111 | } | 109 | } |
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 62f4a385177f..c7a6e3ae44d6 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c | |||
@@ -182,7 +182,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action) | |||
182 | again: | 182 | again: |
183 | file->f_locks = 0; | 183 | file->f_locks = 0; |
184 | for (fl = inode->i_flock; fl; fl = fl->fl_next) { | 184 | for (fl = inode->i_flock; fl; fl = fl->fl_next) { |
185 | if (!(fl->fl_flags & FL_LOCKD)) | 185 | if (fl->fl_lmops != &nlmsvc_lock_operations) |
186 | continue; | 186 | continue; |
187 | 187 | ||
188 | /* update current lock count */ | 188 | /* update current lock count */ |
@@ -224,9 +224,8 @@ nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action) | |||
224 | if (file->f_count || file->f_blocks || file->f_shares) | 224 | if (file->f_count || file->f_blocks || file->f_shares) |
225 | return 1; | 225 | return 1; |
226 | } else { | 226 | } else { |
227 | if (nlmsvc_traverse_blocks(host, file, action) | 227 | nlmsvc_traverse_blocks(host, file, action); |
228 | || nlmsvc_traverse_shares(host, file, action)) | 228 | nlmsvc_traverse_shares(host, file, action); |
229 | return 1; | ||
230 | } | 229 | } |
231 | return nlm_traverse_locks(host, file, action); | 230 | return nlm_traverse_locks(host, file, action); |
232 | } | 231 | } |
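[Editor's note: a small sketch of the svcsubs.c change above, where locks belonging to lockd are now recognised by comparing fl_lmops against nlmsvc_lock_operations instead of testing a dedicated FL_LOCKD flag. These are simplified userspace stand-ins; the ops structure here is only a marker, not the kernel's lock_manager_operations.]

#include <stdio.h>

struct lock_ops { const char *name; };

static const struct lock_ops nlmsvc_lock_operations = { "lockd" };
static const struct lock_ops other_lock_operations  = { "other" };

struct file_lock_sketch {
	const struct lock_ops *fl_lmops;
};

static int is_lockd_lock(const struct file_lock_sketch *fl)
{
	/* was: fl->fl_flags & FL_LOCKD */
	return fl->fl_lmops == &nlmsvc_lock_operations;
}

int main(void)
{
	struct file_lock_sketch a = { &nlmsvc_lock_operations };
	struct file_lock_sketch b = { &other_lock_operations };

	printf("a: %d, b: %d\n", is_lockd_lock(&a), is_lockd_lock(&b));
	return 0;
}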
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 1d700a4dd0b5..f22a3764461a 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c | |||
@@ -131,10 +131,11 @@ nlm_decode_lock(u32 *p, struct nlm_lock *lock) | |||
131 | || !(p = nlm_decode_fh(p, &lock->fh)) | 131 | || !(p = nlm_decode_fh(p, &lock->fh)) |
132 | || !(p = nlm_decode_oh(p, &lock->oh))) | 132 | || !(p = nlm_decode_oh(p, &lock->oh))) |
133 | return NULL; | 133 | return NULL; |
134 | lock->svid = ntohl(*p++); | ||
134 | 135 | ||
135 | locks_init_lock(fl); | 136 | locks_init_lock(fl); |
136 | fl->fl_owner = current->files; | 137 | fl->fl_owner = current->files; |
137 | fl->fl_pid = ntohl(*p++); | 138 | fl->fl_pid = (pid_t)lock->svid; |
138 | fl->fl_flags = FL_POSIX; | 139 | fl->fl_flags = FL_POSIX; |
139 | fl->fl_type = F_RDLCK; /* as good as anything else */ | 140 | fl->fl_type = F_RDLCK; /* as good as anything else */ |
140 | start = ntohl(*p++); | 141 | start = ntohl(*p++); |
@@ -174,7 +175,7 @@ nlm_encode_lock(u32 *p, struct nlm_lock *lock) | |||
174 | else | 175 | else |
175 | len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); | 176 | len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); |
176 | 177 | ||
177 | *p++ = htonl(fl->fl_pid); | 178 | *p++ = htonl(lock->svid); |
178 | *p++ = htonl(start); | 179 | *p++ = htonl(start); |
179 | *p++ = htonl(len); | 180 | *p++ = htonl(len); |
180 | 181 | ||
@@ -197,7 +198,7 @@ nlm_encode_testres(u32 *p, struct nlm_res *resp) | |||
197 | struct file_lock *fl = &resp->lock.fl; | 198 | struct file_lock *fl = &resp->lock.fl; |
198 | 199 | ||
199 | *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; | 200 | *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; |
200 | *p++ = htonl(fl->fl_pid); | 201 | *p++ = htonl(resp->lock.svid); |
201 | 202 | ||
202 | /* Encode owner handle. */ | 203 | /* Encode owner handle. */ |
203 | if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) | 204 | if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) |
@@ -298,7 +299,8 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | |||
298 | 299 | ||
299 | memset(lock, 0, sizeof(*lock)); | 300 | memset(lock, 0, sizeof(*lock)); |
300 | locks_init_lock(&lock->fl); | 301 | locks_init_lock(&lock->fl); |
301 | lock->fl.fl_pid = ~(u32) 0; | 302 | lock->svid = ~(u32) 0; |
303 | lock->fl.fl_pid = (pid_t)lock->svid; | ||
302 | 304 | ||
303 | if (!(p = nlm_decode_cookie(p, &argp->cookie)) | 305 | if (!(p = nlm_decode_cookie(p, &argp->cookie)) |
304 | || !(p = xdr_decode_string_inplace(p, &lock->caller, | 306 | || !(p = xdr_decode_string_inplace(p, &lock->caller, |
@@ -415,7 +417,8 @@ nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | |||
415 | memset(&resp->lock, 0, sizeof(resp->lock)); | 417 | memset(&resp->lock, 0, sizeof(resp->lock)); |
416 | locks_init_lock(fl); | 418 | locks_init_lock(fl); |
417 | excl = ntohl(*p++); | 419 | excl = ntohl(*p++); |
418 | fl->fl_pid = ntohl(*p++); | 420 | resp->lock.svid = ntohl(*p++); |
421 | fl->fl_pid = (pid_t)resp->lock.svid; | ||
419 | if (!(p = nlm_decode_oh(p, &resp->lock.oh))) | 422 | if (!(p = nlm_decode_oh(p, &resp->lock.oh))) |
420 | return -EIO; | 423 | return -EIO; |
421 | 424 | ||
@@ -543,7 +546,9 @@ nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | |||
543 | .p_proc = NLMPROC_##proc, \ | 546 | .p_proc = NLMPROC_##proc, \ |
544 | .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ | 547 | .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ |
545 | .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ | 548 | .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ |
546 | .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2 \ | 549 | .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2, \ |
550 | .p_statidx = NLMPROC_##proc, \ | ||
551 | .p_name = #proc, \ | ||
547 | } | 552 | } |
548 | 553 | ||
549 | static struct rpc_procinfo nlm_procedures[] = { | 554 | static struct rpc_procinfo nlm_procedures[] = { |
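[Editor's note: an illustrative sketch of the xdr.c change above, which keeps the on-wire NLM owner id in its own lock->svid field, decoded and encoded as a 32-bit big-endian word, with fl_pid merely derived from it. Plain userspace C stand-ins for the kernel XDR helpers; the struct and buffer handling are invented for illustration.]

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_lock {
	uint32_t svid;		/* NLM lock owner id, as sent on the wire */
	int fl_pid;		/* local view, derived from svid */
};

static const uint32_t *decode_lock(const uint32_t *p, struct sketch_lock *lock)
{
	lock->svid = ntohl(*p++);		/* lock->svid = ntohl(*p++); */
	lock->fl_pid = (int)lock->svid;		/* fl->fl_pid = (pid_t)lock->svid; */
	return p;
}

static uint32_t *encode_lock(uint32_t *p, const struct sketch_lock *lock)
{
	*p++ = htonl(lock->svid);		/* *p++ = htonl(lock->svid); */
	return p;
}

int main(void)
{
	uint32_t buf[1];
	struct sketch_lock in = { .svid = 4242 }, out = { 0 };

	encode_lock(buf, &in);
	decode_lock(buf, &out);
	printf("svid=%u fl_pid=%d\n", (unsigned)out.svid, out.fl_pid);
	return 0;
}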
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index fdcf105a5303..36eb175ec335 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c | |||
@@ -130,10 +130,11 @@ nlm4_decode_lock(u32 *p, struct nlm_lock *lock) | |||
130 | || !(p = nlm4_decode_fh(p, &lock->fh)) | 130 | || !(p = nlm4_decode_fh(p, &lock->fh)) |
131 | || !(p = nlm4_decode_oh(p, &lock->oh))) | 131 | || !(p = nlm4_decode_oh(p, &lock->oh))) |
132 | return NULL; | 132 | return NULL; |
133 | lock->svid = ntohl(*p++); | ||
133 | 134 | ||
134 | locks_init_lock(fl); | 135 | locks_init_lock(fl); |
135 | fl->fl_owner = current->files; | 136 | fl->fl_owner = current->files; |
136 | fl->fl_pid = ntohl(*p++); | 137 | fl->fl_pid = (pid_t)lock->svid; |
137 | fl->fl_flags = FL_POSIX; | 138 | fl->fl_flags = FL_POSIX; |
138 | fl->fl_type = F_RDLCK; /* as good as anything else */ | 139 | fl->fl_type = F_RDLCK; /* as good as anything else */ |
139 | p = xdr_decode_hyper(p, &start); | 140 | p = xdr_decode_hyper(p, &start); |
@@ -167,7 +168,7 @@ nlm4_encode_lock(u32 *p, struct nlm_lock *lock) | |||
167 | || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) | 168 | || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) |
168 | return NULL; | 169 | return NULL; |
169 | 170 | ||
170 | *p++ = htonl(fl->fl_pid); | 171 | *p++ = htonl(lock->svid); |
171 | 172 | ||
172 | start = loff_t_to_s64(fl->fl_start); | 173 | start = loff_t_to_s64(fl->fl_start); |
173 | if (fl->fl_end == OFFSET_MAX) | 174 | if (fl->fl_end == OFFSET_MAX) |
@@ -198,7 +199,7 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp) | |||
198 | struct file_lock *fl = &resp->lock.fl; | 199 | struct file_lock *fl = &resp->lock.fl; |
199 | 200 | ||
200 | *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; | 201 | *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; |
201 | *p++ = htonl(fl->fl_pid); | 202 | *p++ = htonl(resp->lock.svid); |
202 | 203 | ||
203 | /* Encode owner handle. */ | 204 | /* Encode owner handle. */ |
204 | if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) | 205 | if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) |
@@ -212,8 +213,8 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp) | |||
212 | 213 | ||
213 | p = xdr_encode_hyper(p, start); | 214 | p = xdr_encode_hyper(p, start); |
214 | p = xdr_encode_hyper(p, len); | 215 | p = xdr_encode_hyper(p, len); |
215 | dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n", | 216 | dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n", |
216 | resp->status, fl->fl_pid, fl->fl_type, | 217 | resp->status, (int)resp->lock.svid, fl->fl_type, |
217 | (long long)fl->fl_start, (long long)fl->fl_end); | 218 | (long long)fl->fl_start, (long long)fl->fl_end); |
218 | } | 219 | } |
219 | 220 | ||
@@ -303,7 +304,8 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) | |||
303 | 304 | ||
304 | memset(lock, 0, sizeof(*lock)); | 305 | memset(lock, 0, sizeof(*lock)); |
305 | locks_init_lock(&lock->fl); | 306 | locks_init_lock(&lock->fl); |
306 | lock->fl.fl_pid = ~(u32) 0; | 307 | lock->svid = ~(u32) 0; |
308 | lock->fl.fl_pid = (pid_t)lock->svid; | ||
307 | 309 | ||
308 | if (!(p = nlm4_decode_cookie(p, &argp->cookie)) | 310 | if (!(p = nlm4_decode_cookie(p, &argp->cookie)) |
309 | || !(p = xdr_decode_string_inplace(p, &lock->caller, | 311 | || !(p = xdr_decode_string_inplace(p, &lock->caller, |
@@ -420,7 +422,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | |||
420 | memset(&resp->lock, 0, sizeof(resp->lock)); | 422 | memset(&resp->lock, 0, sizeof(resp->lock)); |
421 | locks_init_lock(fl); | 423 | locks_init_lock(fl); |
422 | excl = ntohl(*p++); | 424 | excl = ntohl(*p++); |
423 | fl->fl_pid = ntohl(*p++); | 425 | resp->lock.svid = ntohl(*p++); |
426 | fl->fl_pid = (pid_t)resp->lock.svid; | ||
424 | if (!(p = nlm4_decode_oh(p, &resp->lock.oh))) | 427 | if (!(p = nlm4_decode_oh(p, &resp->lock.oh))) |
425 | return -EIO; | 428 | return -EIO; |
426 | 429 | ||
@@ -548,7 +551,9 @@ nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) | |||
548 | .p_proc = NLMPROC_##proc, \ | 551 | .p_proc = NLMPROC_##proc, \ |
549 | .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ | 552 | .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ |
550 | .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ | 553 | .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ |
551 | .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2 \ | 554 | .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2, \ |
555 | .p_statidx = NLMPROC_##proc, \ | ||
556 | .p_name = #proc, \ | ||
552 | } | 557 | } |
553 | 558 | ||
554 | static struct rpc_procinfo nlm4_procedures[] = { | 559 | static struct rpc_procinfo nlm4_procedures[] = { |