author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/sunrpc/svcsock.c
tag        Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'net/sunrpc/svcsock.c')

-rw-r--r--  net/sunrpc/svcsock.c  1585
1 file changed, 1585 insertions, 0 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
new file mode 100644
index 000000000000..05907035bc96
--- /dev/null
+++ b/net/sunrpc/svcsock.c
@@ -0,0 +1,1585 @@
/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
21 | |||
22 | #include <linux/sched.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/fcntl.h> | ||
25 | #include <linux/net.h> | ||
26 | #include <linux/in.h> | ||
27 | #include <linux/inet.h> | ||
28 | #include <linux/udp.h> | ||
29 | #include <linux/tcp.h> | ||
30 | #include <linux/unistd.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <net/sock.h> | ||
35 | #include <net/checksum.h> | ||
36 | #include <net/ip.h> | ||
37 | #include <net/tcp.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | #include <asm/ioctls.h> | ||
40 | |||
41 | #include <linux/sunrpc/types.h> | ||
42 | #include <linux/sunrpc/xdr.h> | ||
43 | #include <linux/sunrpc/svcsock.h> | ||
44 | #include <linux/sunrpc/stats.h> | ||
45 | |||
/* SMP locking strategy:
 *
 *      svc_serv->sv_lock protects most stuff for that service.
 *
 *      Some flags can be set to certain values at any time
 *      providing that certain rules are followed:
 *
 *      SK_BUSY can be set to 0 at any time.
 *              svc_sock_enqueue must be called afterwards
 *      SK_CONN, SK_DATA can be set or cleared at any time.
 *              after a set, svc_sock_enqueue must be called.
 *              after a clear, the socket must be read/accepted;
 *              if this succeeds, it must be set again.
 *      SK_CLOSE can be set at any time. It is never cleared.
 *
 */
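
/* To make the rules above concrete, every readiness-flag update below
 * follows the same two-step pattern, e.g.
 *
 *      set_bit(SK_DATA, &svsk->sk_flags);
 *      svc_sock_enqueue(svsk);
 *
 * so that no flag can change without the socket getting a chance to be
 * picked up by an idle thread.
 */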

#define RPCDBG_FACILITY RPCDBG_SVCSOCK


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                         int *errp, int pmap_reg);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/*
 * Queue up an idle server thread. Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_add(&rqstp->rq_list, &serv->sv_threads);
}

/*
 * Dequeue an nfsd thread. Must have serv->sv_lock held.
 */
static inline void
svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
        struct sk_buff *skb = rqstp->rq_skbuff;
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        if (skb) {
                rqstp->rq_skbuff = NULL;

                dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
        }
        if (dr) {
                rqstp->rq_deferred = NULL;
                kfree(dr);
        }
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
        int wspace;

        if (svsk->sk_sock->type == SOCK_STREAM)
                wspace = sk_stream_wspace(svsk->sk_sk);
        else
                wspace = sock_wspace(svsk->sk_sk);

        return wspace;
}
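
/* The value returned here is compared by svc_sock_enqueue() against
 * twice the space a reply might need; a socket whose send buffer cannot
 * take another reply is simply not enqueued until write space returns.
 */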

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;
        struct svc_rqst *rqstp;

        if (!(svsk->sk_flags &
              ((1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED))))
                return;
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                return;

        spin_lock_bh(&serv->sv_lock);

        if (!list_empty(&serv->sv_threads) &&
            !list_empty(&serv->sv_sockets))
                printk(KERN_ERR
                       "svc_sock_enqueue: threads and sockets both waiting??\n");

        if (test_bit(SK_DEAD, &svsk->sk_flags)) {
                /* Don't enqueue dead sockets */
                dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        if (test_bit(SK_BUSY, &svsk->sk_flags)) {
                /* Don't enqueue socket while daemon is receiving */
                dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
        if (((svsk->sk_reserved + serv->sv_bufsz)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while not enough space for reply */
                dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
                        svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz,
                        svc_sock_wspace(svsk));
                goto out_unlock;
        }
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

        /* Mark socket as busy. It will remain in this state until the
         * server has processed all pending data and put the socket back
         * on the idle list.
         */
        set_bit(SK_BUSY, &svsk->sk_flags);

        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: socket %p served by daemon %p\n",
                        svsk->sk_sk, rqstp);
                svc_serv_dequeue(serv, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                               "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                               rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
                wake_up(&rqstp->rq_wait);
        } else {
                dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
                list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
        }

out_unlock:
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Dequeue the first socket. Must be called with the serv->sv_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
        struct svc_sock *svsk;

        if (list_empty(&serv->sv_sockets))
                return NULL;

        svsk = list_entry(serv->sv_sockets.next,
                          struct svc_sock, sk_ready);
        list_del_init(&svsk->sk_ready);

        dprintk("svc: socket %p dequeued, inuse=%d\n",
                svsk->sk_sk, svsk->sk_inuse);

        return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
}


/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
                spin_lock_bh(&svsk->sk_server->sv_lock);
                svsk->sk_reserved -= (rqstp->rq_reserved - space);
                rqstp->rq_reserved = space;
                spin_unlock_bh(&svsk->sk_server->sv_lock);

                svc_sock_enqueue(svsk);
        }
}
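
/* A hypothetical caller that knows early that its reply will be small
 * could release most of its claim right away, e.g.
 *
 *      svc_reserve(rqstp, 512);
 *
 * (512 is an illustrative size, not a value used in this file); this
 * lets svc_sock_enqueue() admit further requests on the same socket
 * sooner.
 */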

/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;

        spin_lock_bh(&serv->sv_lock);
        if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
                spin_unlock_bh(&serv->sv_lock);
                dprintk("svc: releasing dead socket\n");
                sock_release(svsk->sk_sock);
                kfree(svsk);
        }
        else
                spin_unlock_bh(&serv->sv_lock);
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;

        svc_release_skb(rqstp);

        svc_free_allpages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;


        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        rqstp->rq_sock = NULL;

        svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data
 */
void
svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: daemon %p woken up.\n", rqstp);
                /*
                svc_serv_dequeue(serv, rqstp);
                rqstp->rq_sock = NULL;
                 */
                wake_up(&rqstp->rq_wait);
        }
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Generic sendto routine
 */
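/* An xdr_buf carries a message in three pieces: a head kvec, an array
 * of whole pages, and a tail kvec. svc_sendto() below simply walks the
 * three in order, keeping MSG_MORE set until the final piece so the
 * stack can coalesce them into as few packets as possible.
 */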
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct socket *sock = svsk->sk_sock;
        int slen;
        char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct cmsghdr *cmh = (struct cmsghdr *)buffer;
        struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
        int len = 0;
        int result;
        int size;
        struct page **ppage = xdr->pages;
        size_t base = xdr->page_base;
        unsigned int pglen = xdr->page_len;
        unsigned int flags = MSG_MORE;

        slen = xdr->len;

        if (rqstp->rq_prot == IPPROTO_UDP) {
                /* set the source and destination */
                struct msghdr msg;
                msg.msg_name    = &rqstp->rq_addr;
                msg.msg_namelen = sizeof(rqstp->rq_addr);
                msg.msg_iov     = NULL;
                msg.msg_iovlen  = 0;
                msg.msg_flags   = MSG_MORE;

                msg.msg_control = cmh;
                msg.msg_controllen = sizeof(buffer);
                cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                cmh->cmsg_level = SOL_IP;
                cmh->cmsg_type = IP_PKTINFO;
                pki->ipi_ifindex = 0;
                pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

                if (sock_sendmsg(sock, &msg, 0) < 0)
                        goto out;
        }

        /* send head */
        if (slen == xdr->head[0].iov_len)
                flags = 0;
        len = sock->ops->sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
        if (len != xdr->head[0].iov_len)
                goto out;
        slen -= xdr->head[0].iov_len;
        if (slen == 0)
                goto out;

        /* send page data */
        size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
        while (pglen > 0) {
                if (slen == size)
                        flags = 0;
                result = sock->ops->sendpage(sock, *ppage, base, size, flags);
                if (result > 0)
                        len += result;
                if (result != size)
                        goto out;
                slen -= size;
                pglen -= size;
                size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
                base = 0;
                ppage++;
        }
        /* send tail */
        if (xdr->tail[0].iov_len) {
                result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
                                             ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1),
                                             xdr->tail[0].iov_len, 0);

                if (result > 0)
                        len += result;
        }
out:
        dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
                rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
                rqstp->rq_addr.sin_addr.s_addr);

        return len;
}

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
        mm_segment_t oldfs;
        struct socket *sock = svsk->sk_sock;
        int avail, err;

        oldfs = get_fs(); set_fs(KERNEL_DS);
        err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
        set_fs(oldfs);

        return (err >= 0) ? avail : err;
}
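
/* TIOCINQ is the same ioctl as FIONREAD: it reports the number of bytes
 * queued on the receive side. svc_tcp_recvfrom() below uses this to
 * decide whether a complete record has arrived before committing a
 * thread to reading it.
 */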

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
        struct msghdr msg;
        struct socket *sock;
        int len, alen;

        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
        sock = rqstp->rq_sock->sk_sock;

        msg.msg_name    = &rqstp->rq_addr;
        msg.msg_namelen = sizeof(rqstp->rq_addr);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        msg.msg_flags   = MSG_DONTWAIT;

        len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

        /* sock_recvmsg doesn't fill in the name/namelen, so we must..
         * possibly we should cache this in the svc_sock structure
         * at accept time. FIXME
         */
        alen = sizeof(rqstp->rq_addr);
        sock->ops->getname(sock, (struct sockaddr *)&rqstp->rq_addr, &alen, 1);

        dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
                rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

        return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
        mm_segment_t oldfs;
        oldfs = get_fs(); set_fs(KERNEL_DS);
        sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                        (char*)&snd, sizeof(snd));
        sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                        (char*)&rcv, sizeof(rcv));
#else
        /* sock_setsockopt limits use to sysctl_?mem_max,
         * which isn't acceptable. Until that is made conditional
         * on not having CAP_SYS_RESOURCE or similar, we go direct...
         * DaveM said I could!
         */
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
#endif
}
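
/* The doubling above mirrors what sock_setsockopt() itself does with
 * SO_SNDBUF/SO_RCVBUF requests (the kernel stores twice the requested
 * value to allow for bookkeeping overhead), so callers can pass the
 * payload sizes they actually need.
 */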
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (!svsk)
                goto out;
        dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
                svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
        set_bit(SK_DATA, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
                svc_sock_enqueue(svsk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
                printk(KERN_WARNING "RPC svc_write_space: some sleeping on %p\n",
                       svsk);
                wake_up_interruptible(sk->sk_sleep);
        }
}

/*
 * Receive a datagram from a UDP socket.
 */
extern int
csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);

static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        struct sk_buff *skb;
        int err, len;

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* udp sockets need large rcvbuf as all pending
                 * requests are still in that buffer. sndbuf must
                 * also be large enough that there is enough space
                 * for one reply per thread.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz);

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        clear_bit(SK_DATA, &svsk->sk_flags);
        while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
                if (err == -EAGAIN) {
                        svc_sock_received(svsk);
                        return err;
                }
                /* possibly an icmp error */
                dprintk("svc: recvfrom returned error %d\n", -err);
        }
        if (skb->stamp.tv_sec == 0) {
                skb->stamp.tv_sec = xtime.tv_sec;
                skb->stamp.tv_usec = xtime.tv_nsec * 1000;
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        svsk->sk_sk->sk_stamp = skb->stamp;
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

        /*
         * Maybe more packets - kick another thread ASAP.
         */
        svc_sock_received(svsk);

        len = skb->len - sizeof(struct udphdr);
        rqstp->rq_arg.len = len;

        rqstp->rq_prot = IPPROTO_UDP;

        /* Get sender address */
        rqstp->rq_addr.sin_family = AF_INET;
        rqstp->rq_addr.sin_port = skb->h.uh->source;
        rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
        rqstp->rq_daddr = skb->nh.iph->daddr;

        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                local_bh_disable();
                if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
                        local_bh_enable();
                        /* checksum error */
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                local_bh_enable();
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
                        if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
                                skb_free_datagram(svsk->sk_sk, skb);
                                return 0;
                        }
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                rqstp->rq_skbuff = skb;
        }

        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
                rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        if (serv->sv_stats)
                serv->sv_stats->netudpcnt++;

        return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
        int error;

        error = svc_sendto(rqstp, &rqstp->rq_res);
        if (error == -ECONNREFUSED)
                /* ICMP error on earlier request. */
                error = svc_sendto(rqstp, &rqstp->rq_res);

        return error;
}

static void
svc_udp_init(struct svc_sock *svsk)
{
        svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
        svsk->sk_sk->sk_write_space = svc_write_space;
        svsk->sk_recvfrom = svc_udp_recvfrom;
        svsk->sk_sendto = svc_udp_sendto;

        /* initial setting: must have enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
                            3 * svsk->sk_server->sv_bufsz,
                            3 * svsk->sk_server->sv_bufsz);

        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);

        if (sk->sk_state != TCP_LISTEN) {
                /*
                 * This callback may be called twice when a new connection
                 * is established as a child socket inherits everything
                 * from a parent LISTEN socket.
                 * 1) data_ready method of the parent socket will be called
                 *    when one of the child sockets becomes ESTABLISHED.
                 * 2) data_ready method of the child socket may be called
                 *    when it receives data before the socket is accepted.
                 * In case of 2, we should ignore it silently.
                 */
                goto out;
        }
        if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
                printk("svc: socket %p: no user data\n", sk);
                goto out;
        }
        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);

        if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
                printk("svc: socket %p: no user data\n", sk);
                goto out;
        }
        set_bit(SK_CLOSE, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
        if (!(svsk = (struct svc_sock *)(sk->sk_user_data)))
                goto out;
        set_bit(SK_DATA, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
        struct sockaddr_in sin;
        struct svc_serv *serv = svsk->sk_server;
        struct socket *sock = svsk->sk_sock;
        struct socket *newsock;
        struct proto_ops *ops;
        struct svc_sock *newsvsk;
        int err, slen;

        dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return;

        err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &newsock);
        if (err) {
                if (err == -ENOMEM)
                        printk(KERN_WARNING "%s: no more sockets!\n",
                               serv->sv_name);
                return;
        }

        dprintk("svc: tcp_accept %p allocated\n", newsock);
        newsock->ops = ops = sock->ops;

        clear_bit(SK_CONN, &svsk->sk_flags);
        if ((err = ops->accept(sock, newsock, O_NONBLOCK)) < 0) {
                if (err != -EAGAIN && net_ratelimit())
                        printk(KERN_WARNING "%s: accept failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }
        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);

        slen = sizeof(sin);
        err = ops->getname(newsock, (struct sockaddr *) &sin, &slen, 1);
        if (err < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: peername failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }

        /* Ideally, we would want to reject connections from unauthorized
         * hosts here, but when we get encryption, the IP of the host won't
         * tell us anything. For now just warn about unpriv connections.
         */
        if (ntohs(sin.sin_port) >= 1024) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
                        serv->sv_name,
                        NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
        }

        dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
                NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

        /* make sure that a write doesn't block forever when
         * low on memory
         */
        newsock->sk->sk_sndtimeo = HZ*30;

        if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
                goto failed;


        /* make sure that we don't have too many active connections.
         * If we have, something must be dropped.
         *
         * There's no point in trying to do random drop here for
         * DoS prevention. The NFS clients do 1 reconnect in 15
         * seconds. An attacker can easily beat that.
         *
         * The only somewhat efficient mechanism would be to drop
         * old connections from the same IP first. But right now
         * we don't even record the client IP in svc_sock.
         */
        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                struct svc_sock *svsk = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        if (net_ratelimit()) {
                                /* Try to help the admin */
                                printk(KERN_NOTICE "%s: too many open TCP "
                                       "sockets, consider increasing the "
                                       "number of nfsd threads\n",
                                       serv->sv_name);
                                printk(KERN_NOTICE "%s: last TCP connect from "
                                       "%u.%u.%u.%u:%d\n",
                                       serv->sv_name,
                                       NIPQUAD(sin.sin_addr.s_addr),
                                       ntohs(sin.sin_port));
                        }
                        /*
                         * Always select the oldest socket. It's not fair,
                         * but so is life
                         */
                        svsk = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_sock,
                                          sk_list);
                        set_bit(SK_CLOSE, &svsk->sk_flags);
                        svsk->sk_inuse++;
                }
                spin_unlock_bh(&serv->sv_lock);

                if (svsk) {
                        svc_sock_enqueue(svsk);
                        svc_sock_put(svsk);
                }

        }

        if (serv->sv_stats)
                serv->sv_stats->nettcpconn++;

        return;

failed:
        sock_release(newsock);
        return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        int len;
        struct kvec vec[RPCSVC_MAXPAGES];
        int pnum, vlen;

        dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
                svsk, test_bit(SK_DATA, &svsk->sk_flags),
                test_bit(SK_CONN, &svsk->sk_flags),
                test_bit(SK_CLOSE, &svsk->sk_flags));

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        if (test_bit(SK_CONN, &svsk->sk_flags)) {
                svc_tcp_accept(svsk);
                svc_sock_received(svsk);
                return 0;
        }

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* sndbuf needs to have room for one request
                 * per thread, otherwise we can stall even when the
                 * network isn't a bottleneck.
                 * rcvbuf just needs to be able to hold a few requests.
                 * Normally they will be removed from the queue
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    3 * serv->sv_bufsz);

        clear_bit(SK_DATA, &svsk->sk_flags);

        /* Receive data. If we haven't got the record length yet, get
         * the next four bytes. Otherwise try to gobble up as much as
         * possible up to the complete record length.
         */
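        /* The 4-byte header is a standard RPC record mark (RFC 1831,
         * section 10): the top bit flags the last fragment of a record
         * and the low 31 bits give the fragment length, so e.g.
         * 0x8000001c means "final fragment, 28 bytes follow".
         */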
        if (svsk->sk_tcplen < 4) {
                unsigned long want = 4 - svsk->sk_tcplen;
                struct kvec iov;

                iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
                iov.iov_len  = want;
                if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
                        goto error;
                svsk->sk_tcplen += len;

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }

                svsk->sk_reclen = ntohl(svsk->sk_reclen);
                if (!(svsk->sk_reclen & 0x80000000)) {
                        /* FIXME: technically, a record can be fragmented,
                         * and non-terminal fragments will not have the top
                         * bit set in the fragment length header.
                         * But apparently no known nfs clients send fragmented
                         * records. */
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
                if (svsk->sk_reclen > serv->sv_bufsz) {
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
        }

        /* Check whether enough data is available */
        len = svc_recv_available(svsk);
        if (len < 0)
                goto error;

        if (len < svsk->sk_reclen) {
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        len, svsk->sk_reclen);
                svc_sock_received(svsk);
                return -EAGAIN; /* record not complete */
        }
        len = svsk->sk_reclen;
        set_bit(SK_DATA, &svsk->sk_flags);

        vec[0] = rqstp->rq_arg.head[0];
        vlen = PAGE_SIZE;
        pnum = 1;
        while (vlen < len) {
                vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
                vec[pnum].iov_len = PAGE_SIZE;
                pnum++;
                vlen += PAGE_SIZE;
        }

        /* Now receive data */
        len = svc_recvfrom(rqstp, vec, pnum, len);
        if (len < 0)
                goto error;

        dprintk("svc: TCP complete record (%d bytes)\n", len);
        rqstp->rq_arg.len = len;
        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
        }

        rqstp->rq_skbuff = NULL;
        rqstp->rq_prot = IPPROTO_TCP;

        /* Reset TCP read info */
        svsk->sk_reclen = 0;
        svsk->sk_tcplen = 0;

        svc_sock_received(svsk);
        if (serv->sv_stats)
                serv->sv_stats->nettcpcnt++;

        return len;

err_delete:
        svc_delete_socket(svsk);
        return -EAGAIN;

error:
        if (len == -EAGAIN) {
                dprintk("RPC: TCP recvfrom got EAGAIN\n");
                svc_sock_received(svsk);
        } else {
                printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
                       svsk->sk_server->sv_name, -len);
                svc_sock_received(svsk);
        }

        return len;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
        struct xdr_buf *xbufp = &rqstp->rq_res;
        int sent;
        u32 reclen;

        /* Set up the first element of the reply kvec.
         * Any other kvecs that may be in use have been taken
         * care of by the server implementation itself.
         */
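        /* The record mark written here is the mirror image of the one
         * parsed in svc_tcp_recvfrom(): top bit = final (and only)
         * fragment, low 31 bits = length of the record minus the 4
         * bytes of the mark itself.
         */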
        reclen = htonl(0x80000000 | (xbufp->len - 4));
        memcpy(xbufp->head[0].iov_base, &reclen, 4);

        if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
                return -ENOTCONN;

        sent = svc_sendto(rqstp, &rqstp->rq_res);
        if (sent != xbufp->len) {
                printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                       rqstp->rq_sock->sk_server->sv_name,
                       (sent < 0) ? "got error" : "sent only",
                       sent, xbufp->len);
                svc_delete_socket(rqstp->rq_sock);
                sent = -EAGAIN;
        }
        return sent;
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
        struct sock *sk = svsk->sk_sk;
        struct tcp_sock *tp = tcp_sk(sk);

        svsk->sk_recvfrom = svc_tcp_recvfrom;
        svsk->sk_sendto = svc_tcp_sendto;

        if (sk->sk_state == TCP_LISTEN) {
                dprintk("setting up TCP socket for listening\n");
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(SK_CONN, &svsk->sk_flags);
        } else {
                dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_tcp_data_ready;
                sk->sk_write_space = svc_write_space;

                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;

                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* initial setting: must have enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    3 * svsk->sk_server->sv_bufsz,
                                    3 * svsk->sk_server->sv_bufsz);

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
        /*
         * The number of server threads has changed. Update
         * rcvbuf and sndbuf accordingly on all sockets
         */
        struct list_head *le;

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Receive the next request on any socket.
 */
int
svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
        struct svc_sock *svsk = NULL;
        int len;
        int pages;
        struct xdr_buf *arg;
        DECLARE_WAITQUEUE(wait, current);

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                       "svc_recv: service %p, socket not NULL!\n",
                       rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                       "svc_recv: service %p, wait queue active!\n",
                       rqstp);

        /* Initialize the buffers */
        /* first reclaim pages that were moved to response list */
        svc_pushback_allpages(rqstp);

        /* now allocate needed pages. If we get a failure, sleep briefly */
        pages = 2 + (serv->sv_bufsz + PAGE_SIZE - 1) / PAGE_SIZE;
        while (rqstp->rq_arghi < pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ/2);
                        continue;
                }
                rqstp->rq_argpages[rqstp->rq_arghi++] = p;
        }

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        rqstp->rq_argused = 1;
        arg->pages = rqstp->rq_argpages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;

        try_to_freeze(PF_FREEZE);
        if (signalled())
                return -EINTR;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_tempsocks)) {
                svsk = list_entry(serv->sv_tempsocks.next,
                                  struct svc_sock, sk_list);
                /* apparently the "standard" is that clients close
                 * idle connections after 5 minutes, servers after
                 * 6 minutes
                 *   http://www.connectathon.org/talks96/nfstcp.pdf
                 */
                if (get_seconds() - svsk->sk_lastrecv < 6*60
                    || test_bit(SK_BUSY, &svsk->sk_flags))
                        svsk = NULL;
        }
        if (svsk) {
                set_bit(SK_BUSY, &svsk->sk_flags);
                set_bit(SK_CLOSE, &svsk->sk_flags);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
        } else if ((svsk = svc_sock_dequeue(serv)) != NULL) {
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
        } else {
                /* No data pending. Go to sleep */
                svc_serv_enqueue(serv, rqstp);

                /*
                 * We have to be able to interrupt this wait
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&serv->sv_lock);

                schedule_timeout(timeout);

                try_to_freeze(PF_FREEZE);

                spin_lock_bh(&serv->sv_lock);
                remove_wait_queue(&rqstp->rq_wait, &wait);

                if (!(svsk = rqstp->rq_sock)) {
                        svc_serv_dequeue(serv, rqstp);
                        spin_unlock_bh(&serv->sv_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
                        return signalled() ? -EINTR : -EAGAIN;
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: server %p, socket %p, inuse=%d\n",
                rqstp, svsk, svsk->sk_inuse);
        len = svsk->sk_recvfrom(rqstp);
        dprintk("svc: got len=%d\n", len);

        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
                svc_sock_release(rqstp);
                return -EAGAIN;
        }
        svsk->sk_lastrecv = get_seconds();
        if (test_bit(SK_TEMP, &svsk->sk_flags)) {
                /* push active sockets to end of list */
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&svsk->sk_list))
                        list_move_tail(&svsk->sk_list, &serv->sv_tempsocks);
                spin_unlock_bh(&serv->sv_lock);
        }

        rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
        rqstp->rq_chandle.defer = svc_defer;

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
        dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
        svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk;
        int len;
        struct xdr_buf *xb;

        if ((svsk = rqstp->rq_sock) == NULL) {
                printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
                       __FILE__, __LINE__);
                return -EFAULT;
        }

        /* release the receive skb before sending the reply */
        svc_release_skb(rqstp);

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab svsk->sk_sem to serialize outgoing data. */
        down(&svsk->sk_sem);
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                len = -ENOTCONN;
        else
                len = svsk->sk_sendto(rqstp);
        up(&svsk->sk_sem);
        svc_sock_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                return 0;
        return len;
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
                 int *errp, int pmap_register)
{
        struct svc_sock *svsk;
        struct sock *inet;

        dprintk("svc: svc_setup_socket %p\n", sock);
        if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) {
                *errp = -ENOMEM;
                return NULL;
        }
        memset(svsk, 0, sizeof(*svsk));

        inet = sock->sk;

        /* Register socket with portmapper */
        if (*errp >= 0 && pmap_register)
                *errp = svc_register(serv, inet->sk_protocol,
                                     ntohs(inet_sk(inet)->sport));

        if (*errp < 0) {
                kfree(svsk);
                return NULL;
        }

        set_bit(SK_BUSY, &svsk->sk_flags);
        inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
        svsk->sk_server = serv;
        svsk->sk_lastrecv = get_seconds();
        INIT_LIST_HEAD(&svsk->sk_deferred);
        INIT_LIST_HEAD(&svsk->sk_ready);
        sema_init(&svsk->sk_sem, 1);

        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk);
        else
                svc_tcp_init(svsk);

        spin_lock_bh(&serv->sv_lock);
        if (!pmap_register) {
                set_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_tempsocks);
                serv->sv_tmpcnt++;
        } else {
                clear_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_permsocks);
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                svsk, svsk->sk_sk);

        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        return svsk;
}

/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
        struct svc_sock *svsk;
        struct socket *sock;
        int error;
        int type;

        dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
                serv->sv_program->pg_name, protocol,
                NIPQUAD(sin->sin_addr.s_addr),
                ntohs(sin->sin_port));

        if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
                printk(KERN_WARNING "svc: only UDP and TCP "
                       "sockets supported\n");
                return -EINVAL;
        }
        type = (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM;

        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;

        if (sin != NULL) {
                if (type == SOCK_STREAM)
                        sock->sk->sk_reuse = 1; /* allow address reuse */
                error = sock->ops->bind(sock, (struct sockaddr *) sin,
                                        sizeof(*sin));
                if (error < 0)
                        goto bummer;
        }

        if (protocol == IPPROTO_TCP) {
                if ((error = sock->ops->listen(sock, 64)) < 0)
                        goto bummer;
        }

        if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
                return 0;

bummer:
        dprintk("svc: svc_create_socket error = %d\n", -error);
        sock_release(sock);
        return error;
}

/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
        struct svc_serv *serv;
        struct sock *sk;

        dprintk("svc: svc_delete_socket(%p)\n", svsk);

        serv = svsk->sk_server;
        sk = svsk->sk_sk;

        sk->sk_state_change = svsk->sk_ostate;
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;

        spin_lock_bh(&serv->sv_lock);

        list_del_init(&svsk->sk_list);
        list_del_init(&svsk->sk_ready);
        if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
                if (test_bit(SK_TEMP, &svsk->sk_flags))
                        serv->sv_tmpcnt--;

        if (!svsk->sk_inuse) {
                spin_unlock_bh(&serv->sv_lock);
                sock_release(svsk->sk_sock);
                kfree(svsk);
        } else {
                spin_unlock_bh(&serv->sv_lock);
                dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
                /* svsk->sk_server = NULL; */
        }
}

/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
        struct sockaddr_in sin;

        dprintk("svc: creating socket proto = %d\n", protocol);
        sin.sin_family      = AF_INET;
        sin.sin_addr.s_addr = INADDR_ANY;
        sin.sin_port        = htons(port);
        return svc_create_socket(serv, protocol, &sin);
}

/*
 * Handle defer and revisit of requests
 */
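/* In outline: svc_defer() snapshots a request that cannot be answered
 * yet, and when the cache layer later calls svc_revisit() the snapshot
 * is queued on the socket's sk_deferred list and the socket re-enqueued,
 * so that svc_deferred_recv() can replay it through the normal
 * recvfrom path.
 */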

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
        struct svc_serv *serv = dreq->owner;
        struct svc_sock *svsk;

        if (too_many) {
                svc_sock_put(dr->svsk);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        svsk = dr->svsk;
        dr->svsk = NULL;
        spin_lock_bh(&serv->sv_lock);
        list_add(&dr->handle.recent, &svsk->sk_deferred);
        spin_unlock_bh(&serv->sv_lock);
        set_bit(SK_DEFERRED, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        svc_sock_put(svsk);
}

static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len)
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                /* FIXME maybe discard if size too large */
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                dr->addr = rqstp->rq_addr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
        }
        spin_lock_bh(&rqstp->rq_server->sv_lock);
        rqstp->rq_sock->sk_inuse++;
        dr->svsk = rqstp->rq_sock;
        spin_unlock_bh(&rqstp->rq_server->sv_lock);

        dr->handle.revisit = svc_revisit;
        return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        rqstp->rq_arg.head[0].iov_base = dr->args;
        rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
        rqstp->rq_arg.page_len = 0;
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot = dr->prot;
        rqstp->rq_addr = dr->addr;
        return dr->argslen<<2;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
        struct svc_deferred_req *dr = NULL;
        struct svc_serv *serv = svsk->sk_server;

        if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
                return NULL;
        spin_lock_bh(&serv->sv_lock);
        clear_bit(SK_DEFERRED, &svsk->sk_flags);
        if (!list_empty(&svsk->sk_deferred)) {
                dr = list_entry(svsk->sk_deferred.next,
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
                set_bit(SK_DEFERRED, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
        return dr;
}