author:    Tom Tucker <tom@opengridcomputing.com>    2007-12-30 22:08:27 -0500
committer: J. Bruce Fields <bfields@citi.umich.edu>  2008-02-01 16:42:13 -0500
commit:    0f0257eaa5d29b80f6ab2c40ed21aa65bb4527f6
tree:      542f64ec74fc045c06f5a1ffc0d48f823de12ccb /net/sunrpc/svc_xprt.c
parent:    18d19f949d5a9c927b2b88402630c5137971b619
svc: Move the xprt independent code to the svc_xprt.c file
This functionally trivial patch moves all of the transport-independent
functions from svcsock.c to the transport-independent svc_xprt.c file.
In addition, the following formatting changes were made:
- Whitespace cleanup
- Function signatures placed on a single line
- The inline directive was removed
- Lines over 80 columns were reformatted
- The term 'socket' was changed to 'transport' in comments
- The SMP comment was moved and updated
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--  net/sunrpc/svc_xprt.c | 753
1 files changed, 753 insertions, 0 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 271467c5138d..23165aef59d9 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -35,10 +35,53 @@
 
 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
 
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
+static int svc_deferred_recv(struct svc_rqst *rqstp);
+static struct cache_deferred_req *svc_defer(struct cache_req *req);
+static void svc_age_temp_xprts(unsigned long closure);
+
+/* apparently the "standard" is that clients close
+ * idle connections after 5 minutes, servers after
+ * 6 minutes
+ *   http://www.connectathon.org/talks96/nfstcp.pdf
+ */
+static int svc_conn_age_period = 6*60;
+
 /* List of registered transport classes */
 static DEFINE_SPINLOCK(svc_xprt_class_lock);
 static LIST_HEAD(svc_xprt_class_list);
 
+/* SMP locking strategy:
+ *
+ *      svc_pool->sp_lock protects most of the fields of that pool.
+ *      svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
+ *      when both need to be taken (rare), svc_serv->sv_lock is first.
+ *      BKL protects svc_serv->sv_nrthread.
+ *      svc_sock->sk_lock protects the svc_sock->sk_deferred list
+ *      and the ->sk_info_authunix cache.
+ *
+ *      The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
+ *      enqueued multiply. During normal transport processing this bit
+ *      is set by svc_xprt_enqueue and cleared by svc_xprt_received.
+ *      Providers should not manipulate this bit directly.
+ *
+ *      Some flags can be set to certain values at any time
+ *      providing that certain rules are followed:
+ *
+ *      XPT_CONN, XPT_DATA:
+ *              - Can be set or cleared at any time.
+ *              - After a set, svc_xprt_enqueue must be called to enqueue
+ *                the transport for processing.
+ *              - After a clear, the transport must be read/accepted.
+ *                If this succeeds, it must be set again.
+ *      XPT_CLOSE:
+ *              - Can set at any time. It is never cleared.
+ *      XPT_DEAD:
+ *              - Can only be set while XPT_BUSY is held which ensures
+ *                that no other thread will be using the transport or will
+ *                try to set XPT_DEAD.
+ */
+
 int svc_reg_xprt_class(struct svc_xprt_class *xcl)
 {
         struct svc_xprt_class *cl;
@@ -178,3 +221,713 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
 
+/**
+ * svc_print_addr - Format rq_addr field for printing
+ * @rqstp: svc_rqst struct containing address to print
+ * @buf: target buffer for formatted address
+ * @len: length of target buffer
+ *
+ */
+char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
+{
+        return __svc_print_addr(svc_addr(rqstp), buf, len);
+}
+EXPORT_SYMBOL_GPL(svc_print_addr);
+
+/*
+ * Queue up an idle server thread. Must have pool->sp_lock held.
+ * Note: this is really a stack rather than a queue, so that we only
+ * use as many different threads as we need, and the rest don't pollute
+ * the cache.
+ */
+static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
+{
+        list_add(&rqstp->rq_list, &pool->sp_threads);
+}
+
+/*
+ * Dequeue an nfsd thread. Must have pool->sp_lock held.
+ */
+static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
+{
+        list_del(&rqstp->rq_list);
+}
+
+/*
+ * Queue up a transport with data pending. If there are idle nfsd
+ * processes, wake 'em up.
+ *
+ */
+void svc_xprt_enqueue(struct svc_xprt *xprt)
+{
+        struct svc_serv *serv = xprt->xpt_server;
+        struct svc_pool *pool;
+        struct svc_rqst *rqstp;
+        int cpu;
+
+        if (!(xprt->xpt_flags &
+              ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+                return;
+        if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+                return;
+
+        cpu = get_cpu();
+        pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
+        put_cpu();
+
+        spin_lock_bh(&pool->sp_lock);
+
+        if (!list_empty(&pool->sp_threads) &&
+            !list_empty(&pool->sp_sockets))
+                printk(KERN_ERR
+                       "svc_xprt_enqueue: "
+                       "threads and transports both waiting??\n");
+
+        if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+                /* Don't enqueue dead transports */
+                dprintk("svc: transport %p is dead, not enqueued\n", xprt);
+                goto out_unlock;
+        }
+
+        /* Mark transport as busy. It will remain in this state until
+         * the provider calls svc_xprt_received. We update XPT_BUSY
+         * atomically because it also guards against trying to enqueue
+         * the transport twice.
+         */
+        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
+                /* Don't enqueue transport while already enqueued */
+                dprintk("svc: transport %p busy, not enqueued\n", xprt);
+                goto out_unlock;
+        }
+        BUG_ON(xprt->xpt_pool != NULL);
+        xprt->xpt_pool = pool;
+
+        /* Handle pending connection */
+        if (test_bit(XPT_CONN, &xprt->xpt_flags))
+                goto process;
+
+        /* Handle close in-progress */
+        if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
+                goto process;
+
+        /* Check if we have space to reply to a request */
+        if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
+                /* Don't enqueue while not enough space for reply */
+                dprintk("svc: no write space, transport %p not enqueued\n",
+                        xprt);
+                xprt->xpt_pool = NULL;
+                clear_bit(XPT_BUSY, &xprt->xpt_flags);
+                goto out_unlock;
+        }
+
+ process:
+        if (!list_empty(&pool->sp_threads)) {
+                rqstp = list_entry(pool->sp_threads.next,
+                                   struct svc_rqst,
+                                   rq_list);
+                dprintk("svc: transport %p served by daemon %p\n",
+                        xprt, rqstp);
+                svc_thread_dequeue(pool, rqstp);
+                if (rqstp->rq_xprt)
+                        printk(KERN_ERR
+                               "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
+                               rqstp, rqstp->rq_xprt);
+                rqstp->rq_xprt = xprt;
+                svc_xprt_get(xprt);
+                rqstp->rq_reserved = serv->sv_max_mesg;
+                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+                BUG_ON(xprt->xpt_pool != pool);
+                wake_up(&rqstp->rq_wait);
+        } else {
+                dprintk("svc: transport %p put into queue\n", xprt);
+                list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+                BUG_ON(xprt->xpt_pool != pool);
+        }
+
+out_unlock:
+        spin_unlock_bh(&pool->sp_lock);
+}
+EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
+
+/*
+ * Dequeue the first transport. Must be called with the pool->sp_lock held.
+ */
+static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
+{
+        struct svc_xprt *xprt;
+
+        if (list_empty(&pool->sp_sockets))
+                return NULL;
+
+        xprt = list_entry(pool->sp_sockets.next,
+                          struct svc_xprt, xpt_ready);
+        list_del_init(&xprt->xpt_ready);
+
+        dprintk("svc: transport %p dequeued, inuse=%d\n",
+                xprt, atomic_read(&xprt->xpt_ref.refcount));
+
+        return xprt;
+}
+
+/*
+ * svc_xprt_received conditionally queues the transport for processing
+ * by another thread. The caller must hold the XPT_BUSY bit and must
+ * not thereafter touch transport data.
+ *
+ * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
+ * insufficient) data.
+ */
+void svc_xprt_received(struct svc_xprt *xprt)
+{
+        BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
+        xprt->xpt_pool = NULL;
+        clear_bit(XPT_BUSY, &xprt->xpt_flags);
+        svc_xprt_enqueue(xprt);
+}
+EXPORT_SYMBOL_GPL(svc_xprt_received);
+
+/**
+ * svc_reserve - change the space reserved for the reply to a request.
+ * @rqstp: The request in question
+ * @space: new max space to reserve
+ *
+ * Each request reserves some space on the output queue of the transport
+ * to make sure the reply fits. This function reduces that reserved
+ * space to be the amount of space used already, plus @space.
+ *
+ */
+void svc_reserve(struct svc_rqst *rqstp, int space)
+{
+        space += rqstp->rq_res.head[0].iov_len;
+
+        if (space < rqstp->rq_reserved) {
+                struct svc_xprt *xprt = rqstp->rq_xprt;
+                atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
+                rqstp->rq_reserved = space;
+
+                svc_xprt_enqueue(xprt);
+        }
+}
+
+static void svc_xprt_release(struct svc_rqst *rqstp)
+{
+        struct svc_xprt *xprt = rqstp->rq_xprt;
+
+        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
+
+        svc_free_res_pages(rqstp);
+        rqstp->rq_res.page_len = 0;
+        rqstp->rq_res.page_base = 0;
+
+        /* Reset response buffer and release
+         * the reservation.
+         * But first, check that enough space was reserved
+         * for the reply, otherwise we have a bug!
+         */
+        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
+                printk(KERN_ERR "RPC request reserved %d but used %d\n",
+                       rqstp->rq_reserved,
+                       rqstp->rq_res.len);
+
+        rqstp->rq_res.head[0].iov_len = 0;
+        svc_reserve(rqstp, 0);
+        rqstp->rq_xprt = NULL;
+
+        svc_xprt_put(xprt);
+}
+
+/*
+ * External function to wake up a server waiting for data
+ * This really only makes sense for services like lockd
+ * which have exactly one thread anyway.
+ */
+void svc_wake_up(struct svc_serv *serv)
+{
+        struct svc_rqst *rqstp;
+        unsigned int i;
+        struct svc_pool *pool;
+
+        for (i = 0; i < serv->sv_nrpools; i++) {
+                pool = &serv->sv_pools[i];
+
+                spin_lock_bh(&pool->sp_lock);
+                if (!list_empty(&pool->sp_threads)) {
+                        rqstp = list_entry(pool->sp_threads.next,
+                                           struct svc_rqst,
+                                           rq_list);
+                        dprintk("svc: daemon %p woken up.\n", rqstp);
+                        /*
+                        svc_thread_dequeue(pool, rqstp);
+                        rqstp->rq_xprt = NULL;
+                         */
+                        wake_up(&rqstp->rq_wait);
+                }
+                spin_unlock_bh(&pool->sp_lock);
+        }
+}
+
+int svc_port_is_privileged(struct sockaddr *sin)
+{
+        switch (sin->sa_family) {
+        case AF_INET:
+                return ntohs(((struct sockaddr_in *)sin)->sin_port)
+                        < PROT_SOCK;
+        case AF_INET6:
+                return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
+                        < PROT_SOCK;
+        default:
+                return 0;
+        }
+}
+
+/*
+ * Make sure that we don't have too many active connections. If we
+ * have, something must be dropped.
+ *
+ * There's no point in trying to do random drop here for DoS
+ * prevention. The NFS clients does 1 reconnect in 15 seconds. An
+ * attacker can easily beat that.
+ *
+ * The only somewhat efficient mechanism would be if drop old
+ * connections from the same IP first. But right now we don't even
+ * record the client IP in svc_sock.
+ */
+static void svc_check_conn_limits(struct svc_serv *serv)
+{
+        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
+                struct svc_xprt *xprt = NULL;
+                spin_lock_bh(&serv->sv_lock);
+                if (!list_empty(&serv->sv_tempsocks)) {
+                        if (net_ratelimit()) {
+                                /* Try to help the admin */
+                                printk(KERN_NOTICE "%s: too many open "
+                                       "connections, consider increasing the "
+                                       "number of nfsd threads\n",
+                                       serv->sv_name);
+                        }
+                        /*
+                         * Always select the oldest connection. It's not fair,
+                         * but so is life
+                         */
+                        xprt = list_entry(serv->sv_tempsocks.prev,
+                                          struct svc_xprt,
+                                          xpt_list);
+                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
+                        svc_xprt_get(xprt);
+                }
+                spin_unlock_bh(&serv->sv_lock);
+
+                if (xprt) {
+                        svc_xprt_enqueue(xprt);
+                        svc_xprt_put(xprt);
+                }
+        }
+}
+
+/*
+ * Receive the next request on any transport. This code is carefully
+ * organised not to touch any cachelines in the shared svc_serv
+ * structure, only cachelines in the local svc_pool.
+ */
+int svc_recv(struct svc_rqst *rqstp, long timeout)
+{
+        struct svc_xprt *xprt = NULL;
+        struct svc_serv *serv = rqstp->rq_server;
+        struct svc_pool *pool = rqstp->rq_pool;
+        int len, i;
+        int pages;
+        struct xdr_buf *arg;
+        DECLARE_WAITQUEUE(wait, current);
+
+        dprintk("svc: server %p waiting for data (to = %ld)\n",
+                rqstp, timeout);
+
+        if (rqstp->rq_xprt)
+                printk(KERN_ERR
+                       "svc_recv: service %p, transport not NULL!\n",
+                       rqstp);
+        if (waitqueue_active(&rqstp->rq_wait))
+                printk(KERN_ERR
+                       "svc_recv: service %p, wait queue active!\n",
+                       rqstp);
+
+        /* now allocate needed pages. If we get a failure, sleep briefly */
+        pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
+        for (i = 0; i < pages ; i++)
+                while (rqstp->rq_pages[i] == NULL) {
+                        struct page *p = alloc_page(GFP_KERNEL);
+                        if (!p) {
+                                int j = msecs_to_jiffies(500);
+                                schedule_timeout_uninterruptible(j);
+                        }
+                        rqstp->rq_pages[i] = p;
+                }
+        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
+        BUG_ON(pages >= RPCSVC_MAXPAGES);
+
+        /* Make arg->head point to first page and arg->pages point to rest */
+        arg = &rqstp->rq_arg;
+        arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
+        arg->head[0].iov_len = PAGE_SIZE;
+        arg->pages = rqstp->rq_pages + 1;
+        arg->page_base = 0;
+        /* save at least one page for response */
+        arg->page_len = (pages-2)*PAGE_SIZE;
+        arg->len = (pages-1)*PAGE_SIZE;
+        arg->tail[0].iov_len = 0;
+
+        try_to_freeze();
+        cond_resched();
+        if (signalled())
+                return -EINTR;
+
+        spin_lock_bh(&pool->sp_lock);
+        xprt = svc_xprt_dequeue(pool);
+        if (xprt) {
+                rqstp->rq_xprt = xprt;
+                svc_xprt_get(xprt);
+                rqstp->rq_reserved = serv->sv_max_mesg;
+                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+        } else {
+                /* No data pending. Go to sleep */
+                svc_thread_enqueue(pool, rqstp);
+
+                /*
+                 * We have to be able to interrupt this wait
+                 * to bring down the daemons ...
+                 */
+                set_current_state(TASK_INTERRUPTIBLE);
+                add_wait_queue(&rqstp->rq_wait, &wait);
+                spin_unlock_bh(&pool->sp_lock);
+
+                schedule_timeout(timeout);
+
+                try_to_freeze();
+
+                spin_lock_bh(&pool->sp_lock);
+                remove_wait_queue(&rqstp->rq_wait, &wait);
+
+                xprt = rqstp->rq_xprt;
+                if (!xprt) {
+                        svc_thread_dequeue(pool, rqstp);
+                        spin_unlock_bh(&pool->sp_lock);
+                        dprintk("svc: server %p, no data yet\n", rqstp);
+                        return signalled()? -EINTR : -EAGAIN;
+                }
+        }
+        spin_unlock_bh(&pool->sp_lock);
+
+        len = 0;
+        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+                dprintk("svc_recv: found XPT_CLOSE\n");
+                svc_delete_xprt(xprt);
+        } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+                struct svc_xprt *newxpt;
+                newxpt = xprt->xpt_ops->xpo_accept(xprt);
+                if (newxpt) {
+                        /*
+                         * We know this module_get will succeed because the
+                         * listener holds a reference too
+                         */
+                        __module_get(newxpt->xpt_class->xcl_owner);
+                        svc_check_conn_limits(xprt->xpt_server);
+                        spin_lock_bh(&serv->sv_lock);
+                        set_bit(XPT_TEMP, &newxpt->xpt_flags);
+                        list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
+                        serv->sv_tmpcnt++;
+                        if (serv->sv_temptimer.function == NULL) {
+                                /* setup timer to age temp transports */
+                                setup_timer(&serv->sv_temptimer,
+                                            svc_age_temp_xprts,
+                                            (unsigned long)serv);
+                                mod_timer(&serv->sv_temptimer,
+                                          jiffies + svc_conn_age_period * HZ);
+                        }
+                        spin_unlock_bh(&serv->sv_lock);
+                        svc_xprt_received(newxpt);
+                }
+                svc_xprt_received(xprt);
+        } else {
+                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+                        rqstp, pool->sp_id, xprt,
+                        atomic_read(&xprt->xpt_ref.refcount));
+                rqstp->rq_deferred = svc_deferred_dequeue(xprt);
+                if (rqstp->rq_deferred) {
+                        svc_xprt_received(xprt);
+                        len = svc_deferred_recv(rqstp);
+                } else
+                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+                dprintk("svc: got len=%d\n", len);
+        }
+
+        /* No data, incomplete (TCP) read, or accept() */
+        if (len == 0 || len == -EAGAIN) {
+                rqstp->rq_res.len = 0;
+                svc_xprt_release(rqstp);
+                return -EAGAIN;
+        }
+        clear_bit(XPT_OLD, &xprt->xpt_flags);
+
+        rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
+        rqstp->rq_chandle.defer = svc_defer;
+
+        if (serv->sv_stats)
+                serv->sv_stats->netcnt++;
+        return len;
+}
+
+/*
+ * Drop request
+ */
+void svc_drop(struct svc_rqst *rqstp)
+{
+        dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
+        svc_xprt_release(rqstp);
+}
+
+/*
+ * Return reply to client.
+ */
+int svc_send(struct svc_rqst *rqstp)
+{
+        struct svc_xprt *xprt;
+        int len;
+        struct xdr_buf *xb;
+
+        xprt = rqstp->rq_xprt;
+        if (!xprt)
+                return -EFAULT;
+
+        /* release the receive skb before sending the reply */
+        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
+
+        /* calculate over-all length */
+        xb = &rqstp->rq_res;
+        xb->len = xb->head[0].iov_len +
+                xb->page_len +
+                xb->tail[0].iov_len;
+
+        /* Grab mutex to serialize outgoing data. */
+        mutex_lock(&xprt->xpt_mutex);
+        if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+                len = -ENOTCONN;
+        else
+                len = xprt->xpt_ops->xpo_sendto(rqstp);
+        mutex_unlock(&xprt->xpt_mutex);
+        svc_xprt_release(rqstp);
+
+        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
+                return 0;
+        return len;
+}
+
+/*
+ * Timer function to close old temporary transports, using
+ * a mark-and-sweep algorithm.
+ */
+static void svc_age_temp_xprts(unsigned long closure)
+{
+        struct svc_serv *serv = (struct svc_serv *)closure;
+        struct svc_xprt *xprt;
+        struct list_head *le, *next;
+        LIST_HEAD(to_be_aged);
+
+        dprintk("svc_age_temp_xprts\n");
+
+        if (!spin_trylock_bh(&serv->sv_lock)) {
+                /* busy, try again 1 sec later */
+                dprintk("svc_age_temp_xprts: busy\n");
+                mod_timer(&serv->sv_temptimer, jiffies + HZ);
+                return;
+        }
+
+        list_for_each_safe(le, next, &serv->sv_tempsocks) {
+                xprt = list_entry(le, struct svc_xprt, xpt_list);
+
+                /* First time through, just mark it OLD. Second time
+                 * through, close it. */
+                if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
+                        continue;
+                if (atomic_read(&xprt->xpt_ref.refcount) > 1
+                    || test_bit(XPT_BUSY, &xprt->xpt_flags))
+                        continue;
+                svc_xprt_get(xprt);
+                list_move(le, &to_be_aged);
+                set_bit(XPT_CLOSE, &xprt->xpt_flags);
+                set_bit(XPT_DETACHED, &xprt->xpt_flags);
+        }
+        spin_unlock_bh(&serv->sv_lock);
+
+        while (!list_empty(&to_be_aged)) {
+                le = to_be_aged.next;
+                /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+                list_del_init(le);
+                xprt = list_entry(le, struct svc_xprt, xpt_list);
+
+                dprintk("queuing xprt %p for closing\n", xprt);
+
+                /* a thread will dequeue and close it soon */
+                svc_xprt_enqueue(xprt);
+                svc_xprt_put(xprt);
+        }
+
+        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
+}
+
+/*
+ * Remove a dead transport
+ */
+void svc_delete_xprt(struct svc_xprt *xprt)
+{
+        struct svc_serv *serv = xprt->xpt_server;
+
+        dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+        xprt->xpt_ops->xpo_detach(xprt);
+
+        spin_lock_bh(&serv->sv_lock);
+        if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+                list_del_init(&xprt->xpt_list);
+        /*
+         * We used to delete the transport from whichever list
+         * it's sk_xprt.xpt_ready node was on, but we don't actually
+         * need to. This is because the only time we're called
+         * while still attached to a queue, the queue itself
+         * is about to be destroyed (in svc_destroy).
+         */
+        if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
+                BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
+                if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+                        serv->sv_tmpcnt--;
+                svc_xprt_put(xprt);
+        }
+        spin_unlock_bh(&serv->sv_lock);
+}
+
+void svc_close_xprt(struct svc_xprt *xprt)
+{
+        set_bit(XPT_CLOSE, &xprt->xpt_flags);
+        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
+                /* someone else will have to effect the close */
+                return;
+
+        svc_xprt_get(xprt);
+        svc_delete_xprt(xprt);
+        clear_bit(XPT_BUSY, &xprt->xpt_flags);
+        svc_xprt_put(xprt);
+}
+
+void svc_close_all(struct list_head *xprt_list)
+{
+        struct svc_xprt *xprt;
+        struct svc_xprt *tmp;
+
+        list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+                set_bit(XPT_CLOSE, &xprt->xpt_flags);
+                if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+                        /* Waiting to be processed, but no threads left,
+                         * So just remove it from the waiting list
+                         */
+                        list_del_init(&xprt->xpt_ready);
+                        clear_bit(XPT_BUSY, &xprt->xpt_flags);
+                }
+                svc_close_xprt(xprt);
+        }
+}
+
+/*
+ * Handle defer and revisit of requests
+ */
+
+static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
+{
+        struct svc_deferred_req *dr =
+                container_of(dreq, struct svc_deferred_req, handle);
+        struct svc_xprt *xprt = dr->xprt;
+
+        if (too_many) {
+                svc_xprt_put(xprt);
+                kfree(dr);
+                return;
+        }
+        dprintk("revisit queued\n");
+        dr->xprt = NULL;
+        spin_lock(&xprt->xpt_lock);
+        list_add(&dr->handle.recent, &xprt->xpt_deferred);
+        spin_unlock(&xprt->xpt_lock);
+        set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+        svc_xprt_enqueue(xprt);
+        svc_xprt_put(xprt);
+}
+
+static struct cache_deferred_req *svc_defer(struct cache_req *req)
+{
+        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
+        int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
+        struct svc_deferred_req *dr;
+
+        if (rqstp->rq_arg.page_len)
+                return NULL; /* if more than a page, give up FIXME */
+        if (rqstp->rq_deferred) {
+                dr = rqstp->rq_deferred;
+                rqstp->rq_deferred = NULL;
+        } else {
+                int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
+                /* FIXME maybe discard if size too large */
+                dr = kmalloc(size, GFP_KERNEL);
+                if (dr == NULL)
+                        return NULL;
+
+                dr->handle.owner = rqstp->rq_server;
+                dr->prot = rqstp->rq_prot;
+                memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
+                dr->addrlen = rqstp->rq_addrlen;
+                dr->daddr = rqstp->rq_daddr;
+                dr->argslen = rqstp->rq_arg.len >> 2;
+                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip,
+                       dr->argslen<<2);
+        }
+        svc_xprt_get(rqstp->rq_xprt);
+        dr->xprt = rqstp->rq_xprt;
+
+        dr->handle.revisit = svc_revisit;
+        return &dr->handle;
+}
+
+/*
+ * recv data from a deferred request into an active one
+ */
+static int svc_deferred_recv(struct svc_rqst *rqstp)
+{
+        struct svc_deferred_req *dr = rqstp->rq_deferred;
+
+        rqstp->rq_arg.head[0].iov_base = dr->args;
+        rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
+        rqstp->rq_arg.page_len = 0;
+        rqstp->rq_arg.len = dr->argslen<<2;
+        rqstp->rq_prot = dr->prot;
+        memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
+        rqstp->rq_addrlen = dr->addrlen;
+        rqstp->rq_daddr = dr->daddr;
+        rqstp->rq_respages = rqstp->rq_pages;
+        return dr->argslen<<2;
+}
+
+
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
+{
+        struct svc_deferred_req *dr = NULL;
+
+        if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
+                return NULL;
+        spin_lock(&xprt->xpt_lock);
+        clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
+        if (!list_empty(&xprt->xpt_deferred)) {
+                dr = list_entry(xprt->xpt_deferred.next,
+                                struct svc_deferred_req,
+                                handle.recent);
+                list_del_init(&dr->handle.recent);
+                set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+        }
+        spin_unlock(&xprt->xpt_lock);
+        return dr;
+}
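
The enqueue protocol documented in the SMP comment near the top of the file is easiest to see from the provider side. The sketch below is illustrative only: the example_* names are made up, and only svc_xprt_enqueue(), svc_xprt_received() and the XPT_* bits are taken from the code above.

```c
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* Data-ready callback of a hypothetical provider (e.g. a socket callback). */
static void example_data_ready(struct svc_xprt *xprt)
{
        set_bit(XPT_DATA, &xprt->xpt_flags);    /* something to read */
        svc_xprt_enqueue(xprt);                 /* wake or queue for an nfsd thread */
}

/* Body of the provider's receive path, entered with XPT_BUSY already set. */
static int example_provider_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        int len = 0;

        /* ... try to pull one complete request into rqstp->rq_arg ... */

        if (len <= 0)
                clear_bit(XPT_DATA, &xprt->xpt_flags);  /* nothing (more) to read */
        svc_xprt_received(xprt);        /* drop XPT_BUSY; re-enqueue if flags warrant */
        return len;
}
```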
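svc_recv() and svc_send() reach the transport only through the xprt->xpt_ops function pointers used above (xpo_accept, xpo_recvfrom, xpo_sendto, xpo_release_rqst, xpo_detach, xpo_has_wspace). A minimal sketch of how a provider might fill that table follows, assuming the svc_xprt_ops structure from include/linux/sunrpc/svc_xprt.h; the demo_* stubs are placeholders, not a real transport.

```c
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* Empty placeholder callbacks; a real provider (e.g. svcsock.c) does the work. */
static struct svc_xprt *demo_accept(struct svc_xprt *listener) { return NULL; }
static int  demo_recvfrom(struct svc_rqst *rqstp)      { return 0; }
static int  demo_sendto(struct svc_rqst *rqstp)        { return 0; }
static void demo_release_rqst(struct svc_rqst *rqstp)  { }
static void demo_detach(struct svc_xprt *xprt)         { }
static int  demo_has_wspace(struct svc_xprt *xprt)     { return 1; }

static struct svc_xprt_ops demo_ops = {
        .xpo_accept       = demo_accept,       /* called for XPT_LISTENER transports */
        .xpo_recvfrom     = demo_recvfrom,     /* pull one request into rq_arg */
        .xpo_sendto       = demo_sendto,       /* push rq_res back to the client */
        .xpo_release_rqst = demo_release_rqst, /* drop per-request receive state */
        .xpo_detach       = demo_detach,       /* stop delivering events */
        .xpo_has_wspace   = demo_has_wspace,   /* enough write space for a reply? */
};
```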
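svc_reserve(), whose kernel-doc appears above, lets a service shrink its write-space reservation (initially sv_max_mesg) once it knows the reply will be small. A hypothetical caller, with the 1KB figure chosen purely for illustration:

```c
#include <linux/sunrpc/svc.h>

static void example_trim_reservation(struct svc_rqst *rqstp)
{
        /*
         * Keep only ~1KB of reserved reply space beyond what is already in
         * rq_res.head[0]; the space released may let svc_xprt_enqueue()
         * requeue the transport for other waiting requests.
         */
        svc_reserve(rqstp, 1024);
}
```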
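The threshold in svc_check_conn_limits() works out to (sv_nrthreads + 3) * 20 temporary transports, so a server running 8 nfsd threads, for example, starts closing its oldest temporary connection once it holds more than 220 of them. A sketch of just that test, using the fields referenced in the patch:

```c
#include <linux/sunrpc/svc.h>

static int example_over_conn_limit(struct svc_serv *serv)
{
        /* e.g. 8 threads: (8 + 3) * 20 = 220 temporary connections allowed */
        return serv->sv_tmpcnt > (serv->sv_nrthreads + 3) * 20;
}
```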
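Finally, svc_recv() points rq_chandle.defer at svc_defer(), so cache code that cannot answer a request immediately can park it and have svc_revisit() replay it later. A simplified, hypothetical caller is sketched below; the real plumbing lives in the sunrpc cache code, and the error values here are only illustrative.

```c
#include <linux/errno.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svc.h>

static int example_lookup(struct svc_rqst *rqstp, int answer_ready)
{
        struct cache_deferred_req *dreq;

        if (answer_ready)
                return 0;                       /* handle the request normally */

        /* svc_defer() snapshots rq_arg and pins rq_xprt behind this hook. */
        dreq = rqstp->rq_chandle.defer(&rqstp->rq_chandle);
        if (dreq == NULL)
                return -EAGAIN;                 /* could not defer; caller drops it */

        /*
         * The cache owner keeps dreq and, once the answer arrives, calls
         * dreq->revisit(dreq, 0). svc_revisit() then moves the saved request
         * onto xpt_deferred, sets XPT_DEFERRED and calls svc_xprt_enqueue(),
         * so a later svc_recv() replays it via svc_deferred_recv().
         */
        return -EAGAIN;                         /* deferred for now */
}
```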