author    | Tom Tucker <tom@opengridcomputing.com>   | 2007-12-30 22:08:16 -0500
committer | J. Bruce Fields <bfields@citi.umich.edu> | 2008-02-01 16:42:12 -0500
commit    | c36adb2a7f9132b37d4b669b2e2c04e46d5188b2 (patch)
tree      | 0047a71515d96cdd4085adbd1691232db42c2b9f /net/sunrpc
parent    | eab996d4aca7a9d8621d2b98c00ce420df85eaed (diff)
svc: Make svc_recv transport neutral
All of the transport fields and functions used by svc_recv are now
transport independent. Change the svc_recv function to use the svc_xprt
structure directly instead of the transport-specific svc_sock structure.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
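For readers unfamiliar with the sunrpc layering this patch relies on: the generic struct svc_xprt is embedded inside the transport-specific struct svc_sock (as the sk_xprt member), which is why svc_recv() can now hold a bare svc_xprt pointer while socket code can still recover its svc_sock via container_of(). The following is a minimal user-space sketch of that embedding pattern, not the real kernel definitions; the struct layouts and the sk_fd field are simplified placeholders for illustration.

/*
 * Simplified illustration of the svc_xprt-in-svc_sock embedding.
 * Generic code (like svc_recv in this patch) sees only svc_xprt;
 * transport code recovers its private structure with container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct svc_xprt {               /* transport-independent state (placeholder) */
	unsigned long xpt_flags;
};

struct svc_sock {               /* socket transport wraps the generic part */
	struct svc_xprt sk_xprt;
	int sk_fd;              /* placeholder for the underlying socket */
};

/* Generic code: works with struct svc_xprt only. */
static void generic_handle(struct svc_xprt *xprt)
{
	printf("generic code saw transport %p, flags=%lu\n",
	       (void *)xprt, xprt->xpt_flags);
}

/* Transport-specific code: recovers its svc_sock from the xprt. */
static void socket_handle(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	printf("socket code saw fd %d\n", svsk->sk_fd);
}

int main(void)
{
	struct svc_sock svsk = { .sk_xprt = { .xpt_flags = 1 }, .sk_fd = 42 };

	generic_handle(&svsk.sk_xprt);  /* pass only the embedded xprt */
	socket_handle(&svsk.sk_xprt);   /* transport side gets svc_sock back */
	return 0;
}

The same pattern is what lets svc_xprt_dequeue() below return a struct svc_xprt * directly and lets svc_recv() drop its svc_sock-specific accesses.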
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/svcsock.c | 63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 355ab8da54fe..fa57e9bc68e4 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -310,22 +310,21 @@ EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 /*
  * Dequeue the first socket. Must be called with the pool->sp_lock held.
  */
-static inline struct svc_sock *
-svc_sock_dequeue(struct svc_pool *pool)
+static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 {
-	struct svc_sock *svsk;
+	struct svc_xprt *xprt;
 
 	if (list_empty(&pool->sp_sockets))
 		return NULL;
 
-	svsk = list_entry(pool->sp_sockets.next,
-			  struct svc_sock, sk_xprt.xpt_ready);
-	list_del_init(&svsk->sk_xprt.xpt_ready);
+	xprt = list_entry(pool->sp_sockets.next,
+			  struct svc_xprt, xpt_ready);
+	list_del_init(&xprt->xpt_ready);
 
-	dprintk("svc: socket %p dequeued, inuse=%d\n",
-		svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
+	dprintk("svc: transport %p dequeued, inuse=%d\n",
+		xprt, atomic_read(&xprt->xpt_ref.refcount));
 
-	return svsk;
+	return xprt;
 }
 
 /*
@@ -1475,20 +1474,20 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 int
 svc_recv(struct svc_rqst *rqstp, long timeout)
 {
-	struct svc_sock *svsk = NULL;
+	struct svc_xprt *xprt = NULL;
 	struct svc_serv *serv = rqstp->rq_server;
 	struct svc_pool *pool = rqstp->rq_pool;
 	int len, i;
 	int pages;
 	struct xdr_buf *arg;
 	DECLARE_WAITQUEUE(wait, current);
 
 	dprintk("svc: server %p waiting for data (to = %ld)\n",
 		rqstp, timeout);
 
-	if (rqstp->rq_sock)
+	if (rqstp->rq_xprt)
 		printk(KERN_ERR
-			"svc_recv: service %p, socket not NULL!\n",
+			"svc_recv: service %p, transport not NULL!\n",
 			rqstp);
 	if (waitqueue_active(&rqstp->rq_wait))
 		printk(KERN_ERR
@@ -1525,11 +1524,12 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
-	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
-		rqstp->rq_sock = svsk;
-		svc_xprt_get(&svsk->sk_xprt);
+	xprt = svc_xprt_dequeue(pool);
+	if (xprt) {
+		rqstp->rq_xprt = xprt;
+		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
+		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 	} else {
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);
@@ -1549,7 +1549,8 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		spin_lock_bh(&pool->sp_lock);
 		remove_wait_queue(&rqstp->rq_wait, &wait);
 
-		if (!(svsk = rqstp->rq_sock)) {
+		xprt = rqstp->rq_xprt;
+		if (!xprt) {
 			svc_thread_dequeue(pool, rqstp);
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
@@ -1559,32 +1560,32 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
+	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
-		svc_delete_xprt(&svsk->sk_xprt);
-	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
+		svc_delete_xprt(xprt);
+	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
-		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
+		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
 			/*
 			 * We know this module_get will succeed because the
 			 * listener holds a reference too
 			 */
 			__module_get(newxpt->xpt_class->xcl_owner);
-			svc_check_conn_limits(svsk->sk_xprt.xpt_server);
+			svc_check_conn_limits(xprt->xpt_server);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(&svsk->sk_xprt);
+		svc_xprt_received(xprt);
 	} else {
-		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
-			rqstp, pool->sp_id, svsk,
-			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
-		rqstp->rq_deferred = svc_deferred_dequeue(&svsk->sk_xprt);
+		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+			rqstp, pool->sp_id, xprt,
+			atomic_read(&xprt->xpt_ref.refcount));
+		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
 		if (rqstp->rq_deferred) {
-			svc_xprt_received(&svsk->sk_xprt);
+			svc_xprt_received(xprt);
 			len = svc_deferred_recv(rqstp);
 		} else
-			len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
+			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
 		dprintk("svc: got len=%d\n", len);
 	}
 
@@ -1594,7 +1595,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		svc_xprt_release(rqstp);
 		return -EAGAIN;
 	}
-	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
+	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
 	rqstp->rq_chandle.defer = svc_defer;