author    Tom Tucker <tom@opengridcomputing.com>        2007-12-30 22:08:10 -0500
committer J. Bruce Fields <bfields@citi.umich.edu>      2008-02-01 16:42:12 -0500
commit    8c7b0172a1db8120d25ecb4eff69664c52ee7639 (patch)
tree      d048ab4c5c378d2a90e5dd52a09dee4e24712cc9
parent    def13d7401e9b95bbd34c20057ebeb2972708b1b (diff)
svc: Make deferral processing xprt independent
This patch moves the transport independent sk_deferred list to the svc_xprt
structure and updates the svc_deferred_req structure to keep pointers to
svc_xprt's directly. The deferral processing code is also moved out of the
transport dependent recvfrom functions and into the generic svc_recv path.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
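For orientation, here is a small user-space sketch of the pattern this patch
establishes: the deferred-request list hangs off the generic transport object,
and the generic receive path drains it before calling the transport-specific
recvfrom. The svc_* names mirror the patch, but the types, the singly linked
list, and the fake_udp_recvfrom/main driver below are illustrative stand-ins,
not the kernel code.

/*
 * Simplified sketch of the post-patch flow: the deferred list lives on the
 * generic transport (svc_xprt) and svc_recv checks it once, instead of each
 * transport's recvfrom doing its own check.  Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct svc_deferred_req {
        int id;                              /* stand-in for the saved request */
        struct svc_deferred_req *next;
};

struct svc_xprt {
        const char *name;                    /* "udp", "tcp", ... */
        struct svc_deferred_req *deferred;   /* was sk_deferred on svc_sock */
        int (*recvfrom)(struct svc_xprt *);  /* transport-specific receive */
};

/* Pop one deferred request, if any (the kernel version takes xpt_lock). */
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
        struct svc_deferred_req *dr = xprt->deferred;

        if (dr)
                xprt->deferred = dr->next;
        return dr;
}

/* Generic receive path: deferral handling happens here, once. */
static int svc_recv(struct svc_xprt *xprt)
{
        struct svc_deferred_req *dr = svc_deferred_dequeue(xprt);

        if (dr) {
                int id = dr->id;

                free(dr);
                printf("%s: revisiting deferred request %d\n", xprt->name, id);
                return id;
        }
        return xprt->recvfrom(xprt);
}

static int fake_udp_recvfrom(struct svc_xprt *xprt)
{
        printf("%s: reading fresh request from the socket\n", xprt->name);
        return 0;
}

int main(void)
{
        struct svc_deferred_req *dr = malloc(sizeof(*dr));
        struct svc_xprt udp = { "udp", NULL, fake_udp_recvfrom };

        dr->id = 42;
        dr->next = NULL;
        udp.deferred = dr;      /* as if svc_revisit had queued it */

        svc_recv(&udp);         /* picks up the deferred request */
        svc_recv(&udp);         /* falls through to recvfrom */
        return 0;
}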
-rw-r--r--  include/linux/sunrpc/svc.h       |  2
-rw-r--r--  include/linux/sunrpc/svc_xprt.h  |  2
-rw-r--r--  include/linux/sunrpc/svcsock.h   |  3
-rw-r--r--  net/sunrpc/svc_xprt.c            |  1
-rw-r--r--  net/sunrpc/svcsock.c             | 57
5 files changed, 29 insertions(+), 36 deletions(-)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cfb2652f6f8f..40adc9d75a6d 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -320,7 +320,7 @@ static inline void svc_free_res_pages(struct svc_rqst *rqstp)
 
 struct svc_deferred_req {
 	u32			prot;	/* protocol (UDP or TCP) */
-	struct svc_sock		*svsk;
+	struct svc_xprt		*xprt;
 	struct sockaddr_storage	addr;	/* where reply must go */
 	size_t			addrlen;
 	union svc_addr_u	daddr;	/* where reply must come from */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 1b5da39bb461..6a8445b9dfd9 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -59,6 +59,8 @@ struct svc_xprt {
 	spinlock_t		xpt_lock;	/* protects sk_deferred
 						 * and xpt_auth_cache */
 	void			*xpt_auth_cache;/* auth cache */
+	struct list_head	xpt_deferred;	/* deferred requests that need
+						 * to be revisted */
 };
 
 int	svc_reg_xprt_class(struct svc_xprt_class *);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index f2ed6a25a7aa..96a229e6b9c9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -20,9 +20,6 @@ struct svc_sock {
 	struct socket *		sk_sock;	/* berkeley socket layer */
 	struct sock *		sk_sk;		/* INET layer */
 
-	struct list_head	sk_deferred;	/* deferred requests that need to
-						 * be revisted */
-
 	/* We keep the old state_change and data_ready CB's here */
 	void			(*sk_ostate)(struct sock *);
 	void			(*sk_odata)(struct sock *, int bytes);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index d2ac130b9040..023aeb0ecfa9 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -102,6 +102,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	xprt->xpt_server = serv;
 	INIT_LIST_HEAD(&xprt->xpt_list);
 	INIT_LIST_HEAD(&xprt->xpt_ready);
+	INIT_LIST_HEAD(&xprt->xpt_deferred);
 	mutex_init(&xprt->xpt_mutex);
 	spin_lock_init(&xprt->xpt_lock);
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5c9422c9a980..9d0a9e6c0e10 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -89,7 +89,7 @@ static void svc_close_xprt(struct svc_xprt *xprt);
 static void		svc_sock_detach(struct svc_xprt *);
 static void		svc_sock_free(struct svc_xprt *);
 
-static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
@@ -771,11 +771,6 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg);
 
-	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
-		svc_xprt_received(&svsk->sk_xprt);
-		return svc_deferred_recv(rqstp);
-	}
-
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
 	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
@@ -1138,11 +1133,6 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
-	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
-		svc_xprt_received(&svsk->sk_xprt);
-		return svc_deferred_recv(rqstp);
-	}
-
 	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
 		 * per thread, otherwise we can stall even when the
@@ -1601,7 +1591,12 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
 			rqstp, pool->sp_id, svsk,
 			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
-		len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
+		rqstp->rq_deferred = svc_deferred_dequeue(&svsk->sk_xprt);
+		if (rqstp->rq_deferred) {
+			svc_xprt_received(&svsk->sk_xprt);
+			len = svc_deferred_recv(rqstp);
+		} else
+			len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
 		dprintk("svc: got len=%d\n", len);
 	}
 
@@ -1758,7 +1753,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_ostate = inet->sk_state_change;
 	svsk->sk_odata = inet->sk_data_ready;
 	svsk->sk_owspace = inet->sk_write_space;
-	INIT_LIST_HEAD(&svsk->sk_deferred);
 
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
@@ -1976,22 +1970,21 @@ void svc_close_all(struct list_head *xprt_list)
 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 {
 	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
-	struct svc_sock *svsk;
+	struct svc_xprt *xprt = dr->xprt;
 
 	if (too_many) {
-		svc_xprt_put(&dr->svsk->sk_xprt);
+		svc_xprt_put(xprt);
 		kfree(dr);
 		return;
 	}
 	dprintk("revisit queued\n");
-	svsk = dr->svsk;
-	dr->svsk = NULL;
-	spin_lock(&svsk->sk_xprt.xpt_lock);
-	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock(&svsk->sk_xprt.xpt_lock);
-	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
-	svc_xprt_enqueue(&svsk->sk_xprt);
-	svc_xprt_put(&svsk->sk_xprt);
+	dr->xprt = NULL;
+	spin_lock(&xprt->xpt_lock);
+	list_add(&dr->handle.recent, &xprt->xpt_deferred);
+	spin_unlock(&xprt->xpt_lock);
+	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	svc_xprt_enqueue(xprt);
+	svc_xprt_put(xprt);
 }
 
 static struct cache_deferred_req *
@@ -2022,7 +2015,7 @@ svc_defer(struct cache_req *req)
 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
 	}
 	svc_xprt_get(rqstp->rq_xprt);
-	dr->svsk = rqstp->rq_sock;
+	dr->xprt = rqstp->rq_xprt;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -2048,21 +2041,21 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 }
 
 
-static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
 {
 	struct svc_deferred_req *dr = NULL;
 
-	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
+	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
-	spin_lock(&svsk->sk_xprt.xpt_lock);
-	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
-	if (!list_empty(&svsk->sk_deferred)) {
-		dr = list_entry(svsk->sk_deferred.next,
+	spin_lock(&xprt->xpt_lock);
+	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	if (!list_empty(&xprt->xpt_deferred)) {
+		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	}
-	spin_unlock(&svsk->sk_xprt.xpt_lock);
+	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }