author	Tom Tucker <tom@opengridcomputing.com>	2007-12-30 22:07:55 -0500
committer	J. Bruce Fields <bfields@citi.umich.edu>	2008-02-01 16:42:11 -0500
commit	7a90e8cc21ad80529b3a3371dc97acc8832cc592 (patch)
tree	3a5527b0cce3c046067c72705d6acd5d6bc1184b
parent	7a18208383ab3f3ce4a1f4e0536acc9372523d81 (diff)
svc: Move sk_reserved to svc_xprt
This functionally trivial patch moves the sk_reserved field to the transport-independent svc_xprt structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
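For readers skimming the diff below, here is a minimal userspace sketch (not kernel code) of the accounting pattern that xpt_reserved implements: worst-case reply space is reserved when a thread takes the transport (svc_sock_enqueue/svc_recv), the unused portion is returned in svc_reserve(), and the has_wspace helpers refuse new work while outstanding reservations would overrun the socket's write space. The struct and helper names in the sketch (xprt_model, reserve_space, release_unused) are illustrative only; the real code uses atomic_t and lives in net/sunrpc/svcsock.c.

/*
 * Minimal userspace model of the xpt_reserved accounting; all names
 * here are hypothetical, only the arithmetic mirrors the patch.
 */
#include <stdatomic.h>
#include <stdio.h>

struct xprt_model {
	atomic_int reserved;	/* models svc_xprt.xpt_reserved */
};

/* A thread picks up the transport: reserve worst-case reply space. */
static void reserve_space(struct xprt_model *xprt, int max_mesg)
{
	atomic_fetch_add(&xprt->reserved, max_mesg);
}

/* The reply turned out smaller than reserved: give back the difference. */
static void release_unused(struct xprt_model *xprt, int reserved, int used)
{
	if (used < reserved)
		atomic_fetch_sub(&xprt->reserved, reserved - used);
}

/* Accept more work only if outstanding reservations still fit. */
static int has_wspace(struct xprt_model *xprt, int max_mesg, int sock_space)
{
	int required = atomic_load(&xprt->reserved) + max_mesg;
	return required * 2 <= sock_space;
}

int main(void)
{
	struct xprt_model xprt = { .reserved = 0 };

	reserve_space(&xprt, 4096);		/* like rq_reserved = sv_max_mesg */
	release_unused(&xprt, 4096, 512);	/* like svc_reserve() */
	printf("reserved=%d has_wspace=%d\n",
	       atomic_load(&xprt.reserved),
	       has_wspace(&xprt, 4096, 65536));
	return 0;
}

The factor of two in has_wspace() mirrors the existing required*2 > sock_wspace() test in svc_udp_has_wspace() and svc_tcp_has_wspace(); the patch only changes where the counter lives, not this logic.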
-rw-r--r--  include/linux/sunrpc/svc_xprt.h   1
-rw-r--r--  include/linux/sunrpc/svcsock.h    2
-rw-r--r--  net/sunrpc/svcsock.c             10
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0a3e09b42a83..0b8ee06f99c0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -53,6 +53,7 @@ struct svc_xprt {
 
 	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
+	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
 };
 
 int	svc_reg_xprt_class(struct svc_xprt_class *);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 060508ba358b..ba41f11788f2 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -20,8 +20,6 @@ struct svc_sock {
 	struct socket *		sk_sock;	/* berkeley socket layer */
 	struct sock *		sk_sk;		/* INET layer */
 
-	atomic_t		sk_reserved;	/* space on outq that is reserved */
-
 	spinlock_t		sk_lock;	/* protects sk_deferred and
 						 * sk_info_authunix */
 	struct list_head	sk_deferred;	/* deferred requests that need to
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6f63a5ca6a91..c47bede754ea 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -288,7 +288,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		rqstp->rq_sock = svsk;
 		svc_xprt_get(&svsk->sk_xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
 		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
@@ -353,7 +353,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 
 	if (space < rqstp->rq_reserved) {
 		struct svc_sock *svsk = rqstp->rq_sock;
-		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
+		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
 		rqstp->rq_reserved = space;
 
 		svc_sock_enqueue(svsk);
@@ -881,7 +881,7 @@ static int svc_udp_has_wspace(struct svc_xprt *xprt)
 	 * sock space.
 	 */
 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
 	if (required*2 > sock_wspace(svsk->sk_sk))
 		return 0;
 	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
@@ -1327,7 +1327,7 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
 	 * sock space.
 	 */
 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
 	wspace = sk_stream_wspace(svsk->sk_sk);
 
 	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
@@ -1544,7 +1544,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		rqstp->rq_sock = svsk;
 		svc_xprt_get(&svsk->sk_xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
 	} else {
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);