aboutsummaryrefslogtreecommitdiffstats
path: root/net/sunrpc
diff options
context:
space:
mode:
authorTom Tucker <tom@opengridcomputing.com>2007-12-30 22:07:55 -0500
committerJ. Bruce Fields <bfields@citi.umich.edu>2008-02-01 16:42:11 -0500
commit7a90e8cc21ad80529b3a3371dc97acc8832cc592 (patch)
tree3a5527b0cce3c046067c72705d6acd5d6bc1184b /net/sunrpc
parent7a18208383ab3f3ce4a1f4e0536acc9372523d81 (diff)
svc: Move sk_reserved to svc_xprt
This functionally trivial patch moves the sk_reserved field to the transport-independent svc_xprt structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/svcsock.c10
1 files changed, 5 insertions, 5 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6f63a5ca6a91..c47bede754ea 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -288,7 +288,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
288 rqstp->rq_sock = svsk; 288 rqstp->rq_sock = svsk;
289 svc_xprt_get(&svsk->sk_xprt); 289 svc_xprt_get(&svsk->sk_xprt);
290 rqstp->rq_reserved = serv->sv_max_mesg; 290 rqstp->rq_reserved = serv->sv_max_mesg;
291 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); 291 atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
292 BUG_ON(svsk->sk_xprt.xpt_pool != pool); 292 BUG_ON(svsk->sk_xprt.xpt_pool != pool);
293 wake_up(&rqstp->rq_wait); 293 wake_up(&rqstp->rq_wait);
294 } else { 294 } else {
@@ -353,7 +353,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
353 353
354 if (space < rqstp->rq_reserved) { 354 if (space < rqstp->rq_reserved) {
355 struct svc_sock *svsk = rqstp->rq_sock; 355 struct svc_sock *svsk = rqstp->rq_sock;
356 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved); 356 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
357 rqstp->rq_reserved = space; 357 rqstp->rq_reserved = space;
358 358
359 svc_sock_enqueue(svsk); 359 svc_sock_enqueue(svsk);
@@ -881,7 +881,7 @@ static int svc_udp_has_wspace(struct svc_xprt *xprt)
881 * sock space. 881 * sock space.
882 */ 882 */
883 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 883 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
884 required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg; 884 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
885 if (required*2 > sock_wspace(svsk->sk_sk)) 885 if (required*2 > sock_wspace(svsk->sk_sk))
886 return 0; 886 return 0;
887 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 887 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
@@ -1327,7 +1327,7 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
1327 * sock space. 1327 * sock space.
1328 */ 1328 */
1329 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 1329 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
1330 required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg; 1330 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
1331 wspace = sk_stream_wspace(svsk->sk_sk); 1331 wspace = sk_stream_wspace(svsk->sk_sk);
1332 1332
1333 if (wspace < sk_stream_min_wspace(svsk->sk_sk)) 1333 if (wspace < sk_stream_min_wspace(svsk->sk_sk))
@@ -1544,7 +1544,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1544 rqstp->rq_sock = svsk; 1544 rqstp->rq_sock = svsk;
1545 svc_xprt_get(&svsk->sk_xprt); 1545 svc_xprt_get(&svsk->sk_xprt);
1546 rqstp->rq_reserved = serv->sv_max_mesg; 1546 rqstp->rq_reserved = serv->sv_max_mesg;
1547 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); 1547 atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
1548 } else { 1548 } else {
1549 /* No data pending. Go to sleep */ 1549 /* No data pending. Go to sleep */
1550 svc_thread_enqueue(pool, rqstp); 1550 svc_thread_enqueue(pool, rqstp);