author		Tom Tucker <tom@opengridcomputing.com>	2007-12-30 22:07:48 -0500
committer	J. Bruce Fields <bfields@citi.umich.edu>	2008-02-01 16:42:11 -0500
commit		02fc6c36188be0ad19502cfd39266150ffab7603 (patch)
tree		e1d85b49c18944209232bbb1ebaf51001b6af602 /net/sunrpc/svcsock.c
parent		e1b3157f9710622bad6c7747d3b08ed3d2394cf6 (diff)
svc: Move sk_flags to the svc_xprt structure
This functionally trivial change moves the transport-independent sk_flags field to the transport-independent svc_xprt structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
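Every hunk below applies one mechanical pattern: an atomic bit operation on the socket-private svsk->sk_flags word becomes the same operation on the flag word embedded in the generic transport, svsk->sk_xprt.xpt_flags, and each SK_* bit is renamed to its XPT_* counterpart. A minimal standalone sketch of that before/after shape (the struct layouts and bit list here are simplified assumptions for illustration, not the kernel's full definitions):

/* Sketch only -- simplified stand-ins for the kernel structures; the real
 * svc_xprt and svc_sock carry many more fields than shown here. */
#include <stdio.h>

enum {
	XPT_BUSY,	/* enqueued or being processed (was SK_BUSY) */
	XPT_CONN,	/* connection pending          (was SK_CONN) */
	XPT_DATA,	/* data pending                (was SK_DATA) */
	XPT_CLOSE,	/* dead or dying               (was SK_CLOSE) */
};

struct svc_xprt {
	unsigned long xpt_flags;	/* the flag word now lives here... */
};

struct svc_sock {
	struct svc_xprt sk_xprt;	/* ...inside the embedded transport */
	/* unsigned long sk_flags;	   removed by this patch */
};

int main(void)
{
	struct svc_sock svsk = { { 0 } };

	/* before: set_bit(SK_DATA, &svsk->sk_flags);
	 * after:  set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	 * plain (non-atomic) equivalent for this sketch: */
	svsk.sk_xprt.xpt_flags |= 1UL << XPT_DATA;

	printf("data pending: %lu\n",
	       (svsk.sk_xprt.xpt_flags >> XPT_DATA) & 1UL);
	return 0;
}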
Diffstat (limited to 'net/sunrpc/svcsock.c')
-rw-r--r--  net/sunrpc/svcsock.c | 151
1 file changed, 76 insertions(+), 75 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index db589d187170..0a7125271d44 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -56,22 +56,23 @@
  * BKL protects svc_serv->sv_nrthread.
  * svc_sock->sk_lock protects the svc_sock->sk_deferred list
  * and the ->sk_info_authunix cache.
- * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
+ * svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being
+ * enqueued multiply.
  *
  * Some flags can be set to certain values at any time
  * providing that certain rules are followed:
  *
- * SK_CONN, SK_DATA, can be set or cleared at any time.
+ * XPT_CONN, XPT_DATA, can be set or cleared at any time.
  *	after a set, svc_sock_enqueue must be called.
  *	after a clear, the socket must be read/accepted
  *	 if this succeeds, it must be set again.
- * SK_CLOSE can set at any time. It is never cleared.
- * xpt_ref contains a bias of '1' until SK_DEAD is set.
+ * XPT_CLOSE can set at any time. It is never cleared.
+ * xpt_ref contains a bias of '1' until XPT_DEAD is set.
  * so when xprt_ref hits zero, we know the transport is dead
  * and no-one is using it.
- * SK_DEAD can only be set while SK_BUSY is held which ensures
+ * XPT_DEAD can only be set while XPT_BUSY is held which ensures
  * no other thread will be using the socket or will try to
- * set SK_DEAD.
+ * set XPT_DEAD.
  *
  */
 
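The XPT_BUSY rule restated in the comment above does real work in svc_sock_enqueue() below: the bit is claimed with an atomic test-and-set, so of two threads racing to enqueue the same transport only one can win. A hedged sketch of that guard, assuming C11 atomics in place of the kernel's test_and_set_bit() (the helper name is hypothetical):

/* Hedged sketch of the XPT_BUSY enqueue guard using C11 atomics instead of
 * the kernel's test_and_set_bit(); try_mark_busy() is a hypothetical name. */
#include <stdatomic.h>
#include <stdbool.h>

#define XPT_BUSY 0	/* bit index, mirroring the kernel flag */

/* Returns true only for the one caller that flips the bit from clear to
 * set; a concurrent caller sees it already set and must back off, like
 * the "Don't enqueue socket while already enqueued" path in the diff. */
static bool try_mark_busy(atomic_ulong *xpt_flags)
{
	unsigned long old = atomic_fetch_or(xpt_flags, 1UL << XPT_BUSY);

	return !(old & (1UL << XPT_BUSY));
}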
@@ -219,10 +220,10 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	struct svc_rqst	*rqstp;
 	int cpu;
 
-	if (!(svsk->sk_flags &
-	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
+	if (!(svsk->sk_xprt.xpt_flags &
+	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
 		return;
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		return;
 
 	cpu = get_cpu();
@@ -236,7 +237,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		printk(KERN_ERR
 			"svc_sock_enqueue: threads and sockets both waiting??\n");
 
-	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue dead sockets */
 		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;
@@ -244,10 +245,10 @@ svc_sock_enqueue(struct svc_sock *svsk)
 
 	/* Mark socket as busy. It will remain in this state until the
 	 * server has processed all pending data and put the socket back
-	 * on the idle list.  We update SK_BUSY atomically because
+	 * on the idle list.  We update XPT_BUSY atomically because
 	 * it also guards against trying to enqueue the svc_sock twice.
 	 */
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue socket while already enqueued */
 		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;
@@ -256,11 +257,11 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	svsk->sk_pool = pool;
 
 	/* Handle pending connection */
-	if (test_bit(SK_CONN, &svsk->sk_flags))
+	if (test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags))
 		goto process;
 
 	/* Handle close in-progress */
-	if (test_bit(SK_CLOSE, &svsk->sk_flags))
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags))
 		goto process;
 
 	/* Check if we have space to reply to a request */
@@ -268,7 +269,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		/* Don't enqueue while not enough space for reply */
 		dprintk("svc: no write space, socket %p  not enqueued\n", svsk);
 		svsk->sk_pool = NULL;
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 		goto out_unlock;
 	}
 
@@ -324,14 +325,14 @@ svc_sock_dequeue(struct svc_pool *pool)
 /*
  * Having read something from a socket, check whether it
  * needs to be re-enqueued.
- * Note: SK_DATA only gets cleared when a read-attempt finds
+ * Note: XPT_DATA only gets cleared when a read-attempt finds
  * no (or insufficient) data.
  */
 static inline void
 svc_sock_received(struct svc_sock *svsk)
 {
 	svsk->sk_pool = NULL;
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 }
 
@@ -680,8 +681,9 @@ svc_udp_data_ready(struct sock *sk, int count)
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
-			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
-		set_bit(SK_DATA, &svsk->sk_flags);
+			svsk, sk, count,
+			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -698,7 +700,7 @@ svc_write_space(struct sock *sk)
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
-			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
+			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		svc_sock_enqueue(svsk);
 	}
 
@@ -748,7 +750,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		.msg_flags = MSG_DONTWAIT,
 	};
 
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* udp sockets need large rcvbuf as all pending
 		 * requests are still in that buffer. sndbuf must
 		 * also be large enough that there is enough space
@@ -766,7 +768,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		return svc_deferred_recv(rqstp);
 	}
 
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
 	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
 			     0, 0, MSG_PEEK | MSG_DONTWAIT);
@@ -777,7 +779,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		if (err != -EAGAIN) {
 			/* possibly an icmp error */
 			dprintk("svc: recvfrom returned error %d\n", -err);
-			set_bit(SK_DATA, &svsk->sk_flags);
+			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		}
 		svc_sock_received(svsk);
 		return -EAGAIN;
@@ -789,7 +791,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 			   need that much accuracy */
 	}
 	svsk->sk_sk->sk_stamp = skb->tstamp;
-	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
 	/*
 	 * Maybe more packets - kick another thread ASAP.
@@ -936,8 +938,8 @@ svc_udp_init(struct svc_sock *svsk)
 			    3 * svsk->sk_server->sv_max_mesg,
 			    3 * svsk->sk_server->sv_max_mesg);
 
-	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
-	set_bit(SK_CHNGBUF, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
@@ -971,7 +973,7 @@ svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 	 */
 	if (sk->sk_state == TCP_LISTEN) {
 		if (svsk) {
-			set_bit(SK_CONN, &svsk->sk_flags);
+			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 			svc_sock_enqueue(svsk);
 		} else
 			printk("svc: socket %p: no user data\n", sk);
@@ -995,7 +997,7 @@ svc_tcp_state_change(struct sock *sk)
 	if (!svsk)
 		printk("svc: socket %p: no user data\n", sk);
 	else {
-		set_bit(SK_CLOSE, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -1010,7 +1012,7 @@ svc_tcp_data_ready(struct sock *sk, int count)
 	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
 		sk, sk->sk_user_data);
 	if (svsk) {
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
@@ -1050,7 +1052,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 	if (!sock)
 		return NULL;
 
-	clear_bit(SK_CONN, &svsk->sk_flags);
+	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	err = kernel_accept(sock, &newsock, O_NONBLOCK);
 	if (err < 0) {
 		if (err == -ENOMEM)
@@ -1061,8 +1063,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 			       serv->sv_name, -err);
 		return NULL;
 	}
-
-	set_bit(SK_CONN, &svsk->sk_flags);
+	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
 	err = kernel_getpeername(newsock, sin, &slen);
 	if (err < 0) {
@@ -1127,16 +1128,16 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	int		pnum, vlen;
 
 	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
-		svsk, test_bit(SK_DATA, &svsk->sk_flags),
-		test_bit(SK_CONN, &svsk->sk_flags),
-		test_bit(SK_CLOSE, &svsk->sk_flags));
+		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
 		return svc_deferred_recv(rqstp);
 	}
 
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
 		 * per thread, otherwise we can stall even when the
 		 * network isn't a bottleneck.
@@ -1153,7 +1154,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 			    3 * serv->sv_max_mesg);
 
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
 	 * the next four bytes. Otherwise try to gobble up as much as
@@ -1212,7 +1213,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		return -EAGAIN; /* record not complete */
 	}
 	len = svsk->sk_reclen;
-	set_bit(SK_DATA, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	vec = rqstp->rq_vec;
 	vec[0] = rqstp->rq_arg.head[0];
@@ -1255,7 +1256,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	return len;
 
  err_delete:
-	set_bit(SK_CLOSE, &svsk->sk_flags);
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	return -EAGAIN;
 
  error:
@@ -1288,7 +1289,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 	reclen = htonl(0x80000000|((xbufp->len ) - 4));
 	memcpy(xbufp->head[0].iov_base, &reclen, 4);
 
-	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
+	if (test_bit(XPT_DEAD, &rqstp->rq_sock->sk_xprt.xpt_flags))
 		return -ENOTCONN;
 
 	sent = svc_sendto(rqstp, &rqstp->rq_res);
@@ -1297,7 +1298,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 		       rqstp->rq_sock->sk_server->sv_name,
 		       (sent<0)?"got error":"sent only",
 		       sent, xbufp->len);
-		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
+		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
 		svc_sock_enqueue(rqstp->rq_sock);
 		sent = -EAGAIN;
 	}
@@ -1387,9 +1388,9 @@ svc_tcp_init(struct svc_sock *svsk)
 
 	if (sk->sk_state == TCP_LISTEN) {
 		dprintk("setting up TCP socket for listening\n");
-		set_bit(SK_LISTENER, &svsk->sk_flags);
+		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
 		sk->sk_data_ready = svc_tcp_listen_data_ready;
-		set_bit(SK_CONN, &svsk->sk_flags);
+		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	} else {
 		dprintk("setting up TCP socket for reading\n");
 		sk->sk_state_change = svc_tcp_state_change;
@@ -1409,10 +1410,10 @@ svc_tcp_init(struct svc_sock *svsk)
 				    3 * svsk->sk_server->sv_max_mesg,
 				    3 * svsk->sk_server->sv_max_mesg);
 
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		if (sk->sk_state != TCP_ESTABLISHED)
-			set_bit(SK_CLOSE, &svsk->sk_flags);
+			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	}
 }
 
@@ -1429,12 +1430,12 @@ svc_sock_update_bufs(struct svc_serv *serv)
 	list_for_each(le, &serv->sv_permsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	list_for_each(le, &serv->sv_tempsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
@@ -1471,7 +1472,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 		svsk = list_entry(serv->sv_tempsocks.prev,
 				  struct svc_sock,
 				  sk_list);
-		set_bit(SK_CLOSE, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_get(&svsk->sk_xprt);
 	}
 	spin_unlock_bh(&serv->sv_lock);
@@ -1575,10 +1576,10 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
-		dprintk("svc_recv: found SK_CLOSE\n");
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
+		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_socket(svsk);
-	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
+	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
 		if (newxpt) {
@@ -1605,7 +1606,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		return -EAGAIN;
 	}
 	svsk->sk_lastrecv = get_seconds();
-	clear_bit(SK_OLD, &svsk->sk_flags);
+	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
 	rqstp->rq_chandle.defer = svc_defer;
@@ -1652,7 +1653,7 @@ svc_send(struct svc_rqst *rqstp)
 
 	/* Grab svsk->sk_mutex to serialize outgoing data. */
 	mutex_lock(&svsk->sk_mutex);
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		len = -ENOTCONN;
 	else
 		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
@@ -1688,21 +1689,21 @@ svc_age_temp_sockets(unsigned long closure)
 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
 		svsk = list_entry(le, struct svc_sock, sk_list);
 
-		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
+		if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
 			continue;
 		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
-		    || test_bit(SK_BUSY, &svsk->sk_flags))
+		    || test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 			continue;
 		svc_xprt_get(&svsk->sk_xprt);
 		list_move(le, &to_be_aged);
-		set_bit(SK_CLOSE, &svsk->sk_flags);
-		set_bit(SK_DETACHED, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
 	while (!list_empty(&to_be_aged)) {
 		le = to_be_aged.next;
-		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
+		/* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
 		list_del_init(le);
 		svsk = list_entry(le, struct svc_sock, sk_list);
 
@@ -1748,7 +1749,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 		return NULL;
 	}
 
-	set_bit(SK_BUSY, &svsk->sk_flags);
+	set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	inet->sk_user_data = svsk;
 	svsk->sk_sock = sock;
 	svsk->sk_sk = inet;
@@ -1770,7 +1771,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 
 	spin_lock_bh(&serv->sv_lock);
 	if (is_temporary) {
-		set_bit(SK_TEMP, &svsk->sk_flags);
+		set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_tempsocks);
 		serv->sv_tmpcnt++;
 		if (serv->sv_temptimer.function == NULL) {
@@ -1781,7 +1782,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 				  jiffies + svc_conn_age_period * HZ);
 		}
 	} else {
-		clear_bit(SK_TEMP, &svsk->sk_flags);
+		clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_permsocks);
 	}
 	spin_unlock_bh(&serv->sv_lock);
@@ -1931,7 +1932,7 @@ svc_delete_socket(struct svc_sock *svsk)
 
 	spin_lock_bh(&serv->sv_lock);
 
-	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
+	if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
 		list_del_init(&svsk->sk_list);
 	/*
 	 * We used to delete the svc_sock from whichever list
@@ -1940,9 +1941,9 @@ svc_delete_socket(struct svc_sock *svsk)
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
-		if (test_bit(SK_TEMP, &svsk->sk_flags))
+		if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
 			serv->sv_tmpcnt--;
 		svc_xprt_put(&svsk->sk_xprt);
 	}
@@ -1952,26 +1953,26 @@ svc_delete_socket(struct svc_sock *svsk)
 
 static void svc_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 		/* someone else will have to effect the close */
 		return;
 
 	svc_xprt_get(&svsk->sk_xprt);
 	svc_delete_socket(svsk);
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_xprt_put(&svsk->sk_xprt);
 }
 
 void svc_force_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Waiting to be processed, but no threads left,
 		 * So just remove it from the waiting list
 		 */
 		list_del_init(&svsk->sk_ready);
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	}
 	svc_close_socket(svsk);
 }
@@ -1996,7 +1997,7 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
 	spin_unlock(&svsk->sk_lock);
-	set_bit(SK_DEFERRED, &svsk->sk_flags);
+	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 	svc_xprt_put(&svsk->sk_xprt);
 }
@@ -2059,16 +2060,16 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 {
 	struct svc_deferred_req *dr = NULL;
 
-	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
+	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
 		return NULL;
 	spin_lock(&svsk->sk_lock);
-	clear_bit(SK_DEFERRED, &svsk->sk_flags);
+	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(SK_DEFERRED, &svsk->sk_flags);
+		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock(&svsk->sk_lock);
 	return dr;