author     YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>    2007-02-09 18:38:13 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2007-02-11 02:20:13 -0500
commit     cca5172a7ec10dfdb0b787cd8e9d5b0b8f179793 (patch)
tree       1b9e86cf95ab5e1e2b3180ebe59be2a05ebbe1bf /net/sunrpc/svcsock.c
parent     d808ad9ab8b1109239027c248c4652503b9d3029 (diff)
[NET] SUNRPC: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sunrpc/svcsock.c')
-rw-r--r--  net/sunrpc/svcsock.c  36
1 files changed, 18 insertions, 18 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cf93cd1d857b..2fd0ba2b20df 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -58,7 +58,7 @@
 * providing that certain rules are followed:
 *
 * SK_CONN, SK_DATA, can be set or cleared at any time.
 *	after a set, svc_sock_enqueue must be called.
 *	after a clear, the socket must be read/accepted
 *	 if this succeeds, it must be set again.
 * SK_CLOSE can set at any time. It is never cleared.
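The comment in this hunk documents the flag discipline svcsock.c relies on. As an illustrative sketch only (not part of this patch), the set/clear protocol for SK_DATA might look roughly as follows; example_data_ready(), example_try_read() and example_read_some() are hypothetical names, while set_bit(), clear_bit(), svc_sock_enqueue() and the SK_* flags are the ones visible elsewhere in this file:

/* Hedged sketch of the rule above: set the flag, then enqueue; clear it
 * before reading, and set it again if the read made progress. */
static void example_data_ready(struct svc_sock *svsk)
{
	set_bit(SK_DATA, &svsk->sk_flags);	/* new data arrived        */
	svc_sock_enqueue(svsk);			/* must follow every set   */
}

static void example_try_read(struct svc_sock *svsk)
{
	clear_bit(SK_DATA, &svsk->sk_flags);	/* clear before the read   */
	if (example_read_some(svsk) > 0)	/* hypothetical read helper */
		set_bit(SK_DATA, &svsk->sk_flags); /* made progress: set again */
}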
@@ -252,7 +252,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
@@ -484,7 +484,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
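For context (an explanatory sketch, not part of the patch): kernel_sendpage() takes a page plus an offset within that page, so the tail iovec's kernel virtual address is reduced to its in-page offset by masking with PAGE_SIZE-1. Broken out into named temporaries, the call in this hunk is roughly equivalent to:

	/* Sketch: mask the tail buffer's address down to its byte offset
	 * inside the page that backs it, then send that page range. */
	unsigned long base = (unsigned long)xdr->tail[0].iov_base;
	unsigned int  off  = base & (PAGE_SIZE - 1);

	result = kernel_sendpage(sock, rqstp->rq_respages[0], off,
				 xdr->tail[0].iov_len, 0);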
@@ -711,7 +711,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
@@ -743,7 +743,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
@@ -794,7 +794,7 @@ svc_udp_init(struct svc_sock *svsk)
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
@@ -923,7 +923,7 @@ svc_tcp_accept(struct svc_sock *svsk)
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

@@ -1038,7 +1038,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
	 * on the number of threads which will access the socket.
	 *
	 * rcvbuf just needs to be able to hold a few requests.
	 * Normally they will be removed from the queue
	 * as soon a a complete request arrives.
	 */
	svc_sock_setbufsize(svsk->sk_sock,
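The comment spells out the sizing policy: sndbuf has to scale with the number of threads that may be writing replies at once, while rcvbuf only needs room for a few queued requests. The svc_sock_setbufsize() call that starts on the last line above is truncated by the hunk; as an assumption-labelled sketch of that policy (the multipliers and the serv->sv_nrthreads / serv->sv_bufsz field names are not shown in this hunk and are assumptions), it could look like:

	/* Sketch only, not the patch's code: sndbuf proportional to the
	 * thread count, rcvbuf enough for a few requests.  The real
	 * arguments are truncated out of this hunk. */
	svc_sock_setbufsize(svsk->sk_sock,
			    (serv->sv_nrthreads + 3) * serv->sv_bufsz,
			    3 * serv->sv_bufsz);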
@@ -1063,7 +1063,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)

	if (len < want) {
		dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
			len, want);
		svc_sock_received(svsk);
		return -EAGAIN; /* record header not complete */
	}
@@ -1221,7 +1221,7 @@ svc_tcp_init(struct svc_sock *svsk)
	tp->nonagle = 1;        /* disable Nagle's algorithm */

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_tcp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
@@ -1230,7 +1230,7 @@ svc_tcp_init(struct svc_sock *svsk)

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
@@ -1246,7 +1246,7 @@ svc_sock_update_bufs(struct svc_serv *serv)

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
@@ -1278,11 +1278,11 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			 rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			 rqstp);

@@ -1371,7 +1371,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
	return len;
}

/*
 * Drop request
 */
void
@@ -1651,7 +1651,7 @@ svc_delete_socket(struct svc_sock *svsk)

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * it's sk_ready node was on, but we don't actually
	 * need to. This is because the only time we're called
@@ -1697,7 +1697,7 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
@@ -1776,7 +1776,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);