Diffstat (limited to 'net/sunrpc/xprtsock.c')
-rw-r--r--  net/sunrpc/xprtsock.c  222
1 file changed, 107 insertions(+), 115 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d739e5d15d8..fe9306bf10cc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = {
 #endif
 
 /*
- * Time out for an RPC UDP socket connect. UDP socket connects are
- * synchronous, but we set a timeout anyway in case of resource
- * exhaustion on the local host.
- */
-#define XS_UDP_CONN_TO (5U * HZ)
-
-/*
- * Wait duration for an RPC TCP connection to be established. Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
- */
-#define XS_TCP_CONN_TO (60U * HZ)
-
-/*
  * Wait duration for a reply from the RPC portmapper.
  */
 #define XS_BIND_TO (60U * HZ)
@@ -224,7 +210,8 @@ struct sock_xprt {
 	 * State of TCP reply receive
 	 */
 	__be32		tcp_fraghdr,
-			tcp_xid;
+			tcp_xid,
+			tcp_calldir;
 
 	u32		tcp_offset,
 			tcp_reclen;
@@ -297,12 +284,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 	switch (sap->sa_family) {
 	case AF_INET:
 		sin = xs_addr_in(xprt);
-		(void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
-				NIPQUAD(sin->sin_addr.s_addr));
+		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
 		break;
 	case AF_INET6:
 		sin6 = xs_addr_in6(xprt);
-		(void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
+		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
 		break;
 	default:
 		BUG();
@@ -315,10 +301,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
 	struct sockaddr *sap = xs_addr(xprt);
 	char buf[128];
 
-	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
 	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
 
-	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
 	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
 }
 
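The two hunks above drop the redundant (void) casts and replace the NIPQUAD-based IPv4 hex formatting with a single ntohl() plus "%08x" conversion. A minimal userspace sketch (not part of the patch; the address literal is just an example) showing that both forms emit the same eight hex digits:

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Compares the old per-byte formatting with the new host-order formatting;
 * both print "c000020a" for 192.0.2.10.
 */
int main(void)
{
	struct in_addr a;
	unsigned char *b;

	inet_pton(AF_INET, "192.0.2.10", &a);
	b = (unsigned char *)&a.s_addr;

	printf("%02x%02x%02x%02x\n", b[0], b[1], b[2], b[3]);	/* old NIPQUAD-style */
	printf("%08x\n", ntohl(a.s_addr));			/* new "%08x" style  */
	return 0;
}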
@@ -543,14 +529,12 @@ static int xs_udp_send_request(struct rpc_task *task)
 			xdr->len - req->rq_bytes_sent, status);
 
 	if (status >= 0) {
-		task->tk_bytes_sent += status;
+		req->rq_xmit_bytes_sent += status;
 		if (status >= req->rq_slen)
 			return 0;
 		/* Still some bytes left; set up for a retry later. */
 		status = -EAGAIN;
 	}
-	if (!transport->sock)
-		goto out;
 
 	switch (status) {
 	case -ENOTSOCK:
@@ -570,7 +554,7 @@ static int xs_udp_send_request(struct rpc_task *task)
 		 * prompts ECONNREFUSED. */
 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
 	}
-out:
+
 	return status;
 }
 
@@ -641,7 +625,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
 		/* If we've sent the entire packet, immediately
 		 * reset the count of bytes sent. */
 		req->rq_bytes_sent += status;
-		task->tk_bytes_sent += status;
+		req->rq_xmit_bytes_sent += status;
 		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
 			req->rq_bytes_sent = 0;
 			return 0;
@@ -652,8 +636,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
 			status = -EAGAIN;
 			break;
 	}
-	if (!transport->sock)
-		goto out;
 
 	switch (status) {
 	case -ENOTSOCK:
@@ -673,7 +655,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	case -ENOTCONN:
 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
 	}
-out:
+
 	return status;
 }
 
@@ -818,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
 	u32 _xid;
 	__be32 *xp;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	dprintk("RPC: xs_udp_data_ready...\n");
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
@@ -863,7 +845,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
 	dst_confirm(skb_dst(skb));
 
 	xprt_adjust_cwnd(task, copied);
-	xprt_update_rtt(task);
 	xprt_complete_rqst(task, copied);
 
  out_unlock:
@@ -871,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
  dropit:
 	skb_free_datagram(sk, skb);
  out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
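Several hunks in this patch convert the sk->sk_callback_lock readers from read_lock()/read_unlock() to the bottom-half-disabling variants. A minimal sketch of the resulting callback shape, with placeholder lookup and processing that are not taken from the patch:

#include <net/sock.h>

/* Sketch only: taking sk_callback_lock with BHs disabled means a process
 * context reader cannot be preempted by the softirq path that grabs the
 * same lock, which is the usual reason for preferring the _bh variants.
 */
static void example_sk_data_ready(struct sock *sk, int bytes)
{
	void *transport;

	read_lock_bh(&sk->sk_callback_lock);
	transport = sk->sk_user_data;	/* placeholder for xprt_from_sock() */
	if (transport != NULL) {
		/* ... consume queued data for this transport ... */
	}
	read_unlock_bh(&sk->sk_callback_lock);
}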
@@ -947,7 +928,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
 {
 	size_t len, used;
 	u32 offset;
-	__be32 calldir;
+	char *p;
 
 	/*
 	 * We want transport->tcp_offset to be 8 at the end of this routine
@@ -956,26 +937,33 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
 	 * transport->tcp_offset is 4 (after having already read the xid).
 	 */
 	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
-	len = sizeof(calldir) - offset;
+	len = sizeof(transport->tcp_calldir) - offset;
 	dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
-	used = xdr_skb_read_bits(desc, &calldir, len);
+	p = ((char *) &transport->tcp_calldir) + offset;
+	used = xdr_skb_read_bits(desc, p, len);
 	transport->tcp_offset += used;
 	if (used != len)
 		return;
 	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
-	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
-	transport->tcp_flags |= TCP_RCV_COPY_DATA;
 	/*
 	 * We don't yet have the XDR buffer, so we will write the calldir
 	 * out after we get the buffer from the 'struct rpc_rqst'
 	 */
-	if (ntohl(calldir) == RPC_REPLY)
+	switch (ntohl(transport->tcp_calldir)) {
+	case RPC_REPLY:
+		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+		transport->tcp_flags |= TCP_RCV_COPY_DATA;
 		transport->tcp_flags |= TCP_RPC_REPLY;
-	else
+		break;
+	case RPC_CALL:
+		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+		transport->tcp_flags |= TCP_RCV_COPY_DATA;
 		transport->tcp_flags &= ~TCP_RPC_REPLY;
-	dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
-			(transport->tcp_flags & TCP_RPC_REPLY) ?
-				"reply for" : "request with", calldir);
+		break;
+	default:
+		dprintk("RPC: invalid request message type\n");
+		xprt_force_disconnect(&transport->xprt);
+	}
 	xs_tcp_check_fraghdr(transport);
 }
 
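For readers of the calldir hunks above, a hedged sketch of the record layout the receive state machine walks (struct and field names here are illustrative, not from the kernel source): over TCP each RPC record starts with a 4-byte record-marking header, and the message itself then begins with the xid followed by the call direction, which is why the comment expects tcp_offset to reach 8 once both have been read.

#include <linux/types.h>

/* Illustrative layout only; the real code reads these fields one at a time
 * out of the byte stream rather than through a struct.
 */
struct example_rpc_tcp_record_head {
	__be32 frag_header;	/* length + last-fragment bit (tcp_fraghdr)      */
	__be32 xid;		/* transaction id (tcp_xid), message offset 0..3 */
	__be32 calldir;		/* RPC_CALL or RPC_REPLY (tcp_calldir), 4..7     */
};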
@@ -995,12 +983,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
 		/*
 		 * Save the RPC direction in the XDR buffer
 		 */
-		__be32	calldir = transport->tcp_flags & TCP_RPC_REPLY ?
-					htonl(RPC_REPLY) : 0;
-
 		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
-			&calldir, sizeof(calldir));
-		transport->tcp_copied += sizeof(calldir);
+			&transport->tcp_calldir,
+			sizeof(transport->tcp_calldir));
+		transport->tcp_copied += sizeof(transport->tcp_calldir);
 		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
 	}
 
@@ -1055,8 +1041,6 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
 		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
 			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
 	}
-
-	return;
 }
 
 /*
@@ -1245,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 
 	dprintk("RPC: xs_tcp_data_ready...\n");
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	if (xprt->shutdown)
@@ -1264,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
 	} while (read > 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /*
@@ -1317,18 +1301,19 @@ static void xs_tcp_state_change(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
-	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
+	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
 			sk->sk_state, xprt_connected(xprt),
 			sock_flag(sk, SOCK_DEAD),
-			sock_flag(sk, SOCK_ZAPPED));
+			sock_flag(sk, SOCK_ZAPPED),
+			sk->sk_shutdown);
 
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->transport_lock);
 		if (!xprt_test_and_set_connected(xprt)) {
 			struct sock_xprt *transport = container_of(xprt,
 					struct sock_xprt, xprt);
@@ -1342,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
 			xprt_wake_pending_tasks(xprt, -EAGAIN);
 		}
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->transport_lock);
 		break;
 	case TCP_FIN_WAIT1:
 		/* The client initiated a shutdown of the socket */
@@ -1380,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
 		xs_sock_mark_closed(xprt);
 	}
  out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1391,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC: %s client %p...\n"
@@ -1399,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
 			__func__, xprt, sk->sk_err);
 	xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_write_space(struct sock *sk)
@@ -1431,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
  */
 static void xs_udp_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 
 	/* from net/core/sock.c:sock_def_write_space */
 	if (sock_writeable(sk))
 		xs_write_space(sk);
 
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1452,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
  */
 static void xs_tcp_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 
 	/* from net/core/stream.c:sk_stream_write_space */
 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 		xs_write_space(sk);
 
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
@@ -1795,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
 {
 	unsigned int state = transport->inet->sk_state;
 
-	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
-		return;
-	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
-		return;
+	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
+			__func__, transport->inet->sk_shutdown);
+	}
+	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
+			"sk_shutdown set to %d\n",
+			__func__, transport->inet->sk_shutdown);
+	}
 	xs_abort_connection(xprt, transport);
 }
 
@@ -1912,6 +1912,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
 	case -EALREADY:
 		xprt_clear_connecting(xprt);
 		return;
+	case -EINVAL:
+		/* Happens, for instance, if the user specified a link
+		 * local IPv6 address without a scope-id.
+		 */
+		goto out;
 	}
 out_eagain:
 	status = -EAGAIN;
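The new -EINVAL case above covers, for example, a mount against a link-local IPv6 server address given without a scope id. A hedged userspace illustration (the interface name is an assumption) of what the scope id supplies:

#include <string.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* fe80::/10 addresses are ambiguous without an interface; filling in
 * sin6_scope_id (the "%eth0" part of "fe80::1%eth0") resolves that,
 * and leaving it zero is the kind of input that makes a connect fail.
 */
void fill_linklocal_example(struct sockaddr_in6 *sin6)
{
	memset(sin6, 0, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(2049);			/* NFS, for example */
	inet_pton(AF_INET6, "fe80::1", &sin6->sin6_addr);
	sin6->sin6_scope_id = if_nametoindex("eth0");	/* assumed interface */
}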
@@ -2016,9 +2021,6 @@ static void xs_connect(struct rpc_task *task)
 	struct rpc_xprt *xprt = task->tk_xprt;
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
-	if (xprt_test_and_set_connecting(xprt))
-		return;
-
 	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
 		dprintk("RPC: xs_connect delayed xprt %p for %lu "
 				"seconds\n",
@@ -2038,16 +2040,6 @@ static void xs_connect(struct rpc_task *task)
 	}
 }
 
-static void xs_tcp_connect(struct rpc_task *task)
-{
-	struct rpc_xprt *xprt = task->tk_xprt;
-
-	/* Exit if we need to wait for socket shutdown to complete */
-	if (test_bit(XPRT_CLOSING, &xprt->state))
-		return;
-	xs_connect(task);
-}
-
 /**
  * xs_udp_print_stats - display UDP socket-specifc stats
  * @xprt: rpc_xprt struct containing statistics
@@ -2100,7 +2092,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
  * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
  * to use the server side send routines.
  */
-void *bc_malloc(struct rpc_task *task, size_t size)
+static void *bc_malloc(struct rpc_task *task, size_t size)
 {
 	struct page *page;
 	struct rpc_buffer *buf;
@@ -2120,7 +2112,7 @@ void *bc_malloc(struct rpc_task *task, size_t size)
 /*
  * Free the space allocated in the bc_alloc routine
  */
-void bc_free(void *buffer)
+static void bc_free(void *buffer)
 {
 	struct rpc_buffer *buf;
 
@@ -2210,7 +2202,6 @@ static int bc_send_request(struct rpc_task *task)
 
 static void bc_close(struct rpc_xprt *xprt)
 {
-	return;
 }
 
 /*
@@ -2220,7 +2211,6 @@ static void bc_close(struct rpc_xprt *xprt)
 
 static void bc_destroy(struct rpc_xprt *xprt)
 {
-	return;
 }
 
 static struct rpc_xprt_ops xs_udp_ops = {
@@ -2246,14 +2236,11 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.release_xprt = xs_tcp_release_xprt,
 	.rpcbind = rpcb_getport_async,
 	.set_port = xs_set_port,
-	.connect = xs_tcp_connect,
+	.connect = xs_connect,
 	.buf_alloc = rpc_malloc,
 	.buf_free = rpc_free,
 	.send_request = xs_tcp_send_request,
 	.set_retrans_timeout = xprt_set_retrans_timeout_def,
-#if defined(CONFIG_NFS_V4_1)
-	.release_request = bc_release_request,
-#endif /* CONFIG_NFS_V4_1 */
 	.close = xs_tcp_close,
 	.destroy = xs_destroy,
 	.print_stats = xs_tcp_print_stats,
@@ -2328,6 +2315,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2340,7 +2328,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
 	xprt->bind_timeout = XS_BIND_TO;
-	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
 	xprt->idle_timeout = XS_IDLE_DISC_TO;
 
@@ -2366,8 +2353,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2382,10 +2369,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2404,6 +2392,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2415,7 +2404,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
 	xprt->bind_timeout = XS_BIND_TO;
-	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
 	xprt->idle_timeout = XS_IDLE_DISC_TO;
 
@@ -2440,8 +2428,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2457,10 +2445,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 /**
@@ -2474,9 +2463,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct svc_sock *bc_sock;
-
-	if (!args->bc_xprt)
-		ERR_PTR(-EINVAL);
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2491,7 +2478,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	/* backchannel */
 	xprt_set_bound(xprt);
 	xprt->bind_timeout = 0;
-	xprt->connect_timeout = 0;
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
@@ -2517,8 +2503,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 					RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2540,9 +2526,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static struct xprt_class xs_udp_transport = {
@@ -2605,7 +2593,8 @@ void cleanup_socket_xprt(void)
 	xprt_unregister_transport(&xs_bc_tcp_transport);
 }
 
-static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
+static int param_set_uint_minmax(const char *val,
+		const struct kernel_param *kp,
 		unsigned int min, unsigned int max)
 {
 	unsigned long num;
@@ -2620,34 +2609,37 @@ static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
 	return 0;
 }
 
-static int param_set_portnr(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
 {
 	return param_set_uint_minmax(val, kp,
 			RPC_MIN_RESVPORT,
 			RPC_MAX_RESVPORT);
 }
 
-static int param_get_portnr(char *buffer, struct kernel_param *kp)
-{
-	return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_portnr = {
+	.set = param_set_portnr,
+	.get = param_get_uint,
+};
+
 #define param_check_portnr(name, p) \
 	__param_check(name, p, unsigned int);
 
 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
 
-static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
+static int param_set_slot_table_size(const char *val,
+			const struct kernel_param *kp)
 {
 	return param_set_uint_minmax(val, kp,
 			RPC_MIN_SLOT_TABLE,
 			RPC_MAX_SLOT_TABLE);
 }
 
-static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
-{
-	return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_slot_table_size = {
+	.set = param_set_slot_table_size,
+	.get = param_get_uint,
+};
+
 #define param_check_slot_table_size(name, p) \
 	__param_check(name, p, unsigned int);
 
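For context on the final two hunks: the per-parameter get functions disappear because module_param_named() now resolves a struct kernel_param_ops table through the param_ops_<type>/param_check_<type> naming convention. A self-contained sketch of that pattern, with an invented module parameter (the name, bounds, and variable are illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int example_port = 111;

/* Custom setter with range checking; the getter reuses the stock
 * param_get_uint, mirroring what the patch does for portnr.
 */
static int example_set_port(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = strict_strtoul(val, 0, &num);	/* string helper of that era */

	if (ret || num < 1 || num > 65535)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static struct kernel_param_ops param_ops_example_port = {
	.set = example_set_port,
	.get = param_get_uint,
};

#define param_check_example_port(name, p) \
	__param_check(name, p, unsigned int)

/* The "example_port" type token below selects param_ops_example_port. */
module_param_named(port, example_port, example_port, 0644);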