Diffstat (limited to 'net/sunrpc/xprtsock.c')
-rw-r--r--   net/sunrpc/xprtsock.c   251
1 file changed, 249 insertions, 2 deletions
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 62438f3a914d..37c5475ba258 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -32,6 +32,7 @@
 #include <linux/tcp.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/file.h>
 #ifdef CONFIG_NFS_V4_1
@@ -43,6 +44,7 @@
 #include <net/udp.h>
 #include <net/tcp.h>
 
+#include "sunrpc.h"
 /*
  * xprtsock tunables
  */
@@ -771,6 +773,7 @@ static void xs_close(struct rpc_xprt *xprt)
 	dprintk("RPC:       xs_close xprt %p\n", xprt);
 
 	xs_reset_transport(transport);
+	xprt->reestablish_timeout = 0;
 
 	smp_mb__before_clear_bit();
 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1262,6 +1265,12 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 	if (xprt->shutdown)
 		goto out;
 
+	/* Any data means we had a useful conversation, so
+	 * we don't need to delay the next reconnect
+	 */
+	if (xprt->reestablish_timeout)
+		xprt->reestablish_timeout = 0;
+
 	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
 	rd_desc.arg.data = xprt;
 	do {
@@ -2032,6 +2041,8 @@ static void xs_connect(struct rpc_task *task)
 				   &transport->connect_worker,
 				   xprt->reestablish_timeout);
 		xprt->reestablish_timeout <<= 1;
+		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
 		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
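
Taken together, the reestablish_timeout changes mean xs_close() and xs_tcp_data_ready() reset the reconnect delay to zero, while the new lower clamp in xs_connect() keeps the left-shift from sticking at zero once a delayed reconnect really is wanted. A standalone sketch of the resulting backoff, with stand-in values for XS_TCP_INIT_REEST_TO and XS_TCP_MAX_REEST_TO rather than the kernel's definitions:

/* Standalone model of the reconnect backoff after this patch.
 * The constants below are illustrative stand-ins, not the kernel values.
 */
#include <stdio.h>

#define XS_TCP_INIT_REEST_TO	3	/* stand-in for the initial delay */
#define XS_TCP_MAX_REEST_TO	300	/* stand-in for the maximum delay */

static unsigned long next_reconnect_delay(unsigned long reestablish_timeout)
{
	/* mirrors xs_connect(): reconnect with the current delay, then grow it */
	reestablish_timeout <<= 1;
	if (reestablish_timeout < XS_TCP_INIT_REEST_TO)
		reestablish_timeout = XS_TCP_INIT_REEST_TO;	/* new lower clamp */
	if (reestablish_timeout > XS_TCP_MAX_REEST_TO)
		reestablish_timeout = XS_TCP_MAX_REEST_TO;
	return reestablish_timeout;
}

int main(void)
{
	unsigned long to = 0;	/* as left behind by xs_close() or fresh data */
	int i;

	/* without the lower clamp, 0 << 1 would stay 0 forever */
	for (i = 0; i < 8; i++) {
		printf("attempt %d: delay %lu\n", i, to);
		to = next_reconnect_delay(to);
	}
	return 0;
}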
@@ -2098,6 +2109,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 		xprt->stat.bklog_u);
 }
 
+/*
+ * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
+ * we allocate pages instead of doing a kmalloc like rpc_malloc is because we
+ * want to use the server side send routines.
+ */
+void *bc_malloc(struct rpc_task *task, size_t size)
+{
+	struct page *page;
+	struct rpc_buffer *buf;
+
+	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
+	page = alloc_page(GFP_KERNEL);
+
+	if (!page)
+		return NULL;
+
+	buf = page_address(page);
+	buf->len = PAGE_SIZE;
+
+	return buf->data;
+}
+
+/*
+ * Free the space allocated in the bc_malloc routine
+ */
+void bc_free(void *buffer)
+{
+	struct rpc_buffer *buf;
+
+	if (!buffer)
+		return;
+
+	buf = container_of(buffer, struct rpc_buffer, data);
+	free_page((unsigned long)buf);
+}
+
+/*
+ * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
+ * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
+ */
+static int bc_sendto(struct rpc_rqst *req)
+{
+	int len;
+	struct xdr_buf *xbufp = &req->rq_snd_buf;
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+	struct socket *sock = transport->sock;
+	unsigned long headoff;
+	unsigned long tailoff;
+
+	/*
+	 * Set up the rpc header and record marker stuff
+	 */
+	xs_encode_tcp_record_marker(xbufp);
+
+	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
+	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
+	len = svc_send_common(sock, xbufp,
+			      virt_to_page(xbufp->head[0].iov_base), headoff,
+			      xbufp->tail[0].iov_base, tailoff);
+
+	if (len != xbufp->len) {
+		printk(KERN_NOTICE "Error sending entire callback!\n");
+		len = -EAGAIN;
+	}
+
+	return len;
+}
+
+/*
+ * The send routine. Borrows from svc_send
+ */
+static int bc_send_request(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct svc_xprt *xprt;
+	struct svc_sock *svsk;
+	u32 len;
+
+	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
+	/*
+	 * Get the server socket associated with this callback xprt
+	 */
+	xprt = req->rq_xprt->bc_xprt;
+	svsk = container_of(xprt, struct svc_sock, sk_xprt);
+
+	/*
+	 * Grab the mutex to serialize data as the connection is shared
+	 * with the fore channel
+	 */
+	if (!mutex_trylock(&xprt->xpt_mutex)) {
+		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
+		if (!mutex_trylock(&xprt->xpt_mutex))
+			return -EAGAIN;
+		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
+	}
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+		len = -ENOTCONN;
+	else
+		len = bc_sendto(req);
+	mutex_unlock(&xprt->xpt_mutex);
+
+	if (len > 0)
+		len = 0;
+
+	return len;
+}
+
+/*
+ * The close routine. Since this is client initiated, we do nothing
+ */
+
+static void bc_close(struct rpc_xprt *xprt)
+{
+	return;
+}
+
+/*
+ * The xprt destroy routine. Again, because this connection is client
+ * initiated, we do nothing
+ */
+
+static void bc_destroy(struct rpc_xprt *xprt)
+{
+	return;
+}
+
 static struct rpc_xprt_ops xs_udp_ops = {
 	.set_buffer_size	= xs_udp_set_buffer_size,
 	.reserve_xprt		= xprt_reserve_xprt_cong,
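
bc_malloc() and bc_free() above hand out a whole page so the callback reply can go through the server-side send path, and they depend on struct rpc_buffer (from the newly included sunrpc.h) placing its data member at the end of a small header, so the page can be recovered from the data pointer with container_of(). A userspace model of that round trip; the struct layout here is an assumed stand-in for the kernel definition:

/* Userspace model of the page-backed buffer used by bc_malloc()/bc_free().
 * struct rpc_buffer below is an assumed stand-in, not the kernel header.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE 4096

struct rpc_buffer {
	size_t	len;
	char	data[];		/* caller-visible scratch space */
};

static void *model_bc_malloc(size_t size)
{
	struct rpc_buffer *buf;

	if (size > MODEL_PAGE_SIZE - sizeof(struct rpc_buffer))
		return NULL;
	/* stand-in for alloc_page(): one page-sized, page-aligned block */
	if (posix_memalign((void **)&buf, MODEL_PAGE_SIZE, MODEL_PAGE_SIZE))
		return NULL;
	buf->len = MODEL_PAGE_SIZE;
	return buf->data;	/* the caller only ever sees the data pointer */
}

static void model_bc_free(void *buffer)
{
	struct rpc_buffer *buf;

	if (!buffer)
		return;
	/* recover the start of the page from the data pointer */
	buf = (struct rpc_buffer *)((char *)buffer -
				    offsetof(struct rpc_buffer, data));
	free(buf);
}

int main(void)
{
	void *p = model_bc_malloc(1024);

	printf("buffer %p, header at %p\n", p,
	       (void *)((char *)p - offsetof(struct rpc_buffer, data)));
	model_bc_free(p);
	return 0;
}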
@@ -2134,6 +2273,22 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.print_stats		= xs_tcp_print_stats,
 };
 
+/*
+ * The rpc_xprt_ops for the server backchannel
+ */
+
+static struct rpc_xprt_ops bc_tcp_ops = {
+	.reserve_xprt		= xprt_reserve_xprt,
+	.release_xprt		= xprt_release_xprt,
+	.buf_alloc		= bc_malloc,
+	.buf_free		= bc_free,
+	.send_request		= bc_send_request,
+	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
+	.close			= bc_close,
+	.destroy		= bc_destroy,
+	.print_stats		= xs_tcp_print_stats,
+};
+
 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
 		unsigned int slot_table_size)
 {
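
For reference, the xs_encode_tcp_record_marker() call in bc_sendto() above frames the callback the same way fore-channel TCP replies are framed: a 4-byte big-endian record mark whose top bit flags the last fragment and whose low 31 bits give the fragment length (RFC 5531 record marking). A small illustration using a local helper rather than the kernel one:

/* Sketch of RPC-over-TCP record marking as used by the TCP transports.
 * encode_record_marker() is a local illustration, not the kernel helper.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define LAST_STREAM_FRAGMENT	0x80000000U	/* "last fragment" bit */

/* Build the 4-byte record mark for a single, final fragment of 'len' bytes. */
static uint32_t encode_record_marker(uint32_t len)
{
	return htonl(LAST_STREAM_FRAGMENT | len);
}

int main(void)
{
	uint32_t marker = encode_record_marker(100);
	unsigned char *b = (unsigned char *)&marker;

	/* a 100-byte last fragment appears on the wire as 80 00 00 64 */
	printf("record mark: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}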
@@ -2322,11 +2477,93 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	return ERR_PTR(-EINVAL);
 }
 
+/**
+ * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
+ * @args: rpc transport creation arguments
+ *
+ */
+static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
+{
+	struct sockaddr *addr = args->dstaddr;
+	struct rpc_xprt *xprt;
+	struct sock_xprt *transport;
+	struct svc_sock *bc_sock;
+
+	if (!args->bc_xprt)
+		return ERR_PTR(-EINVAL);
+
+	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+	if (IS_ERR(xprt))
+		return xprt;
+	transport = container_of(xprt, struct sock_xprt, xprt);
+
+	xprt->prot = IPPROTO_TCP;
+	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+	xprt->timeout = &xs_tcp_default_timeout;
+
+	/* backchannel */
+	xprt_set_bound(xprt);
+	xprt->bind_timeout = 0;
+	xprt->connect_timeout = 0;
+	xprt->reestablish_timeout = 0;
+	xprt->idle_timeout = 0;
+
+	/*
+	 * The backchannel uses the same socket connection as the
+	 * forechannel
+	 */
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	bc_sock->sk_bc_xprt = xprt;
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	xprt->ops = &bc_tcp_ops;
+
+	switch (addr->sa_family) {
+	case AF_INET:
+		xs_format_peer_addresses(xprt, "tcp",
+					 RPCBIND_NETID_TCP);
+		break;
+	case AF_INET6:
+		xs_format_peer_addresses(xprt, "tcp",
+					 RPCBIND_NETID_TCP6);
+		break;
+	default:
+		kfree(xprt);
+		return ERR_PTR(-EAFNOSUPPORT);
+	}
+
+	if (xprt_bound(xprt))
+		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
+			xprt->address_strings[RPC_DISPLAY_ADDR],
+			xprt->address_strings[RPC_DISPLAY_PORT],
+			xprt->address_strings[RPC_DISPLAY_PROTO]);
+	else
+		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
+			xprt->address_strings[RPC_DISPLAY_ADDR],
+			xprt->address_strings[RPC_DISPLAY_PROTO]);
+
+	/*
+	 * Since we don't want connections for the backchannel, we set
+	 * the xprt status to connected
+	 */
+	xprt_set_connected(xprt);
+
+	if (try_module_get(THIS_MODULE))
+		return xprt;
+	kfree(xprt->slot);
+	kfree(xprt);
+	return ERR_PTR(-EINVAL);
+}
+
 static struct xprt_class xs_udp_transport = {
 	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
 	.name		= "udp",
 	.owner		= THIS_MODULE,
-	.ident		= IPPROTO_UDP,
+	.ident		= XPRT_TRANSPORT_UDP,
 	.setup		= xs_setup_udp,
 };
 
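
xs_setup_bc_tcp() is only reached through the generic creation path: a caller (the NFSv4.1 callback code) fills in a struct xprt_create with the XPRT_TRANSPORT_BC_TCP ident and the fore channel's svc_xprt, and xprt_create_transport() dispatches to the setup routine registered below. A hypothetical caller sketch, not taken from this patch, with error handling elided:

/*
 * Hypothetical caller sketch (not part of this patch): creating the
 * backchannel rpc_xprt on top of an existing fore-channel svc_xprt.
 */
static struct rpc_xprt *example_create_bc_xprt(struct svc_xprt *fore_channel,
					       struct sockaddr *client_addr,
					       size_t addrlen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_BC_TCP,
		.dstaddr	= client_addr,
		.addrlen	= addrlen,
		.bc_xprt	= fore_channel,
	};

	/* dispatches to xs_setup_bc_tcp() via the registered xprt_class */
	return xprt_create_transport(&args);
}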
@@ -2334,10 +2571,18 @@ static struct xprt_class xs_tcp_transport = {
 	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
 	.name		= "tcp",
 	.owner		= THIS_MODULE,
-	.ident		= IPPROTO_TCP,
+	.ident		= XPRT_TRANSPORT_TCP,
 	.setup		= xs_setup_tcp,
 };
 
+static struct xprt_class xs_bc_tcp_transport = {
+	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
+	.name		= "tcp NFSv4.1 backchannel",
+	.owner		= THIS_MODULE,
+	.ident		= XPRT_TRANSPORT_BC_TCP,
+	.setup		= xs_setup_bc_tcp,
+};
+
 /**
  * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
  *
@@ -2351,6 +2596,7 @@ int init_socket_xprt(void)
 
 	xprt_register_transport(&xs_udp_transport);
 	xprt_register_transport(&xs_tcp_transport);
+	xprt_register_transport(&xs_bc_tcp_transport);
 
 	return 0;
 }
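
Registering xs_bc_tcp_transport simply makes a setup routine for the XPRT_TRANSPORT_BC_TCP ident discoverable by the generic client code, which is also why the ident fields above switch from raw IPPROTO_* values to the XPRT_TRANSPORT_* namespace. A toy userspace model of that register-then-dispatch-by-ident pattern; the names and ident values are illustrative, not the kernel's list implementation:

/* Toy model of xprt_class registration and lookup by ident. */
#include <stdio.h>

struct toy_xprt_class {
	const char *name;
	int ident;
};

static struct toy_xprt_class registry[8];
static int nr_registered;

static void toy_register_transport(struct toy_xprt_class c)
{
	registry[nr_registered++] = c;
}

static const struct toy_xprt_class *toy_find_transport(int ident)
{
	int i;

	for (i = 0; i < nr_registered; i++)
		if (registry[i].ident == ident)
			return &registry[i];
	return NULL;
}

int main(void)
{
	/* illustrative ident values only */
	enum { TOY_TRANSPORT_TCP = 1, TOY_TRANSPORT_BC_TCP = 2 };
	const struct toy_xprt_class *c;

	toy_register_transport((struct toy_xprt_class){ "tcp", TOY_TRANSPORT_TCP });
	toy_register_transport((struct toy_xprt_class){ "tcp NFSv4.1 backchannel",
							TOY_TRANSPORT_BC_TCP });

	c = toy_find_transport(TOY_TRANSPORT_BC_TCP);
	printf("found: %s\n", c ? c->name : "(none)");
	return 0;
}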
@@ -2370,6 +2616,7 @@ void cleanup_socket_xprt(void)
 
 	xprt_unregister_transport(&xs_udp_transport);
 	xprt_unregister_transport(&xs_tcp_transport);
+	xprt_unregister_transport(&xs_bc_tcp_transport);
 }
 
 static int param_set_uint_minmax(const char *val, struct kernel_param *kp,