Diffstat (limited to 'net/sunrpc/xprtsock.c')
-rw-r--r--   net/sunrpc/xprtsock.c   716
1 file changed, 449 insertions(+), 267 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index cfe3c15be948..3bb232eb5d90 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -46,6 +46,92 @@ unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | |||
46 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | 46 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * We can register our own files under /proc/sys/sunrpc by | ||
50 | * calling register_sysctl_table() again. The files in that | ||
51 | * directory become the union of all files registered there. | ||
52 | * | ||
53 | * We simply need to make sure that we don't collide with | ||
54 | * someone else's file names! | ||
55 | */ | ||
56 | |||
57 | #ifdef RPC_DEBUG | ||
58 | |||
59 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | ||
60 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | ||
61 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | ||
62 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | ||
63 | |||
64 | static struct ctl_table_header *sunrpc_table_header; | ||
65 | |||
66 | /* | ||
67 | * FIXME: changing the UDP slot table size should also resize the UDP | ||
68 | * socket buffers for existing UDP transports | ||
69 | */ | ||
70 | static ctl_table xs_tunables_table[] = { | ||
71 | { | ||
72 | .ctl_name = CTL_SLOTTABLE_UDP, | ||
73 | .procname = "udp_slot_table_entries", | ||
74 | .data = &xprt_udp_slot_table_entries, | ||
75 | .maxlen = sizeof(unsigned int), | ||
76 | .mode = 0644, | ||
77 | .proc_handler = &proc_dointvec_minmax, | ||
78 | .strategy = &sysctl_intvec, | ||
79 | .extra1 = &min_slot_table_size, | ||
80 | .extra2 = &max_slot_table_size | ||
81 | }, | ||
82 | { | ||
83 | .ctl_name = CTL_SLOTTABLE_TCP, | ||
84 | .procname = "tcp_slot_table_entries", | ||
85 | .data = &xprt_tcp_slot_table_entries, | ||
86 | .maxlen = sizeof(unsigned int), | ||
87 | .mode = 0644, | ||
88 | .proc_handler = &proc_dointvec_minmax, | ||
89 | .strategy = &sysctl_intvec, | ||
90 | .extra1 = &min_slot_table_size, | ||
91 | .extra2 = &max_slot_table_size | ||
92 | }, | ||
93 | { | ||
94 | .ctl_name = CTL_MIN_RESVPORT, | ||
95 | .procname = "min_resvport", | ||
96 | .data = &xprt_min_resvport, | ||
97 | .maxlen = sizeof(unsigned int), | ||
98 | .mode = 0644, | ||
99 | .proc_handler = &proc_dointvec_minmax, | ||
100 | .strategy = &sysctl_intvec, | ||
101 | .extra1 = &xprt_min_resvport_limit, | ||
102 | .extra2 = &xprt_max_resvport_limit | ||
103 | }, | ||
104 | { | ||
105 | .ctl_name = CTL_MAX_RESVPORT, | ||
106 | .procname = "max_resvport", | ||
107 | .data = &xprt_max_resvport, | ||
108 | .maxlen = sizeof(unsigned int), | ||
109 | .mode = 0644, | ||
110 | .proc_handler = &proc_dointvec_minmax, | ||
111 | .strategy = &sysctl_intvec, | ||
112 | .extra1 = &xprt_min_resvport_limit, | ||
113 | .extra2 = &xprt_max_resvport_limit | ||
114 | }, | ||
115 | { | ||
116 | .ctl_name = 0, | ||
117 | }, | ||
118 | }; | ||
119 | |||
120 | static ctl_table sunrpc_table[] = { | ||
121 | { | ||
122 | .ctl_name = CTL_SUNRPC, | ||
123 | .procname = "sunrpc", | ||
124 | .mode = 0555, | ||
125 | .child = xs_tunables_table | ||
126 | }, | ||
127 | { | ||
128 | .ctl_name = 0, | ||
129 | }, | ||
130 | }; | ||
131 | |||
132 | #endif | ||
133 | |||
134 | /* | ||
49 | * How many times to try sending a request on a socket before waiting | 135 | * How many times to try sending a request on a socket before waiting |
50 | * for the socket buffer to clear. | 136 | * for the socket buffer to clear. |
51 | */ | 137 | */ |
@@ -125,6 +211,55 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
125 | } | 211 | } |
126 | #endif | 212 | #endif |
127 | 213 | ||
214 | struct sock_xprt { | ||
215 | struct rpc_xprt xprt; | ||
216 | |||
217 | /* | ||
218 | * Network layer | ||
219 | */ | ||
220 | struct socket * sock; | ||
221 | struct sock * inet; | ||
222 | |||
223 | /* | ||
224 | * State of TCP reply receive | ||
225 | */ | ||
226 | __be32 tcp_fraghdr, | ||
227 | tcp_xid; | ||
228 | |||
229 | u32 tcp_offset, | ||
230 | tcp_reclen; | ||
231 | |||
232 | unsigned long tcp_copied, | ||
233 | tcp_flags; | ||
234 | |||
235 | /* | ||
236 | * Connection of transports | ||
237 | */ | ||
238 | struct delayed_work connect_worker; | ||
239 | unsigned short port; | ||
240 | |||
241 | /* | ||
242 | * UDP socket buffer size parameters | ||
243 | */ | ||
244 | size_t rcvsize, | ||
245 | sndsize; | ||
246 | |||
247 | /* | ||
248 | * Saved socket callback addresses | ||
249 | */ | ||
250 | void (*old_data_ready)(struct sock *, int); | ||
251 | void (*old_state_change)(struct sock *); | ||
252 | void (*old_write_space)(struct sock *); | ||
253 | }; | ||
254 | |||
255 | /* | ||
256 | * TCP receive state flags | ||
257 | */ | ||
258 | #define TCP_RCV_LAST_FRAG (1UL << 0) | ||
259 | #define TCP_RCV_COPY_FRAGHDR (1UL << 1) | ||
260 | #define TCP_RCV_COPY_XID (1UL << 2) | ||
261 | #define TCP_RCV_COPY_DATA (1UL << 3) | ||
262 | |||
128 | static void xs_format_peer_addresses(struct rpc_xprt *xprt) | 263 | static void xs_format_peer_addresses(struct rpc_xprt *xprt) |
129 | { | 264 | { |
130 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 265 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; |
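
The structural change running through this whole patch is visible here: the socket-specific state that used to live directly in struct rpc_xprt now lives in struct sock_xprt, which embeds the generic rpc_xprt as a member. Any function handed a struct rpc_xprt * recovers the private transport state with container_of(). A minimal sketch of that recovery (xs_transport() is a hypothetical name used only for illustration; the patch open-codes the container_of() call at each site):

	static inline struct sock_xprt *xs_transport(struct rpc_xprt *xprt)
	{
		/* sock_xprt embeds rpc_xprt as the member named 'xprt',
		 * so this is just a pointer adjustment */
		return container_of(xprt, struct sock_xprt, xprt);
	}
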
@@ -168,37 +303,52 @@ static void xs_free_peer_addresses(struct rpc_xprt *xprt) | |||
168 | 303 | ||
169 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) | 304 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) |
170 | 305 | ||
171 | static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) | 306 | static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more) |
172 | { | 307 | { |
173 | struct kvec iov = { | ||
174 | .iov_base = xdr->head[0].iov_base + base, | ||
175 | .iov_len = len - base, | ||
176 | }; | ||
177 | struct msghdr msg = { | 308 | struct msghdr msg = { |
178 | .msg_name = addr, | 309 | .msg_name = addr, |
179 | .msg_namelen = addrlen, | 310 | .msg_namelen = addrlen, |
180 | .msg_flags = XS_SENDMSG_FLAGS, | 311 | .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0), |
312 | }; | ||
313 | struct kvec iov = { | ||
314 | .iov_base = vec->iov_base + base, | ||
315 | .iov_len = vec->iov_len - base, | ||
181 | }; | 316 | }; |
182 | 317 | ||
183 | if (xdr->len > len) | 318 | if (iov.iov_len != 0) |
184 | msg.msg_flags |= MSG_MORE; | ||
185 | |||
186 | if (likely(iov.iov_len)) | ||
187 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | 319 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); |
188 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); | 320 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); |
189 | } | 321 | } |
190 | 322 | ||
191 | static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) | 323 | static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more) |
192 | { | 324 | { |
193 | struct kvec iov = { | 325 | struct page **ppage; |
194 | .iov_base = xdr->tail[0].iov_base + base, | 326 | unsigned int remainder; |
195 | .iov_len = len - base, | 327 | int err, sent = 0; |
196 | }; | 328 | |
197 | struct msghdr msg = { | 329 | remainder = xdr->page_len - base; |
198 | .msg_flags = XS_SENDMSG_FLAGS, | 330 | base += xdr->page_base; |
199 | }; | 331 | ppage = xdr->pages + (base >> PAGE_SHIFT); |
332 | base &= ~PAGE_MASK; | ||
333 | for(;;) { | ||
334 | unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); | ||
335 | int flags = XS_SENDMSG_FLAGS; | ||
200 | 336 | ||
201 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | 337 | remainder -= len; |
338 | if (remainder != 0 || more) | ||
339 | flags |= MSG_MORE; | ||
340 | err = sock->ops->sendpage(sock, *ppage, base, len, flags); | ||
341 | if (remainder == 0 || err != len) | ||
342 | break; | ||
343 | sent += err; | ||
344 | ppage++; | ||
345 | base = 0; | ||
346 | } | ||
347 | if (sent == 0) | ||
348 | return err; | ||
349 | if (err > 0) | ||
350 | sent += err; | ||
351 | return sent; | ||
202 | } | 352 | } |
203 | 353 | ||
204 | /** | 354 | /** |
@@ -210,76 +360,51 @@ static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int b | |||
210 | * @base: starting position in the buffer | 360 | * @base: starting position in the buffer |
211 | * | 361 | * |
212 | */ | 362 | */ |
213 | static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) | 363 | static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) |
214 | { | 364 | { |
215 | struct page **ppage = xdr->pages; | 365 | unsigned int remainder = xdr->len - base; |
216 | unsigned int len, pglen = xdr->page_len; | 366 | int err, sent = 0; |
217 | int err, ret = 0; | ||
218 | 367 | ||
219 | if (unlikely(!sock)) | 368 | if (unlikely(!sock)) |
220 | return -ENOTCONN; | 369 | return -ENOTCONN; |
221 | 370 | ||
222 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | 371 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); |
372 | if (base != 0) { | ||
373 | addr = NULL; | ||
374 | addrlen = 0; | ||
375 | } | ||
223 | 376 | ||
224 | len = xdr->head[0].iov_len; | 377 | if (base < xdr->head[0].iov_len || addr != NULL) { |
225 | if (base < len || (addr != NULL && base == 0)) { | 378 | unsigned int len = xdr->head[0].iov_len - base; |
226 | err = xs_send_head(sock, addr, addrlen, xdr, base, len); | 379 | remainder -= len; |
227 | if (ret == 0) | 380 | err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0); |
228 | ret = err; | 381 | if (remainder == 0 || err != len) |
229 | else if (err > 0) | ||
230 | ret += err; | ||
231 | if (err != (len - base)) | ||
232 | goto out; | 382 | goto out; |
383 | sent += err; | ||
233 | base = 0; | 384 | base = 0; |
234 | } else | 385 | } else |
235 | base -= len; | 386 | base -= xdr->head[0].iov_len; |
236 | |||
237 | if (unlikely(pglen == 0)) | ||
238 | goto copy_tail; | ||
239 | if (unlikely(base >= pglen)) { | ||
240 | base -= pglen; | ||
241 | goto copy_tail; | ||
242 | } | ||
243 | if (base || xdr->page_base) { | ||
244 | pglen -= base; | ||
245 | base += xdr->page_base; | ||
246 | ppage += base >> PAGE_CACHE_SHIFT; | ||
247 | base &= ~PAGE_CACHE_MASK; | ||
248 | } | ||
249 | |||
250 | do { | ||
251 | int flags = XS_SENDMSG_FLAGS; | ||
252 | |||
253 | len = PAGE_CACHE_SIZE; | ||
254 | if (base) | ||
255 | len -= base; | ||
256 | if (pglen < len) | ||
257 | len = pglen; | ||
258 | |||
259 | if (pglen != len || xdr->tail[0].iov_len != 0) | ||
260 | flags |= MSG_MORE; | ||
261 | 387 | ||
262 | err = kernel_sendpage(sock, *ppage, base, len, flags); | 388 | if (base < xdr->page_len) { |
263 | if (ret == 0) | 389 | unsigned int len = xdr->page_len - base; |
264 | ret = err; | 390 | remainder -= len; |
265 | else if (err > 0) | 391 | err = xs_send_pagedata(sock, xdr, base, remainder != 0); |
266 | ret += err; | 392 | if (remainder == 0 || err != len) |
267 | if (err != len) | ||
268 | goto out; | 393 | goto out; |
394 | sent += err; | ||
269 | base = 0; | 395 | base = 0; |
270 | ppage++; | 396 | } else |
271 | } while ((pglen -= len) != 0); | 397 | base -= xdr->page_len; |
272 | copy_tail: | 398 | |
273 | len = xdr->tail[0].iov_len; | 399 | if (base >= xdr->tail[0].iov_len) |
274 | if (base < len) { | 400 | return sent; |
275 | err = xs_send_tail(sock, xdr, base, len); | 401 | err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); |
276 | if (ret == 0) | ||
277 | ret = err; | ||
278 | else if (err > 0) | ||
279 | ret += err; | ||
280 | } | ||
281 | out: | 402 | out: |
282 | return ret; | 403 | if (sent == 0) |
404 | return err; | ||
405 | if (err > 0) | ||
406 | sent += err; | ||
407 | return sent; | ||
283 | } | 408 | } |
284 | 409 | ||
285 | /** | 410 | /** |
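
The rewritten send path splits the old monolithic logic into xs_send_kvec() for the head and tail kvecs and xs_send_pagedata() for the paged middle section, with 'base' consumed section by section and MSG_MORE set on every chunk except the last. The page-walk setup in xs_send_pagedata() turns an offset into the paged area into a starting page plus an in-page offset; a self-contained sketch of that arithmetic (function and parameter names here are illustrative, not from the patch):

	static struct page *xs_first_page(struct xdr_buf *xdr, unsigned int base,
					  unsigned int *page_offset)
	{
		base += xdr->page_base;			/* absolute offset into the pages */
		*page_offset = base & ~PAGE_MASK;	/* offset within the first page */
		return xdr->pages[base >> PAGE_SHIFT];	/* page to hand to sendpage() */
	}
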
@@ -291,19 +416,20 @@ static void xs_nospace(struct rpc_task *task) | |||
291 | { | 416 | { |
292 | struct rpc_rqst *req = task->tk_rqstp; | 417 | struct rpc_rqst *req = task->tk_rqstp; |
293 | struct rpc_xprt *xprt = req->rq_xprt; | 418 | struct rpc_xprt *xprt = req->rq_xprt; |
419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
294 | 420 | ||
295 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", | 421 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", |
296 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
297 | req->rq_slen); | 423 | req->rq_slen); |
298 | 424 | ||
299 | if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { | 425 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { |
300 | /* Protect against races with write_space */ | 426 | /* Protect against races with write_space */ |
301 | spin_lock_bh(&xprt->transport_lock); | 427 | spin_lock_bh(&xprt->transport_lock); |
302 | 428 | ||
303 | /* Don't race with disconnect */ | 429 | /* Don't race with disconnect */ |
304 | if (!xprt_connected(xprt)) | 430 | if (!xprt_connected(xprt)) |
305 | task->tk_status = -ENOTCONN; | 431 | task->tk_status = -ENOTCONN; |
306 | else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) | 432 | else if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) |
307 | xprt_wait_for_buffer_space(task); | 433 | xprt_wait_for_buffer_space(task); |
308 | 434 | ||
309 | spin_unlock_bh(&xprt->transport_lock); | 435 | spin_unlock_bh(&xprt->transport_lock); |
@@ -327,6 +453,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
327 | { | 453 | { |
328 | struct rpc_rqst *req = task->tk_rqstp; | 454 | struct rpc_rqst *req = task->tk_rqstp; |
329 | struct rpc_xprt *xprt = req->rq_xprt; | 455 | struct rpc_xprt *xprt = req->rq_xprt; |
456 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
330 | struct xdr_buf *xdr = &req->rq_snd_buf; | 457 | struct xdr_buf *xdr = &req->rq_snd_buf; |
331 | int status; | 458 | int status; |
332 | 459 | ||
@@ -335,8 +462,10 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
335 | req->rq_svec->iov_len); | 462 | req->rq_svec->iov_len); |
336 | 463 | ||
337 | req->rq_xtime = jiffies; | 464 | req->rq_xtime = jiffies; |
338 | status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, | 465 | status = xs_sendpages(transport->sock, |
339 | xprt->addrlen, xdr, req->rq_bytes_sent); | 466 | (struct sockaddr *) &xprt->addr, |
467 | xprt->addrlen, xdr, | ||
468 | req->rq_bytes_sent); | ||
340 | 469 | ||
341 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
342 | xdr->len - req->rq_bytes_sent, status); | 471 | xdr->len - req->rq_bytes_sent, status); |
@@ -392,6 +521,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
392 | { | 521 | { |
393 | struct rpc_rqst *req = task->tk_rqstp; | 522 | struct rpc_rqst *req = task->tk_rqstp; |
394 | struct rpc_xprt *xprt = req->rq_xprt; | 523 | struct rpc_xprt *xprt = req->rq_xprt; |
524 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
395 | struct xdr_buf *xdr = &req->rq_snd_buf; | 525 | struct xdr_buf *xdr = &req->rq_snd_buf; |
396 | int status, retry = 0; | 526 | int status, retry = 0; |
397 | 527 | ||
@@ -406,8 +536,8 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
406 | * called sendmsg(). */ | 536 | * called sendmsg(). */ |
407 | while (1) { | 537 | while (1) { |
408 | req->rq_xtime = jiffies; | 538 | req->rq_xtime = jiffies; |
409 | status = xs_sendpages(xprt->sock, NULL, 0, xdr, | 539 | status = xs_sendpages(transport->sock, |
410 | req->rq_bytes_sent); | 540 | NULL, 0, xdr, req->rq_bytes_sent); |
411 | 541 | ||
412 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | 542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", |
413 | xdr->len - req->rq_bytes_sent, status); | 543 | xdr->len - req->rq_bytes_sent, status); |
@@ -485,8 +615,9 @@ out_release: | |||
485 | */ | 615 | */ |
486 | static void xs_close(struct rpc_xprt *xprt) | 616 | static void xs_close(struct rpc_xprt *xprt) |
487 | { | 617 | { |
488 | struct socket *sock = xprt->sock; | 618 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
489 | struct sock *sk = xprt->inet; | 619 | struct socket *sock = transport->sock; |
620 | struct sock *sk = transport->inet; | ||
490 | 621 | ||
491 | if (!sk) | 622 | if (!sk) |
492 | goto clear_close_wait; | 623 | goto clear_close_wait; |
@@ -494,13 +625,13 @@ static void xs_close(struct rpc_xprt *xprt) | |||
494 | dprintk("RPC: xs_close xprt %p\n", xprt); | 625 | dprintk("RPC: xs_close xprt %p\n", xprt); |
495 | 626 | ||
496 | write_lock_bh(&sk->sk_callback_lock); | 627 | write_lock_bh(&sk->sk_callback_lock); |
497 | xprt->inet = NULL; | 628 | transport->inet = NULL; |
498 | xprt->sock = NULL; | 629 | transport->sock = NULL; |
499 | 630 | ||
500 | sk->sk_user_data = NULL; | 631 | sk->sk_user_data = NULL; |
501 | sk->sk_data_ready = xprt->old_data_ready; | 632 | sk->sk_data_ready = transport->old_data_ready; |
502 | sk->sk_state_change = xprt->old_state_change; | 633 | sk->sk_state_change = transport->old_state_change; |
503 | sk->sk_write_space = xprt->old_write_space; | 634 | sk->sk_write_space = transport->old_write_space; |
504 | write_unlock_bh(&sk->sk_callback_lock); | 635 | write_unlock_bh(&sk->sk_callback_lock); |
505 | 636 | ||
506 | sk->sk_no_check = 0; | 637 | sk->sk_no_check = 0; |
@@ -519,15 +650,18 @@ clear_close_wait: | |||
519 | */ | 650 | */ |
520 | static void xs_destroy(struct rpc_xprt *xprt) | 651 | static void xs_destroy(struct rpc_xprt *xprt) |
521 | { | 652 | { |
653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
654 | |||
522 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
523 | 656 | ||
524 | cancel_delayed_work(&xprt->connect_worker); | 657 | cancel_delayed_work(&transport->connect_worker); |
525 | flush_scheduled_work(); | 658 | flush_scheduled_work(); |
526 | 659 | ||
527 | xprt_disconnect(xprt); | 660 | xprt_disconnect(xprt); |
528 | xs_close(xprt); | 661 | xs_close(xprt); |
529 | xs_free_peer_addresses(xprt); | 662 | xs_free_peer_addresses(xprt); |
530 | kfree(xprt->slot); | 663 | kfree(xprt->slot); |
664 | kfree(xprt); | ||
531 | } | 665 | } |
532 | 666 | ||
533 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | 667 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) |
@@ -603,91 +737,75 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
603 | read_unlock(&sk->sk_callback_lock); | 737 | read_unlock(&sk->sk_callback_lock); |
604 | } | 738 | } |
605 | 739 | ||
606 | static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) | 740 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
607 | { | ||
608 | if (len > desc->count) | ||
609 | len = desc->count; | ||
610 | if (skb_copy_bits(desc->skb, desc->offset, p, len)) { | ||
611 | dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", | ||
612 | len, desc->count); | ||
613 | return 0; | ||
614 | } | ||
615 | desc->offset += len; | ||
616 | desc->count -= len; | ||
617 | dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", | ||
618 | len, desc->count); | ||
619 | return len; | ||
620 | } | ||
621 | |||
622 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
623 | { | 741 | { |
742 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
624 | size_t len, used; | 743 | size_t len, used; |
625 | char *p; | 744 | char *p; |
626 | 745 | ||
627 | p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; | 746 | p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset; |
628 | len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; | 747 | len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset; |
629 | used = xs_tcp_copy_data(desc, p, len); | 748 | used = xdr_skb_read_bits(desc, p, len); |
630 | xprt->tcp_offset += used; | 749 | transport->tcp_offset += used; |
631 | if (used != len) | 750 | if (used != len) |
632 | return; | 751 | return; |
633 | 752 | ||
634 | xprt->tcp_reclen = ntohl(xprt->tcp_recm); | 753 | transport->tcp_reclen = ntohl(transport->tcp_fraghdr); |
635 | if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) | 754 | if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) |
636 | xprt->tcp_flags |= XPRT_LAST_FRAG; | 755 | transport->tcp_flags |= TCP_RCV_LAST_FRAG; |
637 | else | 756 | else |
638 | xprt->tcp_flags &= ~XPRT_LAST_FRAG; | 757 | transport->tcp_flags &= ~TCP_RCV_LAST_FRAG; |
639 | xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; | 758 | transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; |
640 | 759 | ||
641 | xprt->tcp_flags &= ~XPRT_COPY_RECM; | 760 | transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR; |
642 | xprt->tcp_offset = 0; | 761 | transport->tcp_offset = 0; |
643 | 762 | ||
644 | /* Sanity check of the record length */ | 763 | /* Sanity check of the record length */ |
645 | if (unlikely(xprt->tcp_reclen < 4)) { | 764 | if (unlikely(transport->tcp_reclen < 4)) { |
646 | dprintk("RPC: invalid TCP record fragment length\n"); | 765 | dprintk("RPC: invalid TCP record fragment length\n"); |
647 | xprt_disconnect(xprt); | 766 | xprt_disconnect(xprt); |
648 | return; | 767 | return; |
649 | } | 768 | } |
650 | dprintk("RPC: reading TCP record fragment of length %d\n", | 769 | dprintk("RPC: reading TCP record fragment of length %d\n", |
651 | xprt->tcp_reclen); | 770 | transport->tcp_reclen); |
652 | } | 771 | } |
653 | 772 | ||
654 | static void xs_tcp_check_recm(struct rpc_xprt *xprt) | 773 | static void xs_tcp_check_fraghdr(struct sock_xprt *transport) |
655 | { | 774 | { |
656 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", | 775 | if (transport->tcp_offset == transport->tcp_reclen) { |
657 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); | 776 | transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR; |
658 | if (xprt->tcp_offset == xprt->tcp_reclen) { | 777 | transport->tcp_offset = 0; |
659 | xprt->tcp_flags |= XPRT_COPY_RECM; | 778 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) { |
660 | xprt->tcp_offset = 0; | 779 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
661 | if (xprt->tcp_flags & XPRT_LAST_FRAG) { | 780 | transport->tcp_flags |= TCP_RCV_COPY_XID; |
662 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 781 | transport->tcp_copied = 0; |
663 | xprt->tcp_flags |= XPRT_COPY_XID; | ||
664 | xprt->tcp_copied = 0; | ||
665 | } | 782 | } |
666 | } | 783 | } |
667 | } | 784 | } |
668 | 785 | ||
669 | static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) | 786 | static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc) |
670 | { | 787 | { |
671 | size_t len, used; | 788 | size_t len, used; |
672 | char *p; | 789 | char *p; |
673 | 790 | ||
674 | len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; | 791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; |
675 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | 792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); |
676 | p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; | 793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; |
677 | used = xs_tcp_copy_data(desc, p, len); | 794 | used = xdr_skb_read_bits(desc, p, len); |
678 | xprt->tcp_offset += used; | 795 | transport->tcp_offset += used; |
679 | if (used != len) | 796 | if (used != len) |
680 | return; | 797 | return; |
681 | xprt->tcp_flags &= ~XPRT_COPY_XID; | 798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; |
682 | xprt->tcp_flags |= XPRT_COPY_DATA; | 799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; |
683 | xprt->tcp_copied = 4; | 800 | transport->tcp_copied = 4; |
684 | dprintk("RPC: reading reply for XID %08x\n", | 801 | dprintk("RPC: reading reply for XID %08x\n", |
685 | ntohl(xprt->tcp_xid)); | 802 | ntohl(transport->tcp_xid)); |
686 | xs_tcp_check_recm(xprt); | 803 | xs_tcp_check_fraghdr(transport); |
687 | } | 804 | } |
688 | 805 | ||
689 | static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) | 806 | static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
690 | { | 807 | { |
808 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
691 | struct rpc_rqst *req; | 809 | struct rpc_rqst *req; |
692 | struct xdr_buf *rcvbuf; | 810 | struct xdr_buf *rcvbuf; |
693 | size_t len; | 811 | size_t len; |
@@ -695,116 +813,118 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc | |||
695 | 813 | ||
696 | /* Find and lock the request corresponding to this xid */ | 814 | /* Find and lock the request corresponding to this xid */ |
697 | spin_lock(&xprt->transport_lock); | 815 | spin_lock(&xprt->transport_lock); |
698 | req = xprt_lookup_rqst(xprt, xprt->tcp_xid); | 816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); |
699 | if (!req) { | 817 | if (!req) { |
700 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
701 | dprintk("RPC: XID %08x request not found!\n", | 819 | dprintk("RPC: XID %08x request not found!\n", |
702 | ntohl(xprt->tcp_xid)); | 820 | ntohl(transport->tcp_xid)); |
703 | spin_unlock(&xprt->transport_lock); | 821 | spin_unlock(&xprt->transport_lock); |
704 | return; | 822 | return; |
705 | } | 823 | } |
706 | 824 | ||
707 | rcvbuf = &req->rq_private_buf; | 825 | rcvbuf = &req->rq_private_buf; |
708 | len = desc->count; | 826 | len = desc->count; |
709 | if (len > xprt->tcp_reclen - xprt->tcp_offset) { | 827 | if (len > transport->tcp_reclen - transport->tcp_offset) { |
710 | skb_reader_t my_desc; | 828 | struct xdr_skb_reader my_desc; |
711 | 829 | ||
712 | len = xprt->tcp_reclen - xprt->tcp_offset; | 830 | len = transport->tcp_reclen - transport->tcp_offset; |
713 | memcpy(&my_desc, desc, sizeof(my_desc)); | 831 | memcpy(&my_desc, desc, sizeof(my_desc)); |
714 | my_desc.count = len; | 832 | my_desc.count = len; |
715 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | 833 | r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, |
716 | &my_desc, xs_tcp_copy_data); | 834 | &my_desc, xdr_skb_read_bits); |
717 | desc->count -= r; | 835 | desc->count -= r; |
718 | desc->offset += r; | 836 | desc->offset += r; |
719 | } else | 837 | } else |
720 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | 838 | r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, |
721 | desc, xs_tcp_copy_data); | 839 | desc, xdr_skb_read_bits); |
722 | 840 | ||
723 | if (r > 0) { | 841 | if (r > 0) { |
724 | xprt->tcp_copied += r; | 842 | transport->tcp_copied += r; |
725 | xprt->tcp_offset += r; | 843 | transport->tcp_offset += r; |
726 | } | 844 | } |
727 | if (r != len) { | 845 | if (r != len) { |
728 | /* Error when copying to the receive buffer, | 846 | /* Error when copying to the receive buffer, |
729 | * usually because we weren't able to allocate | 847 | * usually because we weren't able to allocate |
730 | * additional buffer pages. All we can do now | 848 | * additional buffer pages. All we can do now |
731 | * is turn off XPRT_COPY_DATA, so the request | 849 | * is turn off TCP_RCV_COPY_DATA, so the request |
732 | * will not receive any additional updates, | 850 | * will not receive any additional updates, |
733 | * and time out. | 851 | * and time out. |
734 | * Any remaining data from this record will | 852 | * Any remaining data from this record will |
735 | * be discarded. | 853 | * be discarded. |
736 | */ | 854 | */ |
737 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
738 | dprintk("RPC: XID %08x truncated request\n", | 856 | dprintk("RPC: XID %08x truncated request\n", |
739 | ntohl(xprt->tcp_xid)); | 857 | ntohl(transport->tcp_xid)); |
740 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", |
741 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | 859 | xprt, transport->tcp_copied, transport->tcp_offset, |
860 | transport->tcp_reclen); | ||
742 | goto out; | 861 | goto out; |
743 | } | 862 | } |
744 | 863 | ||
745 | dprintk("RPC: XID %08x read %Zd bytes\n", | 864 | dprintk("RPC: XID %08x read %Zd bytes\n", |
746 | ntohl(xprt->tcp_xid), r); | 865 | ntohl(transport->tcp_xid), r); |
747 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 866 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", |
748 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | 867 | xprt, transport->tcp_copied, transport->tcp_offset, |
749 | 868 | transport->tcp_reclen); | |
750 | if (xprt->tcp_copied == req->rq_private_buf.buflen) | 869 | |
751 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 870 | if (transport->tcp_copied == req->rq_private_buf.buflen) |
752 | else if (xprt->tcp_offset == xprt->tcp_reclen) { | 871 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
753 | if (xprt->tcp_flags & XPRT_LAST_FRAG) | 872 | else if (transport->tcp_offset == transport->tcp_reclen) { |
754 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 873 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) |
874 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | ||
755 | } | 875 | } |
756 | 876 | ||
757 | out: | 877 | out: |
758 | if (!(xprt->tcp_flags & XPRT_COPY_DATA)) | 878 | if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) |
759 | xprt_complete_rqst(req->rq_task, xprt->tcp_copied); | 879 | xprt_complete_rqst(req->rq_task, transport->tcp_copied); |
760 | spin_unlock(&xprt->transport_lock); | 880 | spin_unlock(&xprt->transport_lock); |
761 | xs_tcp_check_recm(xprt); | 881 | xs_tcp_check_fraghdr(transport); |
762 | } | 882 | } |
763 | 883 | ||
764 | static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) | 884 | static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc) |
765 | { | 885 | { |
766 | size_t len; | 886 | size_t len; |
767 | 887 | ||
768 | len = xprt->tcp_reclen - xprt->tcp_offset; | 888 | len = transport->tcp_reclen - transport->tcp_offset; |
769 | if (len > desc->count) | 889 | if (len > desc->count) |
770 | len = desc->count; | 890 | len = desc->count; |
771 | desc->count -= len; | 891 | desc->count -= len; |
772 | desc->offset += len; | 892 | desc->offset += len; |
773 | xprt->tcp_offset += len; | 893 | transport->tcp_offset += len; |
774 | dprintk("RPC: discarded %Zu bytes\n", len); | 894 | dprintk("RPC: discarded %Zu bytes\n", len); |
775 | xs_tcp_check_recm(xprt); | 895 | xs_tcp_check_fraghdr(transport); |
776 | } | 896 | } |
777 | 897 | ||
778 | static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) | 898 | static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) |
779 | { | 899 | { |
780 | struct rpc_xprt *xprt = rd_desc->arg.data; | 900 | struct rpc_xprt *xprt = rd_desc->arg.data; |
781 | skb_reader_t desc = { | 901 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
902 | struct xdr_skb_reader desc = { | ||
782 | .skb = skb, | 903 | .skb = skb, |
783 | .offset = offset, | 904 | .offset = offset, |
784 | .count = len, | 905 | .count = len, |
785 | .csum = 0 | ||
786 | }; | 906 | }; |
787 | 907 | ||
788 | dprintk("RPC: xs_tcp_data_recv started\n"); | 908 | dprintk("RPC: xs_tcp_data_recv started\n"); |
789 | do { | 909 | do { |
790 | /* Read in a new fragment marker if necessary */ | 910 | /* Read in a new fragment marker if necessary */ |
791 | /* Can we ever really expect to get completely empty fragments? */ | 911 | /* Can we ever really expect to get completely empty fragments? */ |
792 | if (xprt->tcp_flags & XPRT_COPY_RECM) { | 912 | if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { |
793 | xs_tcp_read_fraghdr(xprt, &desc); | 913 | xs_tcp_read_fraghdr(xprt, &desc); |
794 | continue; | 914 | continue; |
795 | } | 915 | } |
796 | /* Read in the xid if necessary */ | 916 | /* Read in the xid if necessary */ |
797 | if (xprt->tcp_flags & XPRT_COPY_XID) { | 917 | if (transport->tcp_flags & TCP_RCV_COPY_XID) { |
798 | xs_tcp_read_xid(xprt, &desc); | 918 | xs_tcp_read_xid(transport, &desc); |
799 | continue; | 919 | continue; |
800 | } | 920 | } |
801 | /* Read in the request data */ | 921 | /* Read in the request data */ |
802 | if (xprt->tcp_flags & XPRT_COPY_DATA) { | 922 | if (transport->tcp_flags & TCP_RCV_COPY_DATA) { |
803 | xs_tcp_read_request(xprt, &desc); | 923 | xs_tcp_read_request(xprt, &desc); |
804 | continue; | 924 | continue; |
805 | } | 925 | } |
806 | /* Skip over any trailing bytes on short reads */ | 926 | /* Skip over any trailing bytes on short reads */ |
807 | xs_tcp_read_discard(xprt, &desc); | 927 | xs_tcp_read_discard(transport, &desc); |
808 | } while (desc.count); | 928 | } while (desc.count); |
809 | dprintk("RPC: xs_tcp_data_recv done\n"); | 929 | dprintk("RPC: xs_tcp_data_recv done\n"); |
810 | return len - desc.count; | 930 | return len - desc.count; |
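
The TCP receive side is a small state machine driven by the TCP_RCV_* flags introduced above: read the record marker, then the XID, then copy request data, discarding any trailing bytes. The first step decodes the RPC record marker defined by RFC 1831: a 4-byte big-endian word whose top bit flags the last fragment of a record and whose low 31 bits give the fragment length, which is what xs_tcp_read_fraghdr() stores in tcp_reclen and TCP_RCV_LAST_FRAG. A standalone decode, with the masks written out instead of the RPC_* macros (hypothetical helper, for illustration only):

	static void xs_decode_record_marker(__be32 marker, int *last_frag, u32 *frag_len)
	{
		u32 hdr = ntohl(marker);

		*last_frag = (hdr & 0x80000000) != 0;	/* RPC_LAST_STREAM_FRAGMENT */
		*frag_len  = hdr & 0x7fffffff;		/* RPC_FRAGMENT_SIZE_MASK */
	}
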
@@ -858,11 +978,16 @@ static void xs_tcp_state_change(struct sock *sk) | |||
858 | case TCP_ESTABLISHED: | 978 | case TCP_ESTABLISHED: |
859 | spin_lock_bh(&xprt->transport_lock); | 979 | spin_lock_bh(&xprt->transport_lock); |
860 | if (!xprt_test_and_set_connected(xprt)) { | 980 | if (!xprt_test_and_set_connected(xprt)) { |
981 | struct sock_xprt *transport = container_of(xprt, | ||
982 | struct sock_xprt, xprt); | ||
983 | |||
861 | /* Reset TCP record info */ | 984 | /* Reset TCP record info */ |
862 | xprt->tcp_offset = 0; | 985 | transport->tcp_offset = 0; |
863 | xprt->tcp_reclen = 0; | 986 | transport->tcp_reclen = 0; |
864 | xprt->tcp_copied = 0; | 987 | transport->tcp_copied = 0; |
865 | xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; | 988 | transport->tcp_flags = |
989 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; | ||
990 | |||
866 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 991 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
867 | xprt_wake_pending_tasks(xprt, 0); | 992 | xprt_wake_pending_tasks(xprt, 0); |
868 | } | 993 | } |
@@ -951,15 +1076,16 @@ static void xs_tcp_write_space(struct sock *sk) | |||
951 | 1076 | ||
952 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | 1077 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) |
953 | { | 1078 | { |
954 | struct sock *sk = xprt->inet; | 1079 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1080 | struct sock *sk = transport->inet; | ||
955 | 1081 | ||
956 | if (xprt->rcvsize) { | 1082 | if (transport->rcvsize) { |
957 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 1083 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
958 | sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; | 1084 | sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; |
959 | } | 1085 | } |
960 | if (xprt->sndsize) { | 1086 | if (transport->sndsize) { |
961 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 1087 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
962 | sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; | 1088 | sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; |
963 | sk->sk_write_space(sk); | 1089 | sk->sk_write_space(sk); |
964 | } | 1090 | } |
965 | } | 1091 | } |
@@ -974,12 +1100,14 @@ static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | |||
974 | */ | 1100 | */ |
975 | static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) | 1101 | static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) |
976 | { | 1102 | { |
977 | xprt->sndsize = 0; | 1103 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1104 | |||
1105 | transport->sndsize = 0; | ||
978 | if (sndsize) | 1106 | if (sndsize) |
979 | xprt->sndsize = sndsize + 1024; | 1107 | transport->sndsize = sndsize + 1024; |
980 | xprt->rcvsize = 0; | 1108 | transport->rcvsize = 0; |
981 | if (rcvsize) | 1109 | if (rcvsize) |
982 | xprt->rcvsize = rcvsize + 1024; | 1110 | transport->rcvsize = rcvsize + 1024; |
983 | 1111 | ||
984 | xs_udp_do_set_buffer_size(xprt); | 1112 | xs_udp_do_set_buffer_size(xprt); |
985 | } | 1113 | } |
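
To make the buffer-size plumbing concrete, a worked example with illustrative numbers (not taken from the patch): a caller requesting a 32 KB send buffer on a transport with 16 request slots ends up with

	/* xs_udp_set_buffer_size() pads the request by 1 KB,
	 * xs_udp_do_set_buffer_size() scales it by max_reqs * 2 */
	sndsize   = 32768 + 1024   = 33792
	sk_sndbuf = 33792 * 16 * 2 = 1081344 bytes

roughly two full send buffers' worth of socket space per outstanding request slot.
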
@@ -1003,19 +1131,6 @@ static unsigned short xs_get_random_port(void) | |||
1003 | } | 1131 | } |
1004 | 1132 | ||
1005 | /** | 1133 | /** |
1006 | * xs_print_peer_address - format an IPv4 address for printing | ||
1007 | * @xprt: generic transport | ||
1008 | * @format: flags field indicating which parts of the address to render | ||
1009 | */ | ||
1010 | static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format) | ||
1011 | { | ||
1012 | if (xprt->address_strings[format] != NULL) | ||
1013 | return xprt->address_strings[format]; | ||
1014 | else | ||
1015 | return "unprintable"; | ||
1016 | } | ||
1017 | |||
1018 | /** | ||
1019 | * xs_set_port - reset the port number in the remote endpoint address | 1134 | * xs_set_port - reset the port number in the remote endpoint address |
1020 | * @xprt: generic transport | 1135 | * @xprt: generic transport |
1021 | * @port: new port number | 1136 | * @port: new port number |
@@ -1030,20 +1145,20 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1030 | sap->sin_port = htons(port); | 1145 | sap->sin_port = htons(port); |
1031 | } | 1146 | } |
1032 | 1147 | ||
1033 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | 1148 | static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) |
1034 | { | 1149 | { |
1035 | struct sockaddr_in myaddr = { | 1150 | struct sockaddr_in myaddr = { |
1036 | .sin_family = AF_INET, | 1151 | .sin_family = AF_INET, |
1037 | }; | 1152 | }; |
1038 | int err; | 1153 | int err; |
1039 | unsigned short port = xprt->port; | 1154 | unsigned short port = transport->port; |
1040 | 1155 | ||
1041 | do { | 1156 | do { |
1042 | myaddr.sin_port = htons(port); | 1157 | myaddr.sin_port = htons(port); |
1043 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1158 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, |
1044 | sizeof(myaddr)); | 1159 | sizeof(myaddr)); |
1045 | if (err == 0) { | 1160 | if (err == 0) { |
1046 | xprt->port = port; | 1161 | transport->port = port; |
1047 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1162 | dprintk("RPC: xs_bindresvport bound to port %u\n", |
1048 | port); | 1163 | port); |
1049 | return 0; | 1164 | return 0; |
@@ -1052,7 +1167,7 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | |||
1052 | port = xprt_max_resvport; | 1167 | port = xprt_max_resvport; |
1053 | else | 1168 | else |
1054 | port--; | 1169 | port--; |
1055 | } while (err == -EADDRINUSE && port != xprt->port); | 1170 | } while (err == -EADDRINUSE && port != transport->port); |
1056 | 1171 | ||
1057 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1172 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); |
1058 | return err; | 1173 | return err; |
@@ -1066,9 +1181,10 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | |||
1066 | */ | 1181 | */ |
1067 | static void xs_udp_connect_worker(struct work_struct *work) | 1182 | static void xs_udp_connect_worker(struct work_struct *work) |
1068 | { | 1183 | { |
1069 | struct rpc_xprt *xprt = | 1184 | struct sock_xprt *transport = |
1070 | container_of(work, struct rpc_xprt, connect_worker.work); | 1185 | container_of(work, struct sock_xprt, connect_worker.work); |
1071 | struct socket *sock = xprt->sock; | 1186 | struct rpc_xprt *xprt = &transport->xprt; |
1187 | struct socket *sock = transport->sock; | ||
1072 | int err, status = -EIO; | 1188 | int err, status = -EIO; |
1073 | 1189 | ||
1074 | if (xprt->shutdown || !xprt_bound(xprt)) | 1190 | if (xprt->shutdown || !xprt_bound(xprt)) |
@@ -1082,23 +1198,23 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1082 | goto out; | 1198 | goto out; |
1083 | } | 1199 | } |
1084 | 1200 | ||
1085 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1201 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1086 | sock_release(sock); | 1202 | sock_release(sock); |
1087 | goto out; | 1203 | goto out; |
1088 | } | 1204 | } |
1089 | 1205 | ||
1090 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1206 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1091 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1207 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1092 | 1208 | ||
1093 | if (!xprt->inet) { | 1209 | if (!transport->inet) { |
1094 | struct sock *sk = sock->sk; | 1210 | struct sock *sk = sock->sk; |
1095 | 1211 | ||
1096 | write_lock_bh(&sk->sk_callback_lock); | 1212 | write_lock_bh(&sk->sk_callback_lock); |
1097 | 1213 | ||
1098 | sk->sk_user_data = xprt; | 1214 | sk->sk_user_data = xprt; |
1099 | xprt->old_data_ready = sk->sk_data_ready; | 1215 | transport->old_data_ready = sk->sk_data_ready; |
1100 | xprt->old_state_change = sk->sk_state_change; | 1216 | transport->old_state_change = sk->sk_state_change; |
1101 | xprt->old_write_space = sk->sk_write_space; | 1217 | transport->old_write_space = sk->sk_write_space; |
1102 | sk->sk_data_ready = xs_udp_data_ready; | 1218 | sk->sk_data_ready = xs_udp_data_ready; |
1103 | sk->sk_write_space = xs_udp_write_space; | 1219 | sk->sk_write_space = xs_udp_write_space; |
1104 | sk->sk_no_check = UDP_CSUM_NORCV; | 1220 | sk->sk_no_check = UDP_CSUM_NORCV; |
@@ -1107,8 +1223,8 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1107 | xprt_set_connected(xprt); | 1223 | xprt_set_connected(xprt); |
1108 | 1224 | ||
1109 | /* Reset to new socket */ | 1225 | /* Reset to new socket */ |
1110 | xprt->sock = sock; | 1226 | transport->sock = sock; |
1111 | xprt->inet = sk; | 1227 | transport->inet = sk; |
1112 | 1228 | ||
1113 | write_unlock_bh(&sk->sk_callback_lock); | 1229 | write_unlock_bh(&sk->sk_callback_lock); |
1114 | } | 1230 | } |
@@ -1126,7 +1242,7 @@ out: | |||
1126 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | 1242 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) |
1127 | { | 1243 | { |
1128 | int result; | 1244 | int result; |
1129 | struct socket *sock = xprt->sock; | 1245 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1130 | struct sockaddr any; | 1246 | struct sockaddr any; |
1131 | 1247 | ||
1132 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | 1248 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); |
@@ -1137,7 +1253,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1137 | */ | 1253 | */ |
1138 | memset(&any, 0, sizeof(any)); | 1254 | memset(&any, 0, sizeof(any)); |
1139 | any.sa_family = AF_UNSPEC; | 1255 | any.sa_family = AF_UNSPEC; |
1140 | result = kernel_connect(sock, &any, sizeof(any), 0); | 1256 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
1141 | if (result) | 1257 | if (result) |
1142 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | 1258 | dprintk("RPC: AF_UNSPEC connect return code %d\n", |
1143 | result); | 1259 | result); |
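
xs_tcp_reuse_connection() relies on a standard trick: connecting a TCP socket to an address whose sa_family is AF_UNSPEC dissolves the existing association while keeping the local port binding, so the following reconnect goes out from the same reserved port. A minimal sketch of just that step (xs_dissolve_connection() is a hypothetical name; the patch does this inline):

	static int xs_dissolve_connection(struct socket *sock)
	{
		struct sockaddr any;

		/* AF_UNSPEC "connect" breaks the association but keeps the
		 * locally bound (reserved) port for the next connect() */
		memset(&any, 0, sizeof(any));
		any.sa_family = AF_UNSPEC;
		return kernel_connect(sock, &any, sizeof(any), 0);
	}
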
@@ -1151,22 +1267,23 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1151 | */ | 1267 | */ |
1152 | static void xs_tcp_connect_worker(struct work_struct *work) | 1268 | static void xs_tcp_connect_worker(struct work_struct *work) |
1153 | { | 1269 | { |
1154 | struct rpc_xprt *xprt = | 1270 | struct sock_xprt *transport = |
1155 | container_of(work, struct rpc_xprt, connect_worker.work); | 1271 | container_of(work, struct sock_xprt, connect_worker.work); |
1156 | struct socket *sock = xprt->sock; | 1272 | struct rpc_xprt *xprt = &transport->xprt; |
1273 | struct socket *sock = transport->sock; | ||
1157 | int err, status = -EIO; | 1274 | int err, status = -EIO; |
1158 | 1275 | ||
1159 | if (xprt->shutdown || !xprt_bound(xprt)) | 1276 | if (xprt->shutdown || !xprt_bound(xprt)) |
1160 | goto out; | 1277 | goto out; |
1161 | 1278 | ||
1162 | if (!xprt->sock) { | 1279 | if (!sock) { |
1163 | /* start from scratch */ | 1280 | /* start from scratch */ |
1164 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | 1281 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { |
1165 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1282 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); |
1166 | goto out; | 1283 | goto out; |
1167 | } | 1284 | } |
1168 | 1285 | ||
1169 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1286 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1170 | sock_release(sock); | 1287 | sock_release(sock); |
1171 | goto out; | 1288 | goto out; |
1172 | } | 1289 | } |
@@ -1175,17 +1292,17 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1175 | xs_tcp_reuse_connection(xprt); | 1292 | xs_tcp_reuse_connection(xprt); |
1176 | 1293 | ||
1177 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1294 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1178 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1295 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1179 | 1296 | ||
1180 | if (!xprt->inet) { | 1297 | if (!transport->inet) { |
1181 | struct sock *sk = sock->sk; | 1298 | struct sock *sk = sock->sk; |
1182 | 1299 | ||
1183 | write_lock_bh(&sk->sk_callback_lock); | 1300 | write_lock_bh(&sk->sk_callback_lock); |
1184 | 1301 | ||
1185 | sk->sk_user_data = xprt; | 1302 | sk->sk_user_data = xprt; |
1186 | xprt->old_data_ready = sk->sk_data_ready; | 1303 | transport->old_data_ready = sk->sk_data_ready; |
1187 | xprt->old_state_change = sk->sk_state_change; | 1304 | transport->old_state_change = sk->sk_state_change; |
1188 | xprt->old_write_space = sk->sk_write_space; | 1305 | transport->old_write_space = sk->sk_write_space; |
1189 | sk->sk_data_ready = xs_tcp_data_ready; | 1306 | sk->sk_data_ready = xs_tcp_data_ready; |
1190 | sk->sk_state_change = xs_tcp_state_change; | 1307 | sk->sk_state_change = xs_tcp_state_change; |
1191 | sk->sk_write_space = xs_tcp_write_space; | 1308 | sk->sk_write_space = xs_tcp_write_space; |
@@ -1200,8 +1317,8 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1200 | xprt_clear_connected(xprt); | 1317 | xprt_clear_connected(xprt); |
1201 | 1318 | ||
1202 | /* Reset to new socket */ | 1319 | /* Reset to new socket */ |
1203 | xprt->sock = sock; | 1320 | transport->sock = sock; |
1204 | xprt->inet = sk; | 1321 | transport->inet = sk; |
1205 | 1322 | ||
1206 | write_unlock_bh(&sk->sk_callback_lock); | 1323 | write_unlock_bh(&sk->sk_callback_lock); |
1207 | } | 1324 | } |
@@ -1250,21 +1367,22 @@ out_clear: | |||
1250 | static void xs_connect(struct rpc_task *task) | 1367 | static void xs_connect(struct rpc_task *task) |
1251 | { | 1368 | { |
1252 | struct rpc_xprt *xprt = task->tk_xprt; | 1369 | struct rpc_xprt *xprt = task->tk_xprt; |
1370 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1253 | 1371 | ||
1254 | if (xprt_test_and_set_connecting(xprt)) | 1372 | if (xprt_test_and_set_connecting(xprt)) |
1255 | return; | 1373 | return; |
1256 | 1374 | ||
1257 | if (xprt->sock != NULL) { | 1375 | if (transport->sock != NULL) { |
1258 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", | 1376 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", |
1259 | xprt, xprt->reestablish_timeout / HZ); | 1377 | xprt, xprt->reestablish_timeout / HZ); |
1260 | schedule_delayed_work(&xprt->connect_worker, | 1378 | schedule_delayed_work(&transport->connect_worker, |
1261 | xprt->reestablish_timeout); | 1379 | xprt->reestablish_timeout); |
1262 | xprt->reestablish_timeout <<= 1; | 1380 | xprt->reestablish_timeout <<= 1; |
1263 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1381 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
1264 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1382 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1265 | } else { | 1383 | } else { |
1266 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1384 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1267 | schedule_delayed_work(&xprt->connect_worker, 0); | 1385 | schedule_delayed_work(&transport->connect_worker, 0); |
1268 | 1386 | ||
1269 | /* flush_scheduled_work can sleep... */ | 1387 | /* flush_scheduled_work can sleep... */ |
1270 | if (!RPC_IS_ASYNC(task)) | 1388 | if (!RPC_IS_ASYNC(task)) |
@@ -1280,8 +1398,10 @@ static void xs_connect(struct rpc_task *task) | |||
1280 | */ | 1398 | */ |
1281 | static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | 1399 | static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) |
1282 | { | 1400 | { |
1401 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1402 | |||
1283 | seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", | 1403 | seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", |
1284 | xprt->port, | 1404 | transport->port, |
1285 | xprt->stat.bind_count, | 1405 | xprt->stat.bind_count, |
1286 | xprt->stat.sends, | 1406 | xprt->stat.sends, |
1287 | xprt->stat.recvs, | 1407 | xprt->stat.recvs, |
@@ -1298,13 +1418,14 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
1298 | */ | 1418 | */ |
1299 | static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | 1419 | static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) |
1300 | { | 1420 | { |
1421 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1301 | long idle_time = 0; | 1422 | long idle_time = 0; |
1302 | 1423 | ||
1303 | if (xprt_connected(xprt)) | 1424 | if (xprt_connected(xprt)) |
1304 | idle_time = (long)(jiffies - xprt->last_used) / HZ; | 1425 | idle_time = (long)(jiffies - xprt->last_used) / HZ; |
1305 | 1426 | ||
1306 | seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", | 1427 | seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", |
1307 | xprt->port, | 1428 | transport->port, |
1308 | xprt->stat.bind_count, | 1429 | xprt->stat.bind_count, |
1309 | xprt->stat.connect_count, | 1430 | xprt->stat.connect_count, |
1310 | xprt->stat.connect_time, | 1431 | xprt->stat.connect_time, |
@@ -1318,7 +1439,6 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
1318 | 1439 | ||
1319 | static struct rpc_xprt_ops xs_udp_ops = { | 1440 | static struct rpc_xprt_ops xs_udp_ops = { |
1320 | .set_buffer_size = xs_udp_set_buffer_size, | 1441 | .set_buffer_size = xs_udp_set_buffer_size, |
1321 | .print_addr = xs_print_peer_address, | ||
1322 | .reserve_xprt = xprt_reserve_xprt_cong, | 1442 | .reserve_xprt = xprt_reserve_xprt_cong, |
1323 | .release_xprt = xprt_release_xprt_cong, | 1443 | .release_xprt = xprt_release_xprt_cong, |
1324 | .rpcbind = rpc_getport, | 1444 | .rpcbind = rpc_getport, |
@@ -1336,7 +1456,6 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1336 | }; | 1456 | }; |
1337 | 1457 | ||
1338 | static struct rpc_xprt_ops xs_tcp_ops = { | 1458 | static struct rpc_xprt_ops xs_tcp_ops = { |
1339 | .print_addr = xs_print_peer_address, | ||
1340 | .reserve_xprt = xprt_reserve_xprt, | 1459 | .reserve_xprt = xprt_reserve_xprt, |
1341 | .release_xprt = xs_tcp_release_xprt, | 1460 | .release_xprt = xs_tcp_release_xprt, |
1342 | .rpcbind = rpc_getport, | 1461 | .rpcbind = rpc_getport, |
@@ -1351,33 +1470,64 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1351 | .print_stats = xs_tcp_print_stats, | 1470 | .print_stats = xs_tcp_print_stats, |
1352 | }; | 1471 | }; |
1353 | 1472 | ||
1473 | static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) | ||
1474 | { | ||
1475 | struct rpc_xprt *xprt; | ||
1476 | struct sock_xprt *new; | ||
1477 | |||
1478 | if (addrlen > sizeof(xprt->addr)) { | ||
1479 | dprintk("RPC: xs_setup_xprt: address too large\n"); | ||
1480 | return ERR_PTR(-EBADF); | ||
1481 | } | ||
1482 | |||
1483 | new = kzalloc(sizeof(*new), GFP_KERNEL); | ||
1484 | if (new == NULL) { | ||
1485 | dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n"); | ||
1486 | return ERR_PTR(-ENOMEM); | ||
1487 | } | ||
1488 | xprt = &new->xprt; | ||
1489 | |||
1490 | xprt->max_reqs = slot_table_size; | ||
1491 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); | ||
1492 | if (xprt->slot == NULL) { | ||
1493 | kfree(xprt); | ||
1494 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n"); | ||
1495 | return ERR_PTR(-ENOMEM); | ||
1496 | } | ||
1497 | |||
1498 | memcpy(&xprt->addr, addr, addrlen); | ||
1499 | xprt->addrlen = addrlen; | ||
1500 | new->port = xs_get_random_port(); | ||
1501 | |||
1502 | return xprt; | ||
1503 | } | ||
1504 | |||
1354 | /** | 1505 | /** |
1355 | * xs_setup_udp - Set up transport to use a UDP socket | 1506 | * xs_setup_udp - Set up transport to use a UDP socket |
1356 | * @xprt: transport to set up | 1507 | * @addr: address of remote server |
1508 | * @addrlen: length of address in bytes | ||
1357 | * @to: timeout parameters | 1509 | * @to: timeout parameters |
1358 | * | 1510 | * |
1359 | */ | 1511 | */ |
1360 | int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1512 | struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) |
1361 | { | 1513 | { |
1362 | size_t slot_table_size; | 1514 | struct rpc_xprt *xprt; |
1363 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 1515 | struct sock_xprt *transport; |
1364 | 1516 | ||
1365 | xprt->max_reqs = xprt_udp_slot_table_entries; | 1517 | xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); |
1366 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1518 | if (IS_ERR(xprt)) |
1367 | xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); | 1519 | return xprt; |
1368 | if (xprt->slot == NULL) | 1520 | transport = container_of(xprt, struct sock_xprt, xprt); |
1369 | return -ENOMEM; | ||
1370 | 1521 | ||
1371 | if (ntohs(addr->sin_port) != 0) | 1522 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) |
1372 | xprt_set_bound(xprt); | 1523 | xprt_set_bound(xprt); |
1373 | xprt->port = xs_get_random_port(); | ||
1374 | 1524 | ||
1375 | xprt->prot = IPPROTO_UDP; | 1525 | xprt->prot = IPPROTO_UDP; |
1376 | xprt->tsh_size = 0; | 1526 | xprt->tsh_size = 0; |
1377 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1527 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1378 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1528 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1379 | 1529 | ||
1380 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); | 1530 | INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker); |
1381 | xprt->bind_timeout = XS_BIND_TO; | 1531 | xprt->bind_timeout = XS_BIND_TO; |
1382 | xprt->connect_timeout = XS_UDP_CONN_TO; | 1532 | xprt->connect_timeout = XS_UDP_CONN_TO; |
1383 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 1533 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
@@ -1392,37 +1542,36 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1392 | 1542 | ||
1393 | xs_format_peer_addresses(xprt); | 1543 | xs_format_peer_addresses(xprt); |
1394 | dprintk("RPC: set up transport to address %s\n", | 1544 | dprintk("RPC: set up transport to address %s\n", |
1395 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1545 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1396 | 1546 | ||
1397 | return 0; | 1547 | return xprt; |
1398 | } | 1548 | } |
1399 | 1549 | ||
1400 | /** | 1550 | /** |
1401 | * xs_setup_tcp - Set up transport to use a TCP socket | 1551 | * xs_setup_tcp - Set up transport to use a TCP socket |
1402 | * @xprt: transport to set up | 1552 | * @addr: address of remote server |
1553 | * @addrlen: length of address in bytes | ||
1403 | * @to: timeout parameters | 1554 | * @to: timeout parameters |
1404 | * | 1555 | * |
1405 | */ | 1556 | */ |
1406 | int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1557 | struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) |
1407 | { | 1558 | { |
1408 | size_t slot_table_size; | 1559 | struct rpc_xprt *xprt; |
1409 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 1560 | struct sock_xprt *transport; |
1410 | 1561 | ||
1411 | xprt->max_reqs = xprt_tcp_slot_table_entries; | 1562 | xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); |
1412 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1563 | if (IS_ERR(xprt)) |
1413 | xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); | 1564 | return xprt; |
1414 | if (xprt->slot == NULL) | 1565 | transport = container_of(xprt, struct sock_xprt, xprt); |
1415 | return -ENOMEM; | ||
1416 | 1566 | ||
1417 | if (ntohs(addr->sin_port) != 0) | 1567 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) |
1418 | xprt_set_bound(xprt); | 1568 | xprt_set_bound(xprt); |
1419 | xprt->port = xs_get_random_port(); | ||
1420 | 1569 | ||
1421 | xprt->prot = IPPROTO_TCP; | 1570 | xprt->prot = IPPROTO_TCP; |
1422 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1571 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1423 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1572 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1424 | 1573 | ||
1425 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); | 1574 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker); |
1426 | xprt->bind_timeout = XS_BIND_TO; | 1575 | xprt->bind_timeout = XS_BIND_TO; |
1427 | xprt->connect_timeout = XS_TCP_CONN_TO; | 1576 | xprt->connect_timeout = XS_TCP_CONN_TO; |
1428 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 1577 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
@@ -1437,7 +1586,40 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1437 | 1586 | ||
1438 | xs_format_peer_addresses(xprt); | 1587 | xs_format_peer_addresses(xprt); |
1439 | dprintk("RPC: set up transport to address %s\n", | 1588 | dprintk("RPC: set up transport to address %s\n", |
1440 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1589 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1590 | |||
1591 | return xprt; | ||
1592 | } | ||
1593 | |||
1594 | /** | ||
1595 | * init_socket_xprt - set up xprtsock's sysctls | ||
1596 | * | ||
1597 | */ | ||
1598 | int init_socket_xprt(void) | ||
1599 | { | ||
1600 | #ifdef RPC_DEBUG | ||
1601 | if (!sunrpc_table_header) { | ||
1602 | sunrpc_table_header = register_sysctl_table(sunrpc_table, 1); | ||
1603 | #ifdef CONFIG_PROC_FS | ||
1604 | if (sunrpc_table[0].de) | ||
1605 | sunrpc_table[0].de->owner = THIS_MODULE; | ||
1606 | #endif | ||
1607 | } | ||
1608 | #endif | ||
1441 | 1609 | ||
1442 | return 0; | 1610 | return 0; |
1443 | } | 1611 | } |
1612 | |||
1613 | /** | ||
1614 | * cleanup_socket_xprt - remove xprtsock's sysctls | ||
1615 | * | ||
1616 | */ | ||
1617 | void cleanup_socket_xprt(void) | ||
1618 | { | ||
1619 | #ifdef RPC_DEBUG | ||
1620 | if (sunrpc_table_header) { | ||
1621 | unregister_sysctl_table(sunrpc_table_header); | ||
1622 | sunrpc_table_header = NULL; | ||
1623 | } | ||
1624 | #endif | ||
1625 | } | ||
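
With xs_setup_udp() and xs_setup_tcp() turned into constructors that allocate the transport themselves, a caller now receives the rpc_xprt (or an ERR_PTR) instead of passing one in. A hedged usage sketch, with caller-side names invented for illustration:

	static int example_create_udp_transport(struct sockaddr_in *sin,
						struct rpc_timeout *to,
						struct rpc_xprt **out)
	{
		struct rpc_xprt *xprt;

		xprt = xs_setup_udp((struct sockaddr *)sin, sizeof(*sin), to);
		if (IS_ERR(xprt))
			return PTR_ERR(xprt);	/* failure is an ERR_PTR, not NULL */

		*out = xprt;
		return 0;
	}
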