author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 20:14:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 20:14:54 -0500
commit     6f83e5bd3e96228ee0caff0b103addb5f4e95459 (patch)
tree       bf27fcc754ea8c5287088df2803edbfb3c70f716 /net
parent     73b4f63aebd6d57db4ca1d31fa6f8516651207b0 (diff)
parent     c627d31ba0696cbd829437af2be2f2dee3546b1e (diff)
Merge tag 'nfs-for-3.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client updates from Trond Myklebust:
"Highlights incluse:
Features:
- Removing the forced serialisation of open()/close() calls in
NFSv4.x (x>0) makes for a significant performance improvement in
metadata intensive workloads.
- Full support for the pNFS "flexible files" layout type
- Further RPC/RDMA client improvements from Chuck
Bugfixes:
- Stable fix: NFSv4.1 backchannel calls blocking operations with !TASK_RUNNING
- Stable fix: pnfs_generic_pg_init_read/write can be called with lseg == NULL
- Stable fix: Fix an Oopsable condition when nsm_mon_unmon is called
as part of the namespace cleanup.
- Stable fix: Ensure we reference the inode for return-on-close in
delegreturn
- Use SO_REUSEPORT to ensure that NFSv3 TCP connections can rebind to
the same source address/port combination during a disconnect/reconnect
event (a userspace sketch of SO_REUSEPORT follows this message). This
is a requirement imposed by most NFSv3 server duplicate reply cache
implementations.
Optimisations:
- Ask for no NFSv4.1 delegations on OPEN if using O_DIRECT
Other:
- Add Anna Schumaker as co-maintainer for the NFS client"
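
For context, a minimal userspace sketch of the SO_REUSEPORT bind pattern the
networking item above refers to; the kernel-side change lands in
xs_sock_set_reuseport() in net/sunrpc/xprtsock.c (see the shortlog below).
The source port 40123 here is purely illustrative.

/* Bind a TCP socket to a fixed source port with SO_REUSEPORT, so a later
 * reconnect can reuse the same address/port pair even while the previous
 * connection lingers in TIME_WAIT. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        struct sockaddr_in src;

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_REUSEPORT,
                                 &one, sizeof(one)) < 0) {
                perror("socket/setsockopt");
                return 1;
        }

        memset(&src, 0, sizeof(src));
        src.sin_family = AF_INET;
        src.sin_port = htons(40123);            /* fixed source port */
        src.sin_addr.s_addr = htonl(INADDR_ANY);

        if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
                perror("bind");
                return 1;
        }
        puts("bound with SO_REUSEPORT; a reconnect may reuse this port");
        close(fd);
        return 0;
}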
* tag 'nfs-for-3.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (119 commits)
SUNRPC: Cleanup to remove xs_tcp_close()
pnfs: delete an unintended goto
pnfs/flexfiles: Do not dprintk after the free
SUNRPC: Fix stupid typo in xs_sock_set_reuseport
SUNRPC: Define xs_tcp_fin_timeout only if CONFIG_SUNRPC_DEBUG
SUNRPC: Handle connection reset more efficiently.
SUNRPC: Remove the redundant XPRT_CONNECTION_CLOSE flag
SUNRPC: Make xs_tcp_close() do a socket shutdown rather than a sock_release
SUNRPC: Ensure xs_tcp_shutdown() requests a full close of the connection
SUNRPC: Cleanup to remove remaining uses of XPRT_CONNECTION_ABORT
SUNRPC: Remove TCP socket linger code
SUNRPC: Remove TCP client connection reset hack
SUNRPC: TCP/UDP always close the old socket before reconnecting
SUNRPC: Add helpers to prevent socket create from racing
SUNRPC: Ensure xs_reset_transport() resets the close connection flags
SUNRPC: Do not clear the source port in xs_reset_transport
SUNRPC: Handle EADDRINUSE on connect
SUNRPC: Set SO_REUSEPORT socket option for TCP connections
NFSv4.1: Fix pnfs_put_lseg races
NFSv4.1: pnfs_send_layoutreturn should use GFP_NOFS
...
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/clnt.c               |  15
-rw-r--r--  net/sunrpc/rpcb_clnt.c          |   8
-rw-r--r--  net/sunrpc/sched.c              |   7
-rw-r--r--  net/sunrpc/stats.c              |  26
-rw-r--r--  net/sunrpc/xprt.c               |  38
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c  | 108
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 182
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c     | 411
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 112
-rw-r--r--  net/sunrpc/xprtsock.c           | 238
10 files changed, 628 insertions, 517 deletions
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 05da12a33945..612aa73bbc60 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -286,10 +286,8 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
 
 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
 {
-        clnt->cl_nodelen = strlen(nodename);
-        if (clnt->cl_nodelen > UNX_MAXNODENAME)
-                clnt->cl_nodelen = UNX_MAXNODENAME;
-        memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+        clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
+                        nodename, sizeof(clnt->cl_nodename));
 }
 
 static int rpc_client_register(struct rpc_clnt *clnt,
@@ -365,6 +363,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
         const struct rpc_version *version;
         struct rpc_clnt *clnt = NULL;
         const struct rpc_timeout *timeout;
+        const char *nodename = args->nodename;
         int err;
 
         /* sanity check the name before trying to print it */
@@ -420,8 +419,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
 
         atomic_set(&clnt->cl_count, 1);
 
+        if (nodename == NULL)
+                nodename = utsname()->nodename;
         /* save the nodename */
-        rpc_clnt_set_nodename(clnt, utsname()->nodename);
+        rpc_clnt_set_nodename(clnt, nodename);
 
         err = rpc_client_register(clnt, args->authflavor, args->client_name);
         if (err)
@@ -576,6 +577,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
         if (xprt == NULL)
                 goto out_err;
         args->servername = xprt->servername;
+        args->nodename = clnt->cl_nodename;
 
         new = rpc_new_client(args, xprt, clnt);
         if (IS_ERR(new)) {
@@ -1824,6 +1826,7 @@ call_connect_status(struct rpc_task *task)
         case -ECONNABORTED:
         case -ENETUNREACH:
         case -EHOSTUNREACH:
+        case -EADDRINUSE:
         case -ENOBUFS:
         case -EPIPE:
                 if (RPC_IS_SOFTCONN(task))
@@ -1932,6 +1935,7 @@ call_transmit_status(struct rpc_task *task)
         }
         case -ECONNRESET:
         case -ECONNABORTED:
+        case -EADDRINUSE:
         case -ENOTCONN:
         case -ENOBUFS:
         case -EPIPE:
@@ -2051,6 +2055,7 @@ call_status(struct rpc_task *task)
         case -ECONNRESET:
         case -ECONNABORTED:
                 rpc_force_rebind(clnt);
+        case -EADDRINUSE:
         case -ENOBUFS:
                 rpc_delay(task, 3*HZ);
         case -EPIPE:
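
The rpc_clnt_set_nodename() hunk above replaces an open-coded truncating copy,
which could leave cl_nodename without a NUL terminator, with strlcpy(). A
standalone sketch of the difference, using a local bounded-copy helper since
strlcpy() is a BSD/kernel API that older glibc lacks; the 8-byte buffer is
illustrative.

#include <stdio.h>
#include <string.h>

/* local stand-in for the kernel's strlcpy(): copies at most size-1 bytes,
 * always NUL-terminates, and returns strlen(src) */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char raw[8], name[8];
        size_t n = strlen("very-long-nodename");

        /* old pattern: clamp the length, memcpy, no NUL written */
        if (n > sizeof(raw))
                n = sizeof(raw);
        memcpy(raw, "very-long-nodename", n);   /* raw is NOT terminated */

        /* new pattern: always terminated, safe to print */
        bounded_copy(name, "very-long-nodename", sizeof(name));
        printf("safe copy: '%s'\n", name);      /* prints "very-lo" */
        return 0;
}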
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 05202012bcfc..cf5770d8f49a 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -355,7 +355,8 @@ out:
         return result;
 }
 
-static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
+static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename,
+                                    const char *hostname,
                                     struct sockaddr *srvaddr, size_t salen,
                                     int proto, u32 version)
 {
@@ -365,6 +366,7 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
                 .address = srvaddr,
                 .addrsize = salen,
                 .servername = hostname,
+                .nodename = nodename,
                 .program = &rpcb_program,
                 .version = version,
                 .authflavor = RPC_AUTH_UNIX,
@@ -740,7 +742,9 @@ void rpcb_getport_async(struct rpc_task *task)
         dprintk("RPC: %5u %s: trying rpcbind version %u\n",
                 task->tk_pid, __func__, bind_version);
 
-        rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen,
+        rpcb_clnt = rpcb_create(xprt->xprt_net,
+                                clnt->cl_nodename,
+                                xprt->servername, sap, salen,
                                 xprt->prot, bind_version);
         if (IS_ERR(rpcb_clnt)) {
                 status = PTR_ERR(rpcb_clnt);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d20f2329eea3..b91fd9c597b4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -844,10 +844,10 @@ static void rpc_async_schedule(struct work_struct *work)
 void *rpc_malloc(struct rpc_task *task, size_t size)
 {
         struct rpc_buffer *buf;
-        gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+        gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
 
         if (RPC_IS_SWAPPER(task))
-                gfp |= __GFP_MEMALLOC;
+                gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
 
         size += sizeof(struct rpc_buffer);
         if (size <= RPC_BUFFER_MAXSIZE)
@@ -1069,7 +1069,8 @@ static int rpciod_start(void)
          * Create the rpciod thread and wait for it to start.
          */
         dprintk("RPC: creating workqueue rpciod\n");
-        wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1);
+        /* Note: highpri because network receive is latency sensitive */
+        wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
         rpciod_workqueue = wq;
         return rpciod_workqueue != NULL;
 }
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 9711a155bc50..2ecb994314c1 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -140,22 +140,20 @@ void rpc_free_iostats(struct rpc_iostats *stats)
 EXPORT_SYMBOL_GPL(rpc_free_iostats);
 
 /**
- * rpc_count_iostats - tally up per-task stats
+ * rpc_count_iostats_metrics - tally up per-task stats
  * @task: completed rpc_task
- * @stats: array of stat structures
+ * @op_metrics: stat structure for OP that will accumulate stats from @task
  */
-void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
+void rpc_count_iostats_metrics(const struct rpc_task *task,
+                               struct rpc_iostats *op_metrics)
 {
         struct rpc_rqst *req = task->tk_rqstp;
-        struct rpc_iostats *op_metrics;
         ktime_t delta, now;
 
-        if (!stats || !req)
+        if (!op_metrics || !req)
                 return;
 
         now = ktime_get();
-        op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
-
         spin_lock(&op_metrics->om_lock);
 
         op_metrics->om_ops++;
@@ -175,6 +173,20 @@ void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
 
         spin_unlock(&op_metrics->om_lock);
 }
+EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);
+
+/**
+ * rpc_count_iostats - tally up per-task stats
+ * @task: completed rpc_task
+ * @stats: array of stat structures
+ *
+ * Uses the statidx from @task
+ */
+void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
+{
+        rpc_count_iostats_metrics(task,
+                                  &stats[task->tk_msg.rpc_proc->p_statidx]);
+}
 EXPORT_SYMBOL_GPL(rpc_count_iostats);
 
 static void _print_name(struct seq_file *seq, unsigned int op,
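
The stats.c hunks split the old array-indexed accumulator into a core helper
that takes one stats bucket, plus a thin wrapper that keeps the old array API,
so a caller can also feed a bucket it located itself. A standalone sketch of
the same wrapper-extraction pattern; the struct and names here are
illustrative, not the kernel's.

#include <stdio.h>

struct op_stats {
        unsigned long ops;
        unsigned long bytes;
};

/* core helper: accumulate into one caller-chosen bucket */
static void count_stats_metrics(struct op_stats *bucket, unsigned long bytes)
{
        if (!bucket)
                return;
        bucket->ops++;
        bucket->bytes += bytes;
}

/* wrapper: preserves the old "array + index" calling convention */
static void count_stats(struct op_stats *stats, unsigned int idx,
                        unsigned long bytes)
{
        count_stats_metrics(&stats[idx], bytes);
}

int main(void)
{
        struct op_stats table[4] = { { 0, 0 } };
        struct op_stats private = { 0, 0 };

        count_stats(table, 2, 512);             /* old-style caller */
        count_stats_metrics(&private, 128);     /* new-style caller */
        printf("table[2]: %lu ops, private: %lu bytes\n",
               table[2].ops, private.bytes);
        return 0;
}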
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ebbefad21a37..e3015aede0d9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -683,13 +683,43 @@ xprt_init_autodisconnect(unsigned long data)
         if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                 goto out_abort;
         spin_unlock(&xprt->transport_lock);
-        set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
         queue_work(rpciod_workqueue, &xprt->task_cleanup);
         return;
 out_abort:
         spin_unlock(&xprt->transport_lock);
 }
 
+bool xprt_lock_connect(struct rpc_xprt *xprt,
+                struct rpc_task *task,
+                void *cookie)
+{
+        bool ret = false;
+
+        spin_lock_bh(&xprt->transport_lock);
+        if (!test_bit(XPRT_LOCKED, &xprt->state))
+                goto out;
+        if (xprt->snd_task != task)
+                goto out;
+        xprt->snd_task = cookie;
+        ret = true;
+out:
+        spin_unlock_bh(&xprt->transport_lock);
+        return ret;
+}
+
+void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
+{
+        spin_lock_bh(&xprt->transport_lock);
+        if (xprt->snd_task != cookie)
+                goto out;
+        if (!test_bit(XPRT_LOCKED, &xprt->state))
+                goto out;
+        xprt->snd_task = NULL;
+        xprt->ops->release_xprt(xprt, NULL);
+out:
+        spin_unlock_bh(&xprt->transport_lock);
+}
+
 /**
  * xprt_connect - schedule a transport connect operation
  * @task: RPC task that is requesting the connect
@@ -712,9 +742,7 @@ void xprt_connect(struct rpc_task *task)
         if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
                 xprt->ops->close(xprt);
 
-        if (xprt_connected(xprt))
-                xprt_release_write(xprt, task);
-        else {
+        if (!xprt_connected(xprt)) {
                 task->tk_rqstp->rq_bytes_sent = 0;
                 task->tk_timeout = task->tk_rqstp->rq_timeout;
                 rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
@@ -726,6 +754,7 @@ void xprt_connect(struct rpc_task *task)
                 xprt->stat.connect_start = jiffies;
                 xprt->ops->connect(xprt, task);
         }
+        xprt_release_write(xprt, task);
 }
 
 static void xprt_connect_status(struct rpc_task *task)
@@ -758,7 +787,6 @@ static void xprt_connect_status(struct rpc_task *task)
                 dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
                                 "server %s\n", task->tk_pid, -task->tk_status,
                                 xprt->servername);
-                xprt_release_write(xprt, task);
                 task->tk_status = -EIO;
         }
 }
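
The new xprt_lock_connect()/xprt_unlock_connect() helpers above hand the
transport send lock from the requesting task to an opaque connect "cookie",
so the connect path can keep ownership across an asynchronous connect without
impersonating an rpc_task. A pthread-based userspace sketch of the same
cookie-handoff idea; names and types are illustrative, not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static void *owner;             /* whoever currently holds the "send lock" */

/* transfer ownership from task to cookie, but only if task really owns it */
static bool lock_connect(void *task, void *cookie)
{
        bool ret = false;

        pthread_mutex_lock(&state_lock);
        if (owner == task) {
                owner = cookie;
                ret = true;
        }
        pthread_mutex_unlock(&state_lock);
        return ret;
}

/* release only if the caller presents the same cookie */
static void unlock_connect(void *cookie)
{
        pthread_mutex_lock(&state_lock);
        if (owner == cookie)
                owner = NULL;
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        int task, cookie;       /* the addresses serve as identities */

        owner = &task;          /* the task took the lock earlier */
        printf("handoff: %s\n", lock_connect(&task, &cookie) ? "ok" : "refused");
        unlock_connect(&cookie);
        printf("owner cleared: %s\n", owner == NULL ? "yes" : "no");
        return 0;
}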
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index df01d124936c..7e9acd9361c5 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -209,9 +209,11 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
                 if (cur_rchunk) {       /* read */
                         cur_rchunk->rc_discrim = xdr_one;
                         /* all read chunks have the same "position" */
-                        cur_rchunk->rc_position = htonl(pos);
-                        cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
-                        cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
+                        cur_rchunk->rc_position = cpu_to_be32(pos);
+                        cur_rchunk->rc_target.rs_handle =
+                                                cpu_to_be32(seg->mr_rkey);
+                        cur_rchunk->rc_target.rs_length =
+                                                cpu_to_be32(seg->mr_len);
                         xdr_encode_hyper(
                                         (__be32 *)&cur_rchunk->rc_target.rs_offset,
                                         seg->mr_base);
@@ -222,8 +224,10 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
                         cur_rchunk++;
                         r_xprt->rx_stats.read_chunk_count++;
                 } else {                /* write/reply */
-                        cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
-                        cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
+                        cur_wchunk->wc_target.rs_handle =
+                                                cpu_to_be32(seg->mr_rkey);
+                        cur_wchunk->wc_target.rs_length =
+                                                cpu_to_be32(seg->mr_len);
                         xdr_encode_hyper(
                                         (__be32 *)&cur_wchunk->wc_target.rs_offset,
                                         seg->mr_base);
@@ -257,7 +261,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
                 *iptr++ = xdr_zero;     /* encode a NULL reply chunk */
         } else {
                 warray->wc_discrim = xdr_one;
-                warray->wc_nchunks = htonl(nchunks);
+                warray->wc_nchunks = cpu_to_be32(nchunks);
                 iptr = (__be32 *) cur_wchunk;
                 if (type == rpcrdma_writech) {
                         *iptr++ = xdr_zero; /* finish the write chunk list */
@@ -290,7 +294,7 @@ ssize_t
 rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
 {
         struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-        struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+        struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
 
         if (req->rl_rtype != rpcrdma_noch)
                 result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
@@ -402,13 +406,12 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
         base = rqst->rq_svec[0].iov_base;
         rpclen = rqst->rq_svec[0].iov_len;
 
-        /* build RDMA header in private area at front */
-        headerp = (struct rpcrdma_msg *) req->rl_base;
-        /* don't htonl XID, it's already done in request */
+        headerp = rdmab_to_msg(req->rl_rdmabuf);
+        /* don't byte-swap XID, it's already done in request */
         headerp->rm_xid = rqst->rq_xid;
-        headerp->rm_vers = xdr_one;
-        headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
-        headerp->rm_type = htonl(RDMA_MSG);
+        headerp->rm_vers = rpcrdma_version;
+        headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
+        headerp->rm_type = rdma_msg;
 
         /*
          * Chunks needed for results?
@@ -468,7 +471,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
                 return -EIO;
         }
 
-        hdrlen = 28; /*sizeof *headerp;*/
+        hdrlen = RPCRDMA_HDRLEN_MIN;
         padlen = 0;
 
         /*
@@ -482,11 +485,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
                                                 RPCRDMA_INLINE_PAD_VALUE(rqst));
 
         if (padlen) {
-                headerp->rm_type = htonl(RDMA_MSGP);
+                headerp->rm_type = rdma_msgp;
                 headerp->rm_body.rm_padded.rm_align =
-                        htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
+                        cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
                 headerp->rm_body.rm_padded.rm_thresh =
-                        htonl(RPCRDMA_INLINE_PAD_THRESH);
+                        cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
                 headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
                 headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
                 headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
@@ -524,7 +527,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
         dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
                 " headerp 0x%p base 0x%p lkey 0x%x\n",
                 __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
-                headerp, base, req->rl_iov.lkey);
+                headerp, base, rdmab_lkey(req->rl_rdmabuf));
 
         /*
          * initialize send_iov's - normally only two: rdma chunk header and
@@ -533,26 +536,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
          * header and any write data. In all non-rdma cases, any following
          * data has been copied into the RPC header buffer.
          */
-        req->rl_send_iov[0].addr = req->rl_iov.addr;
+        req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
         req->rl_send_iov[0].length = hdrlen;
-        req->rl_send_iov[0].lkey = req->rl_iov.lkey;
+        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
-        req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
+        req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
         req->rl_send_iov[1].length = rpclen;
-        req->rl_send_iov[1].lkey = req->rl_iov.lkey;
+        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
         req->rl_niovs = 2;
 
         if (padlen) {
                 struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 
-                req->rl_send_iov[2].addr = ep->rep_pad.addr;
+                req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
                 req->rl_send_iov[2].length = padlen;
-                req->rl_send_iov[2].lkey = ep->rep_pad.lkey;
+                req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);
 
                 req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
                 req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
-                req->rl_send_iov[3].lkey = req->rl_iov.lkey;
+                req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);
 
                 req->rl_niovs = 4;
         }
@@ -569,8 +572,9 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
 {
         unsigned int i, total_len;
         struct rpcrdma_write_chunk *cur_wchunk;
+        char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
 
-        i = ntohl(**iptrp);     /* get array count */
+        i = be32_to_cpu(**iptrp);
         if (i > max)
                 return -1;
         cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
@@ -582,11 +586,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
                         xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
                         dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
                                 __func__,
-                                ntohl(seg->rs_length),
+                                be32_to_cpu(seg->rs_length),
                                 (unsigned long long)off,
-                                ntohl(seg->rs_handle));
+                                be32_to_cpu(seg->rs_handle));
                 }
-                total_len += ntohl(seg->rs_length);
+                total_len += be32_to_cpu(seg->rs_length);
                 ++cur_wchunk;
         }
         /* check and adjust for properly terminated write chunk */
@@ -596,7 +600,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
                         return -1;
                 cur_wchunk = (struct rpcrdma_write_chunk *) w;
         }
-        if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
+        if ((char *)cur_wchunk > base + rep->rr_len)
                 return -1;
 
         *iptrp = (__be32 *) cur_wchunk;
@@ -691,7 +695,9 @@ rpcrdma_connect_worker(struct work_struct *work)
 {
         struct rpcrdma_ep *ep =
                 container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-        struct rpc_xprt *xprt = ep->rep_xprt;
+        struct rpcrdma_xprt *r_xprt =
+                container_of(ep, struct rpcrdma_xprt, rx_ep);
+        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
 
         spin_lock_bh(&xprt->transport_lock);
         if (++xprt->connect_cookie == 0)        /* maintain a reserved value */
@@ -732,7 +738,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
         struct rpc_xprt *xprt = rep->rr_xprt;
         struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
         __be32 *iptr;
-        int rdmalen, status;
+        int credits, rdmalen, status;
         unsigned long cwnd;
 
         /* Check status. If bad, signal disconnect and return rep to pool */
@@ -744,14 +750,14 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
                 }
                 return;
         }
-        if (rep->rr_len < 28) {
+        if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
                 dprintk("RPC: %s: short/invalid reply\n", __func__);
                 goto repost;
         }
-        headerp = (struct rpcrdma_msg *) rep->rr_base;
-        if (headerp->rm_vers != xdr_one) {
+        headerp = rdmab_to_msg(rep->rr_rdmabuf);
+        if (headerp->rm_vers != rpcrdma_version) {
                 dprintk("RPC: %s: invalid version %d\n",
-                        __func__, ntohl(headerp->rm_vers));
+                        __func__, be32_to_cpu(headerp->rm_vers));
                 goto repost;
         }
 
@@ -762,7 +768,8 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
                 spin_unlock(&xprt->transport_lock);
                 dprintk("RPC: %s: reply 0x%p failed "
                         "to match any request xid 0x%08x len %d\n",
-                        __func__, rep, headerp->rm_xid, rep->rr_len);
+                        __func__, rep, be32_to_cpu(headerp->rm_xid),
+                        rep->rr_len);
 repost:
                 r_xprt->rx_stats.bad_reply_count++;
                 rep->rr_func = rpcrdma_reply_handler;
@@ -778,13 +785,14 @@ repost:
                 spin_unlock(&xprt->transport_lock);
                 dprintk("RPC: %s: duplicate reply 0x%p to RPC "
                         "request 0x%p: xid 0x%08x\n", __func__, rep, req,
-                        headerp->rm_xid);
+                        be32_to_cpu(headerp->rm_xid));
                 goto repost;
         }
 
         dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
                 " RPC request 0x%p xid 0x%08x\n",
-                        __func__, rep, req, rqst, headerp->rm_xid);
+                        __func__, rep, req, rqst,
+                        be32_to_cpu(headerp->rm_xid));
 
         /* from here on, the reply is no longer an orphan */
         req->rl_reply = rep;
@@ -793,7 +801,7 @@ repost:
         /* check for expected message types */
         /* The order of some of these tests is important. */
         switch (headerp->rm_type) {
-        case htonl(RDMA_MSG):
+        case rdma_msg:
                 /* never expect read chunks */
                 /* never expect reply chunks (two ways to check) */
                 /* never expect write chunks without having offered RDMA */
@@ -824,22 +832,24 @@ repost:
                 } else {
                         /* else ordinary inline */
                         rdmalen = 0;
-                        iptr = (__be32 *)((unsigned char *)headerp + 28);
-                        rep->rr_len -= 28; /*sizeof *headerp;*/
+                        iptr = (__be32 *)((unsigned char *)headerp +
+                                                        RPCRDMA_HDRLEN_MIN);
+                        rep->rr_len -= RPCRDMA_HDRLEN_MIN;
                         status = rep->rr_len;
                 }
                 /* Fix up the rpc results for upper layer */
                 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
                 break;
 
-        case htonl(RDMA_NOMSG):
+        case rdma_nomsg:
                 /* never expect read or write chunks, always reply chunks */
                 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                     headerp->rm_body.rm_chunks[1] != xdr_zero ||
                     headerp->rm_body.rm_chunks[2] != xdr_one ||
                     req->rl_nchunks == 0)
                         goto badheader;
-                iptr = (__be32 *)((unsigned char *)headerp + 28);
+                iptr = (__be32 *)((unsigned char *)headerp +
+                                                        RPCRDMA_HDRLEN_MIN);
                 rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
                 if (rdmalen < 0)
                         goto badheader;
@@ -853,7 +863,7 @@ badheader:
                 dprintk("%s: invalid rpcrdma reply header (type %d):"
                                 " chunks[012] == %d %d %d"
                                 " expected chunks <= %d\n",
-                                __func__, ntohl(headerp->rm_type),
+                                __func__, be32_to_cpu(headerp->rm_type),
                                 headerp->rm_body.rm_chunks[0],
                                 headerp->rm_body.rm_chunks[1],
                                 headerp->rm_body.rm_chunks[2],
@@ -863,8 +873,14 @@ badheader:
                 break;
         }
 
+        credits = be32_to_cpu(headerp->rm_credit);
+        if (credits == 0)
+                credits = 1;    /* don't deadlock */
+        else if (credits > r_xprt->rx_buf.rb_max_requests)
+                credits = r_xprt->rx_buf.rb_max_requests;
+
         cwnd = xprt->cwnd;
-        xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
+        xprt->cwnd = credits << RPC_CWNDSHIFT;
         if (xprt->cwnd > cwnd)
                 xprt_release_rqst_cong(rqst->rq_task);
 
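
Several hunks above swap htonl()/ntohl() for cpu_to_be32()/be32_to_cpu(). The
bytes on the wire are identical, since network order is big-endian, but the
kernel's be32 helpers carry __be32 type annotations that sparse can check. A
userspace sketch showing the two spellings agree; the helper is hand-rolled
here because cpu_to_be32() is a kernel-only API.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* portable equivalent of cpu_to_be32(): most-significant byte stored first */
static uint32_t to_be32(uint32_t v)
{
        unsigned char b[4] = { v >> 24, v >> 16, v >> 8, v };
        uint32_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

int main(void)
{
        uint32_t credits = 32;  /* e.g. an rm_credit field value */

        /* htonl() and the be32 helper produce the same wire representation */
        printf("htonl:   0x%08x\n", htonl(credits));
        printf("to_be32: 0x%08x\n", to_be32(credits));
        printf("equal:   %s\n",
               htonl(credits) == to_be32(credits) ? "yes" : "no");
        return 0;
}

Note the reply handler also clamps the decoded credit value to the range
[1, rb_max_requests] before scaling the congestion window, so a zero or
oversized credit grant from the server cannot stall or flood the transport.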
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index bbd6155d3e34..2e192baa59f3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,9 +200,9 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
 static void
 xprt_rdma_connect_worker(struct work_struct *work)
 {
-        struct rpcrdma_xprt *r_xprt =
-                container_of(work, struct rpcrdma_xprt, rdma_connect.work);
-        struct rpc_xprt *xprt = &r_xprt->xprt;
+        struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
+                                                   rx_connect_worker.work);
+        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
         int rc = 0;
 
         xprt_clear_connected(xprt);
@@ -235,7 +235,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
         dprintk("RPC: %s: called\n", __func__);
 
-        cancel_delayed_work_sync(&r_xprt->rdma_connect);
+        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
         xprt_clear_connected(xprt);
 
@@ -364,8 +364,7 @@ xprt_setup_rdma(struct xprt_create *args)
          * any inline data. Also specify any padding which will be provided
          * from a preregistered zero buffer.
          */
-        rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
-                                   &new_xprt->rx_data);
+        rc = rpcrdma_buffer_create(new_xprt);
         if (rc)
                 goto out3;
 
@@ -374,9 +373,8 @@ xprt_setup_rdma(struct xprt_create *args)
          * connection loss notification is async. We also catch connection loss
          * when reaping receives.
          */
-        INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
-        new_ep->rep_func = rpcrdma_conn_func;
-        new_ep->rep_xprt = xprt;
+        INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
+                          xprt_rdma_connect_worker);
 
         xprt_rdma_format_addresses(xprt);
         xprt->max_payload = rpcrdma_max_payload(new_xprt);
@@ -434,94 +432,101 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 
         if (r_xprt->rx_ep.rep_connected != 0) {
                 /* Reconnect */
-                schedule_delayed_work(&r_xprt->rdma_connect,
+                schedule_delayed_work(&r_xprt->rx_connect_worker,
                         xprt->reestablish_timeout);
                 xprt->reestablish_timeout <<= 1;
                 if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
                         xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
                 else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                         xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
         } else {
-                schedule_delayed_work(&r_xprt->rdma_connect, 0);
+                schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
                 if (!RPC_IS_ASYNC(task))
-                        flush_delayed_work(&r_xprt->rdma_connect);
+                        flush_delayed_work(&r_xprt->rx_connect_worker);
         }
 }
 
 /*
  * The RDMA allocate/free functions need the task structure as a place
  * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
- * sequence. For this reason, the recv buffers are attached to send
- * buffers for portions of the RPC. Note that the RPC layer allocates
- * both send and receive buffers in the same call. We may register
- * the receive buffer portion when using reply chunks.
+ * sequence.
+ *
+ * The RPC layer allocates both send and receive buffers in the same call
+ * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
+ * We may register rq_rcv_buf when using reply chunks.
  */
 static void *
 xprt_rdma_allocate(struct rpc_task *task, size_t size)
 {
         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
-        struct rpcrdma_req *req, *nreq;
+        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+        struct rpcrdma_regbuf *rb;
+        struct rpcrdma_req *req;
+        size_t min_size;
+        gfp_t flags;
 
-        req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
+        req = rpcrdma_buffer_get(&r_xprt->rx_buf);
         if (req == NULL)
                 return NULL;
 
-        if (size > req->rl_size) {
-                dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
-                        "prog %d vers %d proc %d\n",
-                        __func__, size, req->rl_size,
-                        task->tk_client->cl_prog, task->tk_client->cl_vers,
-                        task->tk_msg.rpc_proc->p_proc);
-                /*
-                 * Outgoing length shortage. Our inline write max must have
-                 * been configured to perform direct i/o.
-                 *
-                 * This is therefore a large metadata operation, and the
-                 * allocate call was made on the maximum possible message,
-                 * e.g. containing long filename(s) or symlink data. In
-                 * fact, while these metadata operations *might* carry
-                 * large outgoing payloads, they rarely *do*. However, we
-                 * have to commit to the request here, so reallocate and
-                 * register it now. The data path will never require this
-                 * reallocation.
-                 *
-                 * If the allocation or registration fails, the RPC framework
-                 * will (doggedly) retry.
-                 */
-                if (task->tk_flags & RPC_TASK_SWAPPER)
-                        nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
-                else
-                        nreq = kmalloc(sizeof *req + size, GFP_NOFS);
-                if (nreq == NULL)
-                        goto outfail;
-
-                if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
-                                nreq->rl_base, size + sizeof(struct rpcrdma_req)
-                                - offsetof(struct rpcrdma_req, rl_base),
-                                &nreq->rl_handle, &nreq->rl_iov)) {
-                        kfree(nreq);
-                        goto outfail;
-                }
-                rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
-                nreq->rl_size = size;
-                nreq->rl_niovs = 0;
-                nreq->rl_nchunks = 0;
-                nreq->rl_buffer = (struct rpcrdma_buffer *)req;
-                nreq->rl_reply = req->rl_reply;
-                memcpy(nreq->rl_segments,
-                        req->rl_segments, sizeof nreq->rl_segments);
-                /* flag the swap with an unused field */
-                nreq->rl_iov.length = 0;
-                req->rl_reply = NULL;
-                req = nreq;
-        }
+        flags = GFP_NOIO | __GFP_NOWARN;
+        if (RPC_IS_SWAPPER(task))
+                flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
+
+        if (req->rl_rdmabuf == NULL)
+                goto out_rdmabuf;
+        if (req->rl_sendbuf == NULL)
+                goto out_sendbuf;
+        if (size > req->rl_sendbuf->rg_size)
+                goto out_sendbuf;
+
+out:
         dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
         req->rl_connect_cookie = 0;     /* our reserved value */
-        return req->rl_xdr_buf;
+        return req->rl_sendbuf->rg_base;
 
-outfail:
+out_rdmabuf:
+        min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
+        if (IS_ERR(rb))
+                goto out_fail;
+        req->rl_rdmabuf = rb;
+
+out_sendbuf:
+        /* XDR encoding and RPC/RDMA marshaling of this request has not
+         * yet occurred. Thus a lower bound is needed to prevent buffer
+         * overrun during marshaling.
+         *
+         * RPC/RDMA marshaling may choose to send payload bearing ops
+         * inline, if the result is smaller than the inline threshold.
+         * The value of the "size" argument accounts for header
+         * requirements but not for the payload in these cases.
+         *
+         * Likewise, allocate enough space to receive a reply up to the
+         * size of the inline threshold.
+         *
+         * It's unlikely that both the send header and the received
+         * reply will be large, but slush is provided here to allow
+         * flexibility when marshaling.
+         */
+        min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
+        min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+        if (size < min_size)
+                size = min_size;
+
+        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
+        if (IS_ERR(rb))
+                goto out_fail;
+        rb->rg_owner = req;
+
+        r_xprt->rx_stats.hardway_register_count += size;
+        rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
+        req->rl_sendbuf = rb;
+        goto out;
+
+out_fail:
         rpcrdma_buffer_put(req);
-        rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
+        r_xprt->rx_stats.failed_marshal_count++;
         return NULL;
 }
 
@@ -533,47 +538,24 @@ xprt_rdma_free(void *buffer)
 {
         struct rpcrdma_req *req;
         struct rpcrdma_xprt *r_xprt;
-        struct rpcrdma_rep *rep;
+        struct rpcrdma_regbuf *rb;
         int i;
 
         if (buffer == NULL)
                 return;
 
-        req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
-        if (req->rl_iov.length == 0) {  /* see allocate above */
-                r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
-                                      struct rpcrdma_xprt, rx_buf);
-        } else
-                r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
-        rep = req->rl_reply;
+        rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
+        req = rb->rg_owner;
+        r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
 
-        dprintk("RPC: %s: called on 0x%p%s\n",
-                __func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");
+        dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
 
-        /*
-         * Finish the deregistration. The process is considered
-         * complete when the rr_func vector becomes NULL - this
-         * was put in place during rpcrdma_reply_handler() - the wait
-         * call below will not block if the dereg is "done". If
-         * interrupted, our framework will clean up.
-         */
         for (i = 0; req->rl_nchunks;) {
                 --req->rl_nchunks;
                 i += rpcrdma_deregister_external(
                         &req->rl_segments[i], r_xprt);
         }
 
-        if (req->rl_iov.length == 0) {  /* see allocate above */
-                struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
-                oreq->rl_reply = req->rl_reply;
-                (void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
-                                                   req->rl_handle,
-                                                   &req->rl_iov);
-                kfree(req);
-                req = oreq;
-        }
-
-        /* Put back request+reply buffers */
         rpcrdma_buffer_put(req);
 }
 
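
xprt_rdma_free() above recovers the rpcrdma_regbuf from the caller's buffer
pointer with container_of() on the flexible rg_base[] array, then follows the
rg_owner backpointer to the owning request. A standalone sketch of that
owner-backpointer pattern; the struct names here are illustrative, not the
kernel's.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct request {
        const char *name;
};

struct regbuf {
        struct request *owner;  /* backpointer, like rg_owner */
        size_t size;
        char base[];            /* payload handed to callers, like rg_base */
};

int main(void)
{
        struct request req = { "req0" };
        struct regbuf *rb = malloc(sizeof(*rb) + 64);
        struct regbuf *found;
        char *buffer;

        if (!rb)
                return 1;
        rb->owner = &req;
        rb->size = 64;
        buffer = rb->base;      /* only this pointer escapes to the caller */
        strcpy(buffer, "payload");

        /* free path: recover the regbuf, then the request that owns it */
        found = container_of(buffer, struct regbuf, base);
        printf("buffer '%s' belongs to %s\n", buffer, found->owner->name);
        free(rb);
        return 0;
}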
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c98e40643910..124676c13780 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -49,6 +49,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <asm/bitops.h>
 
 #include "xprt_rdma.h"
@@ -153,7 +154,7 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
                 event->device->name, context);
         if (ep->rep_connected == 1) {
                 ep->rep_connected = -EIO;
-                ep->rep_func(ep);
+                rpcrdma_conn_func(ep);
                 wake_up_all(&ep->rep_connect_wait);
         }
 }
@@ -168,23 +169,59 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
                 event->device->name, context);
         if (ep->rep_connected == 1) {
                 ep->rep_connected = -EIO;
-                ep->rep_func(ep);
+                rpcrdma_conn_func(ep);
                 wake_up_all(&ep->rep_connect_wait);
         }
 }
 
+static const char * const wc_status[] = {
+        "success",
+        "local length error",
+        "local QP operation error",
+        "local EE context operation error",
+        "local protection error",
+        "WR flushed",
+        "memory management operation error",
+        "bad response error",
+        "local access error",
+        "remote invalid request error",
+        "remote access error",
+        "remote operation error",
+        "transport retry counter exceeded",
+        "RNR retry counter exceeded",
+        "local RDD violation error",
+        "remote invalid RD request",
+        "operation aborted",
+        "invalid EE context number",
+        "invalid EE context state",
+        "fatal error",
+        "response timeout error",
+        "general error",
+};
+
+#define COMPLETION_MSG(status) \
+        ((status) < ARRAY_SIZE(wc_status) ? \
+                wc_status[(status)] : "unexpected completion error")
+
 static void
 rpcrdma_sendcq_process_wc(struct ib_wc *wc)
 {
-        struct rpcrdma_mw *frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+        if (likely(wc->status == IB_WC_SUCCESS))
+                return;
 
-        dprintk("RPC: %s: frmr %p status %X opcode %d\n",
-                __func__, frmr, wc->status, wc->opcode);
+        /* WARNING: Only wr_id and status are reliable at this point */
+        if (wc->wr_id == 0ULL) {
+                if (wc->status != IB_WC_WR_FLUSH_ERR)
+                        pr_err("RPC: %s: SEND: %s\n",
+                               __func__, COMPLETION_MSG(wc->status));
+        } else {
+                struct rpcrdma_mw *r;
 
-        if (wc->wr_id == 0ULL)
-                return;
-        if (wc->status != IB_WC_SUCCESS)
-                frmr->r.frmr.fr_state = FRMR_IS_STALE;
+                r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+                r->r.frmr.fr_state = FRMR_IS_STALE;
+                pr_err("RPC: %s: frmr %p (stale): %s\n",
+                       __func__, r, COMPLETION_MSG(wc->status));
+        }
 }
 
 static int
@@ -248,33 +285,32 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
         struct rpcrdma_rep *rep =
                         (struct rpcrdma_rep *)(unsigned long)wc->wr_id;
 
-        dprintk("RPC: %s: rep %p status %X opcode %X length %u\n",
-                __func__, rep, wc->status, wc->opcode, wc->byte_len);
+        /* WARNING: Only wr_id and status are reliable at this point */
+        if (wc->status != IB_WC_SUCCESS)
+                goto out_fail;
 
-        if (wc->status != IB_WC_SUCCESS) {
-                rep->rr_len = ~0U;
-                goto out_schedule;
-        }
+        /* status == SUCCESS means all fields in wc are trustworthy */
         if (wc->opcode != IB_WC_RECV)
                 return;
 
+        dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
+                __func__, rep, wc->byte_len);
+
         rep->rr_len = wc->byte_len;
         ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
-                                   rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
-
-        if (rep->rr_len >= 16) {
-                struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
-                unsigned int credits = ntohl(p->rm_credit);
-
-                if (credits == 0)
-                        credits = 1;    /* don't deadlock */
-                else if (credits > rep->rr_buffer->rb_max_requests)
-                        credits = rep->rr_buffer->rb_max_requests;
-                atomic_set(&rep->rr_buffer->rb_credits, credits);
-        }
+                                   rdmab_addr(rep->rr_rdmabuf),
+                                   rep->rr_len, DMA_FROM_DEVICE);
+        prefetch(rdmab_to_msg(rep->rr_rdmabuf));
 
 out_schedule:
         list_add_tail(&rep->rr_list, sched_list);
+        return;
+out_fail:
+        if (wc->status != IB_WC_WR_FLUSH_ERR)
+                pr_err("RPC: %s: rep %p: %s\n",
+                       __func__, rep, COMPLETION_MSG(wc->status));
+        rep->rr_len = ~0U;
+        goto out_schedule;
 }
 
 static int
@@ -390,8 +426,8 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
         struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
 #endif
-        struct ib_qp_attr attr;
-        struct ib_qp_init_attr iattr;
+        struct ib_qp_attr *attr = &ia->ri_qp_attr;
+        struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
         int connstate = 0;
 
         switch (event->event) {
@@ -414,12 +450,13 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
                 break;
         case RDMA_CM_EVENT_ESTABLISHED:
                 connstate = 1;
-                ib_query_qp(ia->ri_id->qp, &attr,
+                ib_query_qp(ia->ri_id->qp, attr,
                             IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
-                            &iattr);
+                            iattr);
                 dprintk("RPC: %s: %d responder resources"
                         " (%d initiator)\n",
-                        __func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
+                        __func__, attr->max_dest_rd_atomic,
+                        attr->max_rd_atomic);
                 goto connected;
         case RDMA_CM_EVENT_CONNECT_ERROR:
                 connstate = -ENOTCONN;
@@ -436,11 +473,10 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
         case RDMA_CM_EVENT_DEVICE_REMOVAL:
                 connstate = -ENODEV;
 connected:
-                atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
                 dprintk("RPC: %s: %sconnected\n",
                         __func__, connstate > 0 ? "" : "dis");
                 ep->rep_connected = connstate;
-                ep->rep_func(ep);
+                rpcrdma_conn_func(ep);
                 wake_up_all(&ep->rep_connect_wait);
                 /*FALLTHROUGH*/
         default:
@@ -453,7 +489,7 @@ connected:
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
         if (connstate == 1) {
-                int ird = attr.max_dest_rd_atomic;
+                int ird = attr->max_dest_rd_atomic;
                 int tird = ep->rep_remote_cma.responder_resources;
                 printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
                         "on %s, memreg %d slots %d ird %d%s\n",
@@ -554,8 +590,8 @@ int
 rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 {
         int rc, mem_priv;
-        struct ib_device_attr devattr;
         struct rpcrdma_ia *ia = &xprt->rx_ia;
+        struct ib_device_attr *devattr = &ia->ri_devattr;
 
         ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
         if (IS_ERR(ia->ri_id)) {
@@ -571,26 +607,21 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                 goto out2;
         }
 
-        /*
-         * Query the device to determine if the requested memory
-         * registration strategy is supported. If it isn't, set the
-         * strategy to a globally supported model.
-         */
-        rc = ib_query_device(ia->ri_id->device, &devattr);
+        rc = ib_query_device(ia->ri_id->device, devattr);
         if (rc) {
                 dprintk("RPC: %s: ib_query_device failed %d\n",
                         __func__, rc);
-                goto out2;
+                goto out3;
         }
 
-        if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
+        if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
                 ia->ri_have_dma_lkey = 1;
                 ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
         }
 
         if (memreg == RPCRDMA_FRMR) {
                 /* Requires both frmr reg and local dma lkey */
-                if ((devattr.device_cap_flags &
+                if ((devattr->device_cap_flags &
                     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
                     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
                         dprintk("RPC: %s: FRMR registration "
@@ -600,7 +631,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                         /* Mind the ia limit on FRMR page list depth */
                         ia->ri_max_frmr_depth = min_t(unsigned int,
                                 RPCRDMA_MAX_DATA_SEGS,
                                 devattr->max_fast_reg_page_list_len);
                 }
         }
         if (memreg == RPCRDMA_MTHCAFMR) {
@@ -638,14 +669,14 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
638 | "phys register failed with %lX\n", | 669 | "phys register failed with %lX\n", |
639 | __func__, PTR_ERR(ia->ri_bind_mem)); | 670 | __func__, PTR_ERR(ia->ri_bind_mem)); |
640 | rc = -ENOMEM; | 671 | rc = -ENOMEM; |
641 | goto out2; | 672 | goto out3; |
642 | } | 673 | } |
643 | break; | 674 | break; |
644 | default: | 675 | default: |
645 | printk(KERN_ERR "RPC: Unsupported memory " | 676 | printk(KERN_ERR "RPC: Unsupported memory " |
646 | "registration mode: %d\n", memreg); | 677 | "registration mode: %d\n", memreg); |
647 | rc = -ENOMEM; | 678 | rc = -ENOMEM; |
648 | goto out2; | 679 | goto out3; |
649 | } | 680 | } |
650 | dprintk("RPC: %s: memory registration strategy is %d\n", | 681 | dprintk("RPC: %s: memory registration strategy is %d\n", |
651 | __func__, memreg); | 682 | __func__, memreg); |
@@ -655,6 +686,10 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
655 | 686 | ||
656 | rwlock_init(&ia->ri_qplock); | 687 | rwlock_init(&ia->ri_qplock); |
657 | return 0; | 688 | return 0; |
689 | |||
690 | out3: | ||
691 | ib_dealloc_pd(ia->ri_pd); | ||
692 | ia->ri_pd = NULL; | ||
658 | out2: | 693 | out2: |
659 | rdma_destroy_id(ia->ri_id); | 694 | rdma_destroy_id(ia->ri_id); |
660 | ia->ri_id = NULL; | 695 | ia->ri_id = NULL; |
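The new out3 label extends the unwind chain, so ib_dealloc_pd() now runs before rdma_destroy_id() on every failure past PD allocation. A minimal sketch of the stacked-goto idiom, with hypothetical acquire/release helpers:

int acquire_a(void);	/* hypothetical */
int acquire_b(void);	/* hypothetical */
void release_a(void);	/* hypothetical */

static int setup_sketch(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto out;
	rc = acquire_b();
	if (rc)
		goto out_release_a;	/* undo only what already succeeded */
	return 0;

out_release_a:
	release_a();
out:
	return rc;
}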
@@ -698,20 +733,13 @@ int | |||
698 | rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | 733 | rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, |
699 | struct rpcrdma_create_data_internal *cdata) | 734 | struct rpcrdma_create_data_internal *cdata) |
700 | { | 735 | { |
701 | struct ib_device_attr devattr; | 736 | struct ib_device_attr *devattr = &ia->ri_devattr; |
702 | struct ib_cq *sendcq, *recvcq; | 737 | struct ib_cq *sendcq, *recvcq; |
703 | int rc, err; | 738 | int rc, err; |
704 | 739 | ||
705 | rc = ib_query_device(ia->ri_id->device, &devattr); | ||
706 | if (rc) { | ||
707 | dprintk("RPC: %s: ib_query_device failed %d\n", | ||
708 | __func__, rc); | ||
709 | return rc; | ||
710 | } | ||
711 | |||
712 | /* check provider's send/recv wr limits */ | 740 | /* check provider's send/recv wr limits */ |
713 | if (cdata->max_requests > devattr.max_qp_wr) | 741 | if (cdata->max_requests > devattr->max_qp_wr) |
714 | cdata->max_requests = devattr.max_qp_wr; | 742 | cdata->max_requests = devattr->max_qp_wr; |
715 | 743 | ||
716 | ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; | 744 | ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; |
717 | ep->rep_attr.qp_context = ep; | 745 | ep->rep_attr.qp_context = ep; |
@@ -746,8 +774,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
746 | 774 | ||
747 | } | 775 | } |
748 | ep->rep_attr.cap.max_send_wr *= depth; | 776 | ep->rep_attr.cap.max_send_wr *= depth; |
749 | if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) { | 777 | if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { |
750 | cdata->max_requests = devattr.max_qp_wr / depth; | 778 | cdata->max_requests = devattr->max_qp_wr / depth; |
751 | if (!cdata->max_requests) | 779 | if (!cdata->max_requests) |
752 | return -EINVAL; | 780 | return -EINVAL; |
753 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * | 781 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * |
@@ -766,6 +794,14 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
766 | ep->rep_attr.qp_type = IB_QPT_RC; | 794 | ep->rep_attr.qp_type = IB_QPT_RC; |
767 | ep->rep_attr.port_num = ~0; | 795 | ep->rep_attr.port_num = ~0; |
768 | 796 | ||
797 | if (cdata->padding) { | ||
798 | ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding, | ||
799 | GFP_KERNEL); | ||
800 | if (IS_ERR(ep->rep_padbuf)) | ||
801 | return PTR_ERR(ep->rep_padbuf); | ||
802 | } else | ||
803 | ep->rep_padbuf = NULL; | ||
804 | |||
769 | dprintk("RPC: %s: requested max: dtos: send %d recv %d; " | 805 | dprintk("RPC: %s: requested max: dtos: send %d recv %d; " |
770 | "iovs: send %d recv %d\n", | 806 | "iovs: send %d recv %d\n", |
771 | __func__, | 807 | __func__, |
@@ -781,7 +817,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
781 | else if (ep->rep_cqinit <= 2) | 817 | else if (ep->rep_cqinit <= 2) |
782 | ep->rep_cqinit = 0; | 818 | ep->rep_cqinit = 0; |
783 | INIT_CQCOUNT(ep); | 819 | INIT_CQCOUNT(ep); |
784 | ep->rep_ia = ia; | ||
785 | init_waitqueue_head(&ep->rep_connect_wait); | 820 | init_waitqueue_head(&ep->rep_connect_wait); |
786 | INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); | 821 | INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); |
787 | 822 | ||
@@ -831,10 +866,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
831 | 866 | ||
832 | /* Client offers RDMA Read but does not initiate */ | 867 | /* Client offers RDMA Read but does not initiate */ |
833 | ep->rep_remote_cma.initiator_depth = 0; | 868 | ep->rep_remote_cma.initiator_depth = 0; |
834 | if (devattr.max_qp_rd_atom > 32) /* arbitrary but <= 255 */ | 869 | if (devattr->max_qp_rd_atom > 32) /* arbitrary but <= 255 */ |
835 | ep->rep_remote_cma.responder_resources = 32; | 870 | ep->rep_remote_cma.responder_resources = 32; |
836 | else | 871 | else |
837 | ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom; | 872 | ep->rep_remote_cma.responder_resources = |
873 | devattr->max_qp_rd_atom; | ||
838 | 874 | ||
839 | ep->rep_remote_cma.retry_count = 7; | 875 | ep->rep_remote_cma.retry_count = 7; |
840 | ep->rep_remote_cma.flow_control = 0; | 876 | ep->rep_remote_cma.flow_control = 0; |
@@ -848,6 +884,7 @@ out2: | |||
848 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", | 884 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", |
849 | __func__, err); | 885 | __func__, err); |
850 | out1: | 886 | out1: |
887 | rpcrdma_free_regbuf(ia, ep->rep_padbuf); | ||
851 | return rc; | 888 | return rc; |
852 | } | 889 | } |
853 | 890 | ||
@@ -874,11 +911,7 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) | |||
874 | ia->ri_id->qp = NULL; | 911 | ia->ri_id->qp = NULL; |
875 | } | 912 | } |
876 | 913 | ||
877 | /* padding - could be done in rpcrdma_buffer_destroy... */ | 914 | rpcrdma_free_regbuf(ia, ep->rep_padbuf); |
878 | if (ep->rep_pad_mr) { | ||
879 | rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad); | ||
880 | ep->rep_pad_mr = NULL; | ||
881 | } | ||
882 | 915 | ||
883 | rpcrdma_clean_cq(ep->rep_attr.recv_cq); | 916 | rpcrdma_clean_cq(ep->rep_attr.recv_cq); |
884 | rc = ib_destroy_cq(ep->rep_attr.recv_cq); | 917 | rc = ib_destroy_cq(ep->rep_attr.recv_cq); |
@@ -1048,6 +1081,48 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) | |||
1048 | } | 1081 | } |
1049 | } | 1082 | } |
1050 | 1083 | ||
1084 | static struct rpcrdma_req * | ||
1085 | rpcrdma_create_req(struct rpcrdma_xprt *r_xprt) | ||
1086 | { | ||
1087 | struct rpcrdma_req *req; | ||
1088 | |||
1089 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
1090 | if (req == NULL) | ||
1091 | return ERR_PTR(-ENOMEM); | ||
1092 | |||
1093 | req->rl_buffer = &r_xprt->rx_buf; | ||
1094 | return req; | ||
1095 | } | ||
1096 | |||
1097 | static struct rpcrdma_rep * | ||
1098 | rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt) | ||
1099 | { | ||
1100 | struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; | ||
1101 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
1102 | struct rpcrdma_rep *rep; | ||
1103 | int rc; | ||
1104 | |||
1105 | rc = -ENOMEM; | ||
1106 | rep = kzalloc(sizeof(*rep), GFP_KERNEL); | ||
1107 | if (rep == NULL) | ||
1108 | goto out; | ||
1109 | |||
1110 | rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize, | ||
1111 | GFP_KERNEL); | ||
1112 | if (IS_ERR(rep->rr_rdmabuf)) { | ||
1113 | rc = PTR_ERR(rep->rr_rdmabuf); | ||
1114 | goto out_free; | ||
1115 | } | ||
1116 | |||
1117 | rep->rr_buffer = &r_xprt->rx_buf; | ||
1118 | return rep; | ||
1119 | |||
1120 | out_free: | ||
1121 | kfree(rep); | ||
1122 | out: | ||
1123 | return ERR_PTR(rc); | ||
1124 | } | ||
1125 | |||
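Both constructors return either a valid object or an errno encoded in the pointer itself. A short sketch of that ERR_PTR convention, matching how the rewritten rpcrdma_buffer_create() below consumes these helpers (the wrapper name is invented):

#include <linux/err.h>

static int alloc_one_rep(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep **out)
{
	struct rpcrdma_rep *rep = rpcrdma_create_rep(r_xprt);

	if (IS_ERR(rep))
		return PTR_ERR(rep);	/* e.g. -ENOMEM from the regbuf alloc */
	*out = rep;
	return 0;
}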
1051 | static int | 1126 | static int |
1052 | rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) | 1127 | rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) |
1053 | { | 1128 | { |
@@ -1134,27 +1209,26 @@ out_free: | |||
1134 | } | 1209 | } |
1135 | 1210 | ||
1136 | int | 1211 | int |
1137 | rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, | 1212 | rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
1138 | struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata) | ||
1139 | { | 1213 | { |
1214 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
1215 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
1216 | struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; | ||
1140 | char *p; | 1217 | char *p; |
1141 | size_t len, rlen, wlen; | 1218 | size_t len; |
1142 | int i, rc; | 1219 | int i, rc; |
1143 | 1220 | ||
1144 | buf->rb_max_requests = cdata->max_requests; | 1221 | buf->rb_max_requests = cdata->max_requests; |
1145 | spin_lock_init(&buf->rb_lock); | 1222 | spin_lock_init(&buf->rb_lock); |
1146 | atomic_set(&buf->rb_credits, 1); | ||
1147 | 1223 | ||
1148 | /* Need to allocate: | 1224 | /* Need to allocate: |
1149 | * 1. arrays for send and recv pointers | 1225 | * 1. arrays for send and recv pointers |
1150 | * 2. arrays of struct rpcrdma_req to fill in pointers | 1226 | * 2. arrays of struct rpcrdma_req to fill in pointers |
1151 | * 3. array of struct rpcrdma_rep for replies | 1227 | * 3. array of struct rpcrdma_rep for replies |
1152 | * 4. padding, if any | ||
1153 | * Send/recv buffers in req/rep need to be registered | 1228 | * Send/recv buffers in req/rep need to be registered |
1154 | */ | 1229 | */ |
1155 | len = buf->rb_max_requests * | 1230 | len = buf->rb_max_requests * |
1156 | (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *)); | 1231 | (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *)); |
1157 | len += cdata->padding; | ||
1158 | 1232 | ||
1159 | p = kzalloc(len, GFP_KERNEL); | 1233 | p = kzalloc(len, GFP_KERNEL); |
1160 | if (p == NULL) { | 1234 | if (p == NULL) { |
@@ -1170,17 +1244,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, | |||
1170 | buf->rb_recv_bufs = (struct rpcrdma_rep **) p; | 1244 | buf->rb_recv_bufs = (struct rpcrdma_rep **) p; |
1171 | p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; | 1245 | p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; |
1172 | 1246 | ||
1173 | /* | ||
1174 | * Register the zeroed pad buffer, if any. | ||
1175 | */ | ||
1176 | if (cdata->padding) { | ||
1177 | rc = rpcrdma_register_internal(ia, p, cdata->padding, | ||
1178 | &ep->rep_pad_mr, &ep->rep_pad); | ||
1179 | if (rc) | ||
1180 | goto out; | ||
1181 | } | ||
1182 | p += cdata->padding; | ||
1183 | |||
1184 | INIT_LIST_HEAD(&buf->rb_mws); | 1247 | INIT_LIST_HEAD(&buf->rb_mws); |
1185 | INIT_LIST_HEAD(&buf->rb_all); | 1248 | INIT_LIST_HEAD(&buf->rb_all); |
1186 | switch (ia->ri_memreg_strategy) { | 1249 | switch (ia->ri_memreg_strategy) { |
@@ -1198,62 +1261,29 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, | |||
1198 | break; | 1261 | break; |
1199 | } | 1262 | } |
1200 | 1263 | ||
1201 | /* | ||
1202 | * Allocate/init the request/reply buffers. Doing this | ||
1203 | * using kmalloc for now -- one for each buf. | ||
1204 | */ | ||
1205 | wlen = 1 << fls(cdata->inline_wsize + sizeof(struct rpcrdma_req)); | ||
1206 | rlen = 1 << fls(cdata->inline_rsize + sizeof(struct rpcrdma_rep)); | ||
1207 | dprintk("RPC: %s: wlen = %zu, rlen = %zu\n", | ||
1208 | __func__, wlen, rlen); | ||
1209 | |||
1210 | for (i = 0; i < buf->rb_max_requests; i++) { | 1264 | for (i = 0; i < buf->rb_max_requests; i++) { |
1211 | struct rpcrdma_req *req; | 1265 | struct rpcrdma_req *req; |
1212 | struct rpcrdma_rep *rep; | 1266 | struct rpcrdma_rep *rep; |
1213 | 1267 | ||
1214 | req = kmalloc(wlen, GFP_KERNEL); | 1268 | req = rpcrdma_create_req(r_xprt); |
1215 | if (req == NULL) { | 1269 | if (IS_ERR(req)) { |
1216 | dprintk("RPC: %s: request buffer %d alloc" | 1270 | dprintk("RPC: %s: request buffer %d alloc" |
1217 | " failed\n", __func__, i); | 1271 | " failed\n", __func__, i); |
1218 | rc = -ENOMEM; | 1272 | rc = PTR_ERR(req); |
1219 | goto out; | 1273 | goto out; |
1220 | } | 1274 | } |
1221 | memset(req, 0, sizeof(struct rpcrdma_req)); | ||
1222 | buf->rb_send_bufs[i] = req; | 1275 | buf->rb_send_bufs[i] = req; |
1223 | buf->rb_send_bufs[i]->rl_buffer = buf; | ||
1224 | 1276 | ||
1225 | rc = rpcrdma_register_internal(ia, req->rl_base, | 1277 | rep = rpcrdma_create_rep(r_xprt); |
1226 | wlen - offsetof(struct rpcrdma_req, rl_base), | 1278 | if (IS_ERR(rep)) { |
1227 | &buf->rb_send_bufs[i]->rl_handle, | ||
1228 | &buf->rb_send_bufs[i]->rl_iov); | ||
1229 | if (rc) | ||
1230 | goto out; | ||
1231 | |||
1232 | buf->rb_send_bufs[i]->rl_size = wlen - | ||
1233 | sizeof(struct rpcrdma_req); | ||
1234 | |||
1235 | rep = kmalloc(rlen, GFP_KERNEL); | ||
1236 | if (rep == NULL) { | ||
1237 | dprintk("RPC: %s: reply buffer %d alloc failed\n", | 1279 | dprintk("RPC: %s: reply buffer %d alloc failed\n", |
1238 | __func__, i); | 1280 | __func__, i); |
1239 | rc = -ENOMEM; | 1281 | rc = PTR_ERR(rep); |
1240 | goto out; | 1282 | goto out; |
1241 | } | 1283 | } |
1242 | memset(rep, 0, sizeof(struct rpcrdma_rep)); | ||
1243 | buf->rb_recv_bufs[i] = rep; | 1284 | buf->rb_recv_bufs[i] = rep; |
1244 | buf->rb_recv_bufs[i]->rr_buffer = buf; | ||
1245 | |||
1246 | rc = rpcrdma_register_internal(ia, rep->rr_base, | ||
1247 | rlen - offsetof(struct rpcrdma_rep, rr_base), | ||
1248 | &buf->rb_recv_bufs[i]->rr_handle, | ||
1249 | &buf->rb_recv_bufs[i]->rr_iov); | ||
1250 | if (rc) | ||
1251 | goto out; | ||
1252 | |||
1253 | } | 1285 | } |
1254 | dprintk("RPC: %s: max_requests %d\n", | 1286 | |
1255 | __func__, buf->rb_max_requests); | ||
1256 | /* done */ | ||
1257 | return 0; | 1287 | return 0; |
1258 | out: | 1288 | out: |
1259 | rpcrdma_buffer_destroy(buf); | 1289 | rpcrdma_buffer_destroy(buf); |
@@ -1261,6 +1291,27 @@ out: | |||
1261 | } | 1291 | } |
1262 | 1292 | ||
1263 | static void | 1293 | static void |
1294 | rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep) | ||
1295 | { | ||
1296 | if (!rep) | ||
1297 | return; | ||
1298 | |||
1299 | rpcrdma_free_regbuf(ia, rep->rr_rdmabuf); | ||
1300 | kfree(rep); | ||
1301 | } | ||
1302 | |||
1303 | static void | ||
1304 | rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req) | ||
1305 | { | ||
1306 | if (!req) | ||
1307 | return; | ||
1308 | |||
1309 | rpcrdma_free_regbuf(ia, req->rl_sendbuf); | ||
1310 | rpcrdma_free_regbuf(ia, req->rl_rdmabuf); | ||
1311 | kfree(req); | ||
1312 | } | ||
1313 | |||
1314 | static void | ||
1264 | rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf) | 1315 | rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf) |
1265 | { | 1316 | { |
1266 | struct rpcrdma_mw *r; | 1317 | struct rpcrdma_mw *r; |
@@ -1315,18 +1366,10 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) | |||
1315 | dprintk("RPC: %s: entering\n", __func__); | 1366 | dprintk("RPC: %s: entering\n", __func__); |
1316 | 1367 | ||
1317 | for (i = 0; i < buf->rb_max_requests; i++) { | 1368 | for (i = 0; i < buf->rb_max_requests; i++) { |
1318 | if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) { | 1369 | if (buf->rb_recv_bufs) |
1319 | rpcrdma_deregister_internal(ia, | 1370 | rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]); |
1320 | buf->rb_recv_bufs[i]->rr_handle, | 1371 | if (buf->rb_send_bufs) |
1321 | &buf->rb_recv_bufs[i]->rr_iov); | 1372 | rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]); |
1322 | kfree(buf->rb_recv_bufs[i]); | ||
1323 | } | ||
1324 | if (buf->rb_send_bufs && buf->rb_send_bufs[i]) { | ||
1325 | rpcrdma_deregister_internal(ia, | ||
1326 | buf->rb_send_bufs[i]->rl_handle, | ||
1327 | &buf->rb_send_bufs[i]->rl_iov); | ||
1328 | kfree(buf->rb_send_bufs[i]); | ||
1329 | } | ||
1330 | } | 1373 | } |
1331 | 1374 | ||
1332 | switch (ia->ri_memreg_strategy) { | 1375 | switch (ia->ri_memreg_strategy) { |
@@ -1450,8 +1493,8 @@ rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) | |||
1450 | int i; | 1493 | int i; |
1451 | 1494 | ||
1452 | for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++) | 1495 | for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++) |
1453 | rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf); | 1496 | rpcrdma_buffer_put_mr(&seg->rl_mw, buf); |
1454 | rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf); | 1497 | rpcrdma_buffer_put_mr(&seg1->rl_mw, buf); |
1455 | } | 1498 | } |
1456 | 1499 | ||
1457 | static void | 1500 | static void |
@@ -1537,7 +1580,7 @@ rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf, | |||
1537 | list_add(&r->mw_list, stale); | 1580 | list_add(&r->mw_list, stale); |
1538 | continue; | 1581 | continue; |
1539 | } | 1582 | } |
1540 | req->rl_segments[i].mr_chunk.rl_mw = r; | 1583 | req->rl_segments[i].rl_mw = r; |
1541 | if (unlikely(i-- == 0)) | 1584 | if (unlikely(i-- == 0)) |
1542 | return req; /* Success */ | 1585 | return req; /* Success */ |
1543 | } | 1586 | } |
@@ -1559,7 +1602,7 @@ rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) | |||
1559 | r = list_entry(buf->rb_mws.next, | 1602 | r = list_entry(buf->rb_mws.next, |
1560 | struct rpcrdma_mw, mw_list); | 1603 | struct rpcrdma_mw, mw_list); |
1561 | list_del(&r->mw_list); | 1604 | list_del(&r->mw_list); |
1562 | req->rl_segments[i].mr_chunk.rl_mw = r; | 1605 | req->rl_segments[i].rl_mw = r; |
1563 | if (unlikely(i-- == 0)) | 1606 | if (unlikely(i-- == 0)) |
1564 | return req; /* Success */ | 1607 | return req; /* Success */ |
1565 | } | 1608 | } |
@@ -1658,8 +1701,6 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req) | |||
1658 | struct rpcrdma_buffer *buffers = req->rl_buffer; | 1701 | struct rpcrdma_buffer *buffers = req->rl_buffer; |
1659 | unsigned long flags; | 1702 | unsigned long flags; |
1660 | 1703 | ||
1661 | if (req->rl_iov.length == 0) /* special case xprt_rdma_allocate() */ | ||
1662 | buffers = ((struct rpcrdma_req *) buffers)->rl_buffer; | ||
1663 | spin_lock_irqsave(&buffers->rb_lock, flags); | 1704 | spin_lock_irqsave(&buffers->rb_lock, flags); |
1664 | if (buffers->rb_recv_index < buffers->rb_max_requests) { | 1705 | if (buffers->rb_recv_index < buffers->rb_max_requests) { |
1665 | req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index]; | 1706 | req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index]; |
@@ -1688,7 +1729,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) | |||
1688 | * Wrappers for internal-use kmalloc memory registration, used by buffer code. | 1729 | * Wrappers for internal-use kmalloc memory registration, used by buffer code. |
1689 | */ | 1730 | */ |
1690 | 1731 | ||
1691 | int | 1732 | static int |
1692 | rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, | 1733 | rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, |
1693 | struct ib_mr **mrp, struct ib_sge *iov) | 1734 | struct ib_mr **mrp, struct ib_sge *iov) |
1694 | { | 1735 | { |
@@ -1739,7 +1780,7 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, | |||
1739 | return rc; | 1780 | return rc; |
1740 | } | 1781 | } |
1741 | 1782 | ||
1742 | int | 1783 | static int |
1743 | rpcrdma_deregister_internal(struct rpcrdma_ia *ia, | 1784 | rpcrdma_deregister_internal(struct rpcrdma_ia *ia, |
1744 | struct ib_mr *mr, struct ib_sge *iov) | 1785 | struct ib_mr *mr, struct ib_sge *iov) |
1745 | { | 1786 | { |
@@ -1757,6 +1798,61 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia, | |||
1757 | return rc; | 1798 | return rc; |
1758 | } | 1799 | } |
1759 | 1800 | ||
1801 | /** | ||
1802 | * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers | ||
1803 | * @ia: controlling rpcrdma_ia | ||
1804 | * @size: size of buffer to be allocated, in bytes | ||
1805 | * @flags: GFP flags | ||
1806 | * | ||
1807 | * Returns pointer to private header of an area of internally | ||
1808 | * registered memory, or an ERR_PTR. The registered buffer follows | ||
1809 | * the end of the private header. | ||
1810 | * | ||
1811 | * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for | ||
1812 | * receiving the payload of RDMA RECV operations. regbufs are not | ||
1813 | * used for RDMA READ/WRITE operations, thus are registered only for | ||
1814 | * LOCAL access. | ||
1815 | */ | ||
1816 | struct rpcrdma_regbuf * | ||
1817 | rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) | ||
1818 | { | ||
1819 | struct rpcrdma_regbuf *rb; | ||
1820 | int rc; | ||
1821 | |||
1822 | rc = -ENOMEM; | ||
1823 | rb = kmalloc(sizeof(*rb) + size, flags); | ||
1824 | if (rb == NULL) | ||
1825 | goto out; | ||
1826 | |||
1827 | rb->rg_size = size; | ||
1828 | rb->rg_owner = NULL; | ||
1829 | rc = rpcrdma_register_internal(ia, rb->rg_base, size, | ||
1830 | &rb->rg_mr, &rb->rg_iov); | ||
1831 | if (rc) | ||
1832 | goto out_free; | ||
1833 | |||
1834 | return rb; | ||
1835 | |||
1836 | out_free: | ||
1837 | kfree(rb); | ||
1838 | out: | ||
1839 | return ERR_PTR(rc); | ||
1840 | } | ||
1841 | |||
1842 | /** | ||
1843 | * rpcrdma_free_regbuf - deregister and free registered buffer | ||
1844 | * @ia: controlling rpcrdma_ia | ||
1845 | * @rb: regbuf to be deregistered and freed | ||
1846 | */ | ||
1847 | void | ||
1848 | rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) | ||
1849 | { | ||
1850 | if (rb) { | ||
1851 | rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov); | ||
1852 | kfree(rb); | ||
1853 | } | ||
1854 | } | ||
1855 | |||
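A hedged usage sketch tying the two new helpers together; the function name is invented, but the calls match the signatures above, and the middle comment mirrors what rpcrdma_ep_post_recv() does with rr_rdmabuf:

static int regbuf_lifecycle_sketch(struct rpcrdma_ia *ia, size_t size)
{
	struct rpcrdma_regbuf *rb;

	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		return PTR_ERR(rb);

	/* rdmab_addr()/rdmab_length()/rdmab_lkey() would fill an ib_sge
	 * here, exactly as the SEND and RECV posting paths do. */

	rpcrdma_free_regbuf(ia, rb);	/* deregisters, then kfree()s */
	return 0;
}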
1760 | /* | 1856 | /* |
1761 | * Wrappers for chunk registration, shared by read/write chunk code. | 1857 | * Wrappers for chunk registration, shared by read/write chunk code. |
1762 | */ | 1858 | */ |
@@ -1799,7 +1895,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, | |||
1799 | struct rpcrdma_xprt *r_xprt) | 1895 | struct rpcrdma_xprt *r_xprt) |
1800 | { | 1896 | { |
1801 | struct rpcrdma_mr_seg *seg1 = seg; | 1897 | struct rpcrdma_mr_seg *seg1 = seg; |
1802 | struct rpcrdma_mw *mw = seg1->mr_chunk.rl_mw; | 1898 | struct rpcrdma_mw *mw = seg1->rl_mw; |
1803 | struct rpcrdma_frmr *frmr = &mw->r.frmr; | 1899 | struct rpcrdma_frmr *frmr = &mw->r.frmr; |
1804 | struct ib_mr *mr = frmr->fr_mr; | 1900 | struct ib_mr *mr = frmr->fr_mr; |
1805 | struct ib_send_wr fastreg_wr, *bad_wr; | 1901 | struct ib_send_wr fastreg_wr, *bad_wr; |
@@ -1888,12 +1984,12 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg, | |||
1888 | struct ib_send_wr invalidate_wr, *bad_wr; | 1984 | struct ib_send_wr invalidate_wr, *bad_wr; |
1889 | int rc; | 1985 | int rc; |
1890 | 1986 | ||
1891 | seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; | 1987 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; |
1892 | 1988 | ||
1893 | memset(&invalidate_wr, 0, sizeof invalidate_wr); | 1989 | memset(&invalidate_wr, 0, sizeof invalidate_wr); |
1894 | invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw; | 1990 | invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; |
1895 | invalidate_wr.opcode = IB_WR_LOCAL_INV; | 1991 | invalidate_wr.opcode = IB_WR_LOCAL_INV; |
1896 | invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; | 1992 | invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; |
1897 | DECR_CQCOUNT(&r_xprt->rx_ep); | 1993 | DECR_CQCOUNT(&r_xprt->rx_ep); |
1898 | 1994 | ||
1899 | read_lock(&ia->ri_qplock); | 1995 | read_lock(&ia->ri_qplock); |
@@ -1903,7 +1999,7 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg, | |||
1903 | read_unlock(&ia->ri_qplock); | 1999 | read_unlock(&ia->ri_qplock); |
1904 | if (rc) { | 2000 | if (rc) { |
1905 | /* Force rpcrdma_buffer_get() to retry */ | 2001 | /* Force rpcrdma_buffer_get() to retry */ |
1906 | seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_STALE; | 2002 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; |
1907 | dprintk("RPC: %s: failed ib_post_send for invalidate," | 2003 | dprintk("RPC: %s: failed ib_post_send for invalidate," |
1908 | " status %i\n", __func__, rc); | 2004 | " status %i\n", __func__, rc); |
1909 | } | 2005 | } |
@@ -1935,8 +2031,7 @@ rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg, | |||
1935 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) | 2031 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) |
1936 | break; | 2032 | break; |
1937 | } | 2033 | } |
1938 | rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr, | 2034 | rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma); |
1939 | physaddrs, i, seg1->mr_dma); | ||
1940 | if (rc) { | 2035 | if (rc) { |
1941 | dprintk("RPC: %s: failed ib_map_phys_fmr " | 2036 | dprintk("RPC: %s: failed ib_map_phys_fmr " |
1942 | "%u@0x%llx+%i (%d)... status %i\n", __func__, | 2037 | "%u@0x%llx+%i (%d)... status %i\n", __func__, |
@@ -1945,7 +2040,7 @@ rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg, | |||
1945 | while (i--) | 2040 | while (i--) |
1946 | rpcrdma_unmap_one(ia, --seg); | 2041 | rpcrdma_unmap_one(ia, --seg); |
1947 | } else { | 2042 | } else { |
1948 | seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey; | 2043 | seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey; |
1949 | seg1->mr_base = seg1->mr_dma + pageoff; | 2044 | seg1->mr_base = seg1->mr_dma + pageoff; |
1950 | seg1->mr_nsegs = i; | 2045 | seg1->mr_nsegs = i; |
1951 | seg1->mr_len = len; | 2046 | seg1->mr_len = len; |
@@ -1962,7 +2057,7 @@ rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg, | |||
1962 | LIST_HEAD(l); | 2057 | LIST_HEAD(l); |
1963 | int rc; | 2058 | int rc; |
1964 | 2059 | ||
1965 | list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l); | 2060 | list_add(&seg1->rl_mw->r.fmr->list, &l); |
1966 | rc = ib_unmap_fmr(&l); | 2061 | rc = ib_unmap_fmr(&l); |
1967 | read_lock(&ia->ri_qplock); | 2062 | read_lock(&ia->ri_qplock); |
1968 | while (seg1->mr_nsegs--) | 2063 | while (seg1->mr_nsegs--) |
@@ -2104,11 +2199,13 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, | |||
2104 | 2199 | ||
2105 | recv_wr.next = NULL; | 2200 | recv_wr.next = NULL; |
2106 | recv_wr.wr_id = (u64) (unsigned long) rep; | 2201 | recv_wr.wr_id = (u64) (unsigned long) rep; |
2107 | recv_wr.sg_list = &rep->rr_iov; | 2202 | recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
2108 | recv_wr.num_sge = 1; | 2203 | recv_wr.num_sge = 1; |
2109 | 2204 | ||
2110 | ib_dma_sync_single_for_cpu(ia->ri_id->device, | 2205 | ib_dma_sync_single_for_cpu(ia->ri_id->device, |
2111 | rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL); | 2206 | rdmab_addr(rep->rr_rdmabuf), |
2207 | rdmab_length(rep->rr_rdmabuf), | ||
2208 | DMA_BIDIRECTIONAL); | ||
2112 | 2209 | ||
2113 | rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); | 2210 | rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); |
2114 | 2211 | ||
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index b799041b75bf..d1b70397c60f 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -70,6 +70,9 @@ struct rpcrdma_ia { | |||
70 | int ri_async_rc; | 70 | int ri_async_rc; |
71 | enum rpcrdma_memreg ri_memreg_strategy; | 71 | enum rpcrdma_memreg ri_memreg_strategy; |
72 | unsigned int ri_max_frmr_depth; | 72 | unsigned int ri_max_frmr_depth; |
73 | struct ib_device_attr ri_devattr; | ||
74 | struct ib_qp_attr ri_qp_attr; | ||
75 | struct ib_qp_init_attr ri_qp_init_attr; | ||
73 | }; | 76 | }; |
74 | 77 | ||
75 | /* | 78 | /* |
@@ -83,13 +86,9 @@ struct rpcrdma_ep { | |||
83 | atomic_t rep_cqcount; | 86 | atomic_t rep_cqcount; |
84 | int rep_cqinit; | 87 | int rep_cqinit; |
85 | int rep_connected; | 88 | int rep_connected; |
86 | struct rpcrdma_ia *rep_ia; | ||
87 | struct ib_qp_init_attr rep_attr; | 89 | struct ib_qp_init_attr rep_attr; |
88 | wait_queue_head_t rep_connect_wait; | 90 | wait_queue_head_t rep_connect_wait; |
89 | struct ib_sge rep_pad; /* holds zeroed pad */ | 91 | struct rpcrdma_regbuf *rep_padbuf; |
90 | struct ib_mr *rep_pad_mr; /* holds zeroed pad */ | ||
91 | void (*rep_func)(struct rpcrdma_ep *); | ||
92 | struct rpc_xprt *rep_xprt; /* for rep_func */ | ||
93 | struct rdma_conn_param rep_remote_cma; | 92 | struct rdma_conn_param rep_remote_cma; |
94 | struct sockaddr_storage rep_remote_addr; | 93 | struct sockaddr_storage rep_remote_addr; |
95 | struct delayed_work rep_connect_worker; | 94 | struct delayed_work rep_connect_worker; |
@@ -106,6 +105,44 @@ struct rpcrdma_ep { | |||
106 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) | 105 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) |
107 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) | 106 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) |
108 | 107 | ||
108 | /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV | ||
109 | * | ||
110 | * The below structure appears at the front of a large region of kmalloc'd | ||
111 | * memory, which always starts on a good alignment boundary. | ||
112 | */ | ||
113 | |||
114 | struct rpcrdma_regbuf { | ||
115 | size_t rg_size; | ||
116 | struct rpcrdma_req *rg_owner; | ||
117 | struct ib_mr *rg_mr; | ||
118 | struct ib_sge rg_iov; | ||
119 | __be32 rg_base[0] __attribute__ ((aligned(256))); | ||
120 | }; | ||
121 | |||
122 | static inline u64 | ||
123 | rdmab_addr(struct rpcrdma_regbuf *rb) | ||
124 | { | ||
125 | return rb->rg_iov.addr; | ||
126 | } | ||
127 | |||
128 | static inline u32 | ||
129 | rdmab_length(struct rpcrdma_regbuf *rb) | ||
130 | { | ||
131 | return rb->rg_iov.length; | ||
132 | } | ||
133 | |||
134 | static inline u32 | ||
135 | rdmab_lkey(struct rpcrdma_regbuf *rb) | ||
136 | { | ||
137 | return rb->rg_iov.lkey; | ||
138 | } | ||
139 | |||
140 | static inline struct rpcrdma_msg * | ||
141 | rdmab_to_msg(struct rpcrdma_regbuf *rb) | ||
142 | { | ||
143 | return (struct rpcrdma_msg *)rb->rg_base; | ||
144 | } | ||
145 | |||
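The three accessors let callers build an ib_sge without reaching into rg_iov directly; a sketch of that use (the helper is invented; rpcrdma_ep_post_recv() in verbs.c open-codes the same thing):

static void regbuf_to_sge(struct rpcrdma_regbuf *rb, struct ib_sge *sge)
{
	sge->addr   = rdmab_addr(rb);
	sge->length = rdmab_length(rb);
	sge->lkey   = rdmab_lkey(rb);
}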
109 | enum rpcrdma_chunktype { | 146 | enum rpcrdma_chunktype { |
110 | rpcrdma_noch = 0, | 147 | rpcrdma_noch = 0, |
111 | rpcrdma_readch, | 148 | rpcrdma_readch, |
@@ -134,22 +171,16 @@ enum rpcrdma_chunktype { | |||
134 | /* temporary static scatter/gather max */ | 171 | /* temporary static scatter/gather max */ |
135 | #define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */ | 172 | #define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */ |
136 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ | 173 | #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ |
137 | #define MAX_RPCRDMAHDR (\ | ||
138 | /* max supported RPC/RDMA header */ \ | ||
139 | sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \ | ||
140 | (sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32)) | ||
141 | 174 | ||
142 | struct rpcrdma_buffer; | 175 | struct rpcrdma_buffer; |
143 | 176 | ||
144 | struct rpcrdma_rep { | 177 | struct rpcrdma_rep { |
145 | unsigned int rr_len; /* actual received reply length */ | 178 | unsigned int rr_len; |
146 | struct rpcrdma_buffer *rr_buffer; /* home base for this structure */ | 179 | struct rpcrdma_buffer *rr_buffer; |
147 | struct rpc_xprt *rr_xprt; /* needed for request/reply matching */ | 180 | struct rpc_xprt *rr_xprt; |
148 | void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */ | 181 | void (*rr_func)(struct rpcrdma_rep *); |
149 | struct list_head rr_list; /* tasklet list */ | 182 | struct list_head rr_list; |
150 | struct ib_sge rr_iov; /* for posting */ | 183 | struct rpcrdma_regbuf *rr_rdmabuf; |
151 | struct ib_mr *rr_handle; /* handle for mem in rr_iov */ | ||
152 | char rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */ | ||
153 | }; | 184 | }; |
154 | 185 | ||
155 | /* | 186 | /* |
@@ -211,10 +242,7 @@ struct rpcrdma_mw { | |||
211 | */ | 242 | */ |
212 | 243 | ||
213 | struct rpcrdma_mr_seg { /* chunk descriptors */ | 244 | struct rpcrdma_mr_seg { /* chunk descriptors */ |
214 | union { /* chunk memory handles */ | 245 | struct rpcrdma_mw *rl_mw; /* registered MR */ |
215 | struct ib_mr *rl_mr; /* if registered directly */ | ||
216 | struct rpcrdma_mw *rl_mw; /* if registered from region */ | ||
217 | } mr_chunk; | ||
218 | u64 mr_base; /* registration result */ | 246 | u64 mr_base; /* registration result */ |
219 | u32 mr_rkey; /* registration result */ | 247 | u32 mr_rkey; /* registration result */ |
220 | u32 mr_len; /* length of chunk or segment */ | 248 | u32 mr_len; /* length of chunk or segment */ |
@@ -227,22 +255,27 @@ struct rpcrdma_mr_seg { /* chunk descriptors */ | |||
227 | }; | 255 | }; |
228 | 256 | ||
229 | struct rpcrdma_req { | 257 | struct rpcrdma_req { |
230 | size_t rl_size; /* actual length of buffer */ | ||
231 | unsigned int rl_niovs; /* 0, 2 or 4 */ | 258 | unsigned int rl_niovs; /* 0, 2 or 4 */ |
232 | unsigned int rl_nchunks; /* non-zero if chunks */ | 259 | unsigned int rl_nchunks; /* non-zero if chunks */ |
233 | unsigned int rl_connect_cookie; /* retry detection */ | 260 | unsigned int rl_connect_cookie; /* retry detection */ |
234 | enum rpcrdma_chunktype rl_rtype, rl_wtype; | 261 | enum rpcrdma_chunktype rl_rtype, rl_wtype; |
235 | struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ | 262 | struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ |
236 | struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ | 263 | struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ |
237 | struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */ | ||
238 | struct ib_sge rl_send_iov[4]; /* for active requests */ | 264 | struct ib_sge rl_send_iov[4]; /* for active requests */ |
239 | struct ib_sge rl_iov; /* for posting */ | 265 | struct rpcrdma_regbuf *rl_rdmabuf; |
240 | struct ib_mr *rl_handle; /* handle for mem in rl_iov */ | 266 | struct rpcrdma_regbuf *rl_sendbuf; |
241 | char rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */ | 267 | struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; |
242 | __u32 rl_xdr_buf[0]; /* start of returned rpc rq_buffer */ | ||
243 | }; | 268 | }; |
244 | #define rpcr_to_rdmar(r) \ | 269 | |
245 | container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0]) | 270 | static inline struct rpcrdma_req * |
271 | rpcr_to_rdmar(struct rpc_rqst *rqst) | ||
272 | { | ||
273 | void *buffer = rqst->rq_buffer; | ||
274 | struct rpcrdma_regbuf *rb; | ||
275 | |||
276 | rb = container_of(buffer, struct rpcrdma_regbuf, rg_base); | ||
277 | return rb->rg_owner; | ||
278 | } | ||
246 | 279 | ||
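rpcr_to_rdmar() only works if the allocation side stored the back-pointer first. A sketch of that counterpart, assuming (as the transport.c changes in this series presumably arrange) that the buffer handed to the RPC layer is a regbuf's rg_base:

static void *attach_sendbuf_sketch(struct rpcrdma_ia *ia,
				   struct rpcrdma_req *req, size_t size)
{
	struct rpcrdma_regbuf *rb;

	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		return NULL;
	rb->rg_owner = req;		/* the pointer rpcr_to_rdmar() follows */
	req->rl_sendbuf = rb;
	return rb->rg_base;		/* what becomes rqst->rq_buffer */
}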
247 | /* | 280 | /* |
248 | * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for | 281 | * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for |
@@ -252,7 +285,6 @@ struct rpcrdma_req { | |||
252 | */ | 285 | */ |
253 | struct rpcrdma_buffer { | 286 | struct rpcrdma_buffer { |
254 | spinlock_t rb_lock; /* protects indexes */ | 287 | spinlock_t rb_lock; /* protects indexes */ |
255 | atomic_t rb_credits; /* most recent server credits */ | ||
256 | int rb_max_requests;/* client max requests */ | 288 | int rb_max_requests;/* client max requests */ |
257 | struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ | 289 | struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ |
258 | struct list_head rb_all; | 290 | struct list_head rb_all; |
@@ -318,16 +350,16 @@ struct rpcrdma_stats { | |||
318 | * during unmount. | 350 | * during unmount. |
319 | */ | 351 | */ |
320 | struct rpcrdma_xprt { | 352 | struct rpcrdma_xprt { |
321 | struct rpc_xprt xprt; | 353 | struct rpc_xprt rx_xprt; |
322 | struct rpcrdma_ia rx_ia; | 354 | struct rpcrdma_ia rx_ia; |
323 | struct rpcrdma_ep rx_ep; | 355 | struct rpcrdma_ep rx_ep; |
324 | struct rpcrdma_buffer rx_buf; | 356 | struct rpcrdma_buffer rx_buf; |
325 | struct rpcrdma_create_data_internal rx_data; | 357 | struct rpcrdma_create_data_internal rx_data; |
326 | struct delayed_work rdma_connect; | 358 | struct delayed_work rx_connect_worker; |
327 | struct rpcrdma_stats rx_stats; | 359 | struct rpcrdma_stats rx_stats; |
328 | }; | 360 | }; |
329 | 361 | ||
330 | #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt) | 362 | #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt) |
331 | #define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data) | 363 | #define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data) |
332 | 364 | ||
333 | /* Setting this to 0 ensures interoperability with early servers. | 365 | /* Setting this to 0 ensures interoperability with early servers. |
@@ -358,9 +390,7 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *, | |||
358 | /* | 390 | /* |
359 | * Buffer calls - xprtrdma/verbs.c | 391 | * Buffer calls - xprtrdma/verbs.c |
360 | */ | 392 | */ |
361 | int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *, | 393 | int rpcrdma_buffer_create(struct rpcrdma_xprt *); |
362 | struct rpcrdma_ia *, | ||
363 | struct rpcrdma_create_data_internal *); | ||
364 | void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); | 394 | void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); |
365 | 395 | ||
366 | struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); | 396 | struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); |
@@ -368,16 +398,16 @@ void rpcrdma_buffer_put(struct rpcrdma_req *); | |||
368 | void rpcrdma_recv_buffer_get(struct rpcrdma_req *); | 398 | void rpcrdma_recv_buffer_get(struct rpcrdma_req *); |
369 | void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); | 399 | void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); |
370 | 400 | ||
371 | int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int, | ||
372 | struct ib_mr **, struct ib_sge *); | ||
373 | int rpcrdma_deregister_internal(struct rpcrdma_ia *, | ||
374 | struct ib_mr *, struct ib_sge *); | ||
375 | |||
376 | int rpcrdma_register_external(struct rpcrdma_mr_seg *, | 401 | int rpcrdma_register_external(struct rpcrdma_mr_seg *, |
377 | int, int, struct rpcrdma_xprt *); | 402 | int, int, struct rpcrdma_xprt *); |
378 | int rpcrdma_deregister_external(struct rpcrdma_mr_seg *, | 403 | int rpcrdma_deregister_external(struct rpcrdma_mr_seg *, |
379 | struct rpcrdma_xprt *); | 404 | struct rpcrdma_xprt *); |
380 | 405 | ||
406 | struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *, | ||
407 | size_t, gfp_t); | ||
408 | void rpcrdma_free_regbuf(struct rpcrdma_ia *, | ||
409 | struct rpcrdma_regbuf *); | ||
410 | |||
381 | /* | 411 | /* |
382 | * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c | 412 | * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c |
383 | */ | 413 | */ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 87ce7e8bb8dc..66891e32c5e3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -63,6 +63,8 @@ static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; | |||
63 | static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | 63 | static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; |
64 | static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | 64 | static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; |
65 | 65 | ||
66 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
67 | |||
66 | #define XS_TCP_LINGER_TO (15U * HZ) | 68 | #define XS_TCP_LINGER_TO (15U * HZ) |
67 | static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; | 69 | static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; |
68 | 70 | ||
@@ -75,8 +77,6 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; | |||
75 | * someone else's file names! | 77 | * someone else's file names! |
76 | */ | 78 | */ |
77 | 79 | ||
78 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
79 | |||
80 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | 80 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; |
81 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | 81 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; |
82 | static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; | 82 | static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; |
@@ -627,7 +627,7 @@ process_status: | |||
627 | * @xprt: transport | 627 | * @xprt: transport |
628 | * | 628 | * |
629 | * Initiates a graceful shutdown of the TCP socket by calling the | 629 | * Initiates a graceful shutdown of the TCP socket by calling the |
630 | * equivalent of shutdown(SHUT_WR); | 630 | * equivalent of shutdown(SHUT_RDWR); |
631 | */ | 631 | */ |
632 | static void xs_tcp_shutdown(struct rpc_xprt *xprt) | 632 | static void xs_tcp_shutdown(struct rpc_xprt *xprt) |
633 | { | 633 | { |
@@ -635,7 +635,7 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt) | |||
635 | struct socket *sock = transport->sock; | 635 | struct socket *sock = transport->sock; |
636 | 636 | ||
637 | if (sock != NULL) { | 637 | if (sock != NULL) { |
638 | kernel_sock_shutdown(sock, SHUT_WR); | 638 | kernel_sock_shutdown(sock, SHUT_RDWR); |
639 | trace_rpc_socket_shutdown(xprt, sock); | 639 | trace_rpc_socket_shutdown(xprt, sock); |
640 | } | 640 | } |
641 | } | 641 | } |
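For contrast with the sock_release() paths elsewhere in this file, a sketch of the distinction the corrected comment draws; both calls are real kernel APIs, and pairing them in one function is purely illustrative:

static void close_styles_sketch(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);	/* graceful: FIN both ways,
						 * struct socket stays alive */
	sock_release(sock);			/* teardown: frees the socket */
}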
@@ -718,9 +718,9 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
718 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 718 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
719 | -status); | 719 | -status); |
720 | case -ECONNRESET: | 720 | case -ECONNRESET: |
721 | xs_tcp_shutdown(xprt); | ||
722 | case -ECONNREFUSED: | 721 | case -ECONNREFUSED: |
723 | case -ENOTCONN: | 722 | case -ENOTCONN: |
723 | case -EADDRINUSE: | ||
724 | case -EPIPE: | 724 | case -EPIPE: |
725 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 725 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); |
726 | } | 726 | } |
@@ -773,6 +773,21 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s | |||
773 | sk->sk_error_report = transport->old_error_report; | 773 | sk->sk_error_report = transport->old_error_report; |
774 | } | 774 | } |
775 | 775 | ||
776 | static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) | ||
777 | { | ||
778 | smp_mb__before_atomic(); | ||
779 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | ||
780 | clear_bit(XPRT_CLOSING, &xprt->state); | ||
781 | smp_mb__after_atomic(); | ||
782 | } | ||
783 | |||
784 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) | ||
785 | { | ||
786 | xs_sock_reset_connection_flags(xprt); | ||
787 | /* Mark transport as closed and wake up all pending tasks */ | ||
788 | xprt_disconnect_done(xprt); | ||
789 | } | ||
790 | |||
776 | /** | 791 | /** |
777 | * xs_error_report - callback to handle TCP socket state errors | 792 | * xs_error_report - callback to handle TCP socket state errors |
778 | * @sk: socket | 793 | * @sk: socket |
@@ -792,11 +807,12 @@ static void xs_error_report(struct sock *sk) | |||
792 | err = -sk->sk_err; | 807 | err = -sk->sk_err; |
793 | if (err == 0) | 808 | if (err == 0) |
794 | goto out; | 809 | goto out; |
810 | /* Is this a reset event? */ | ||
811 | if (sk->sk_state == TCP_CLOSE) | ||
812 | xs_sock_mark_closed(xprt); | ||
795 | dprintk("RPC: xs_error_report client %p, error=%d...\n", | 813 | dprintk("RPC: xs_error_report client %p, error=%d...\n", |
796 | xprt, -err); | 814 | xprt, -err); |
797 | trace_rpc_socket_error(xprt, sk->sk_socket, err); | 815 | trace_rpc_socket_error(xprt, sk->sk_socket, err); |
798 | if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state)) | ||
799 | goto out; | ||
800 | xprt_wake_pending_tasks(xprt, err); | 816 | xprt_wake_pending_tasks(xprt, err); |
801 | out: | 817 | out: |
802 | read_unlock_bh(&sk->sk_callback_lock); | 818 | read_unlock_bh(&sk->sk_callback_lock); |
@@ -806,12 +822,11 @@ static void xs_reset_transport(struct sock_xprt *transport) | |||
806 | { | 822 | { |
807 | struct socket *sock = transport->sock; | 823 | struct socket *sock = transport->sock; |
808 | struct sock *sk = transport->inet; | 824 | struct sock *sk = transport->inet; |
825 | struct rpc_xprt *xprt = &transport->xprt; | ||
809 | 826 | ||
810 | if (sk == NULL) | 827 | if (sk == NULL) |
811 | return; | 828 | return; |
812 | 829 | ||
813 | transport->srcport = 0; | ||
814 | |||
815 | write_lock_bh(&sk->sk_callback_lock); | 830 | write_lock_bh(&sk->sk_callback_lock); |
816 | transport->inet = NULL; | 831 | transport->inet = NULL; |
817 | transport->sock = NULL; | 832 | transport->sock = NULL; |
@@ -820,8 +835,9 @@ static void xs_reset_transport(struct sock_xprt *transport) | |||
820 | 835 | ||
821 | xs_restore_old_callbacks(transport, sk); | 836 | xs_restore_old_callbacks(transport, sk); |
822 | write_unlock_bh(&sk->sk_callback_lock); | 837 | write_unlock_bh(&sk->sk_callback_lock); |
838 | xs_sock_reset_connection_flags(xprt); | ||
823 | 839 | ||
824 | trace_rpc_socket_close(&transport->xprt, sock); | 840 | trace_rpc_socket_close(xprt, sock); |
825 | sock_release(sock); | 841 | sock_release(sock); |
826 | } | 842 | } |
827 | 843 | ||
@@ -841,27 +857,12 @@ static void xs_close(struct rpc_xprt *xprt) | |||
841 | 857 | ||
842 | dprintk("RPC: xs_close xprt %p\n", xprt); | 858 | dprintk("RPC: xs_close xprt %p\n", xprt); |
843 | 859 | ||
844 | cancel_delayed_work_sync(&transport->connect_worker); | ||
845 | |||
846 | xs_reset_transport(transport); | 860 | xs_reset_transport(transport); |
847 | xprt->reestablish_timeout = 0; | 861 | xprt->reestablish_timeout = 0; |
848 | 862 | ||
849 | smp_mb__before_atomic(); | ||
850 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
851 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | ||
852 | clear_bit(XPRT_CLOSING, &xprt->state); | ||
853 | smp_mb__after_atomic(); | ||
854 | xprt_disconnect_done(xprt); | 863 | xprt_disconnect_done(xprt); |
855 | } | 864 | } |
856 | 865 | ||
857 | static void xs_tcp_close(struct rpc_xprt *xprt) | ||
858 | { | ||
859 | if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state)) | ||
860 | xs_close(xprt); | ||
861 | else | ||
862 | xs_tcp_shutdown(xprt); | ||
863 | } | ||
864 | |||
865 | static void xs_xprt_free(struct rpc_xprt *xprt) | 866 | static void xs_xprt_free(struct rpc_xprt *xprt) |
866 | { | 867 | { |
867 | xs_free_peer_addresses(xprt); | 868 | xs_free_peer_addresses(xprt); |
@@ -1032,7 +1033,6 @@ static void xs_udp_data_ready(struct sock *sk) | |||
1032 | */ | 1033 | */ |
1033 | static void xs_tcp_force_close(struct rpc_xprt *xprt) | 1034 | static void xs_tcp_force_close(struct rpc_xprt *xprt) |
1034 | { | 1035 | { |
1035 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1036 | xprt_force_disconnect(xprt); | 1036 | xprt_force_disconnect(xprt); |
1037 | } | 1037 | } |
1038 | 1038 | ||
@@ -1425,54 +1425,6 @@ out: | |||
1425 | read_unlock_bh(&sk->sk_callback_lock); | 1425 | read_unlock_bh(&sk->sk_callback_lock); |
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | /* | ||
1429 | * Do the equivalent of linger/linger2 handling for dealing with | ||
1430 | * broken servers that don't close the socket in a timely | ||
1431 | * fashion | ||
1432 | */ | ||
1433 | static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt, | ||
1434 | unsigned long timeout) | ||
1435 | { | ||
1436 | struct sock_xprt *transport; | ||
1437 | |||
1438 | if (xprt_test_and_set_connecting(xprt)) | ||
1439 | return; | ||
1440 | set_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
1441 | transport = container_of(xprt, struct sock_xprt, xprt); | ||
1442 | queue_delayed_work(rpciod_workqueue, &transport->connect_worker, | ||
1443 | timeout); | ||
1444 | } | ||
1445 | |||
1446 | static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) | ||
1447 | { | ||
1448 | struct sock_xprt *transport; | ||
1449 | |||
1450 | transport = container_of(xprt, struct sock_xprt, xprt); | ||
1451 | |||
1452 | if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) || | ||
1453 | !cancel_delayed_work(&transport->connect_worker)) | ||
1454 | return; | ||
1455 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
1456 | xprt_clear_connecting(xprt); | ||
1457 | } | ||
1458 | |||
1459 | static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) | ||
1460 | { | ||
1461 | smp_mb__before_atomic(); | ||
1462 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
1463 | clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1464 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | ||
1465 | clear_bit(XPRT_CLOSING, &xprt->state); | ||
1466 | smp_mb__after_atomic(); | ||
1467 | } | ||
1468 | |||
1469 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) | ||
1470 | { | ||
1471 | xs_sock_reset_connection_flags(xprt); | ||
1472 | /* Mark transport as closed and wake up all pending tasks */ | ||
1473 | xprt_disconnect_done(xprt); | ||
1474 | } | ||
1475 | |||
1476 | /** | 1428 | /** |
1477 | * xs_tcp_state_change - callback to handle TCP socket state changes | 1429 | * xs_tcp_state_change - callback to handle TCP socket state changes |
1478 | * @sk: socket whose state has changed | 1430 | * @sk: socket whose state has changed |
@@ -1521,7 +1473,6 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1521 | clear_bit(XPRT_CONNECTED, &xprt->state); | 1473 | clear_bit(XPRT_CONNECTED, &xprt->state); |
1522 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 1474 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
1523 | smp_mb__after_atomic(); | 1475 | smp_mb__after_atomic(); |
1524 | xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); | ||
1525 | break; | 1476 | break; |
1526 | case TCP_CLOSE_WAIT: | 1477 | case TCP_CLOSE_WAIT: |
1527 | /* The server initiated a shutdown of the socket */ | 1478 | /* The server initiated a shutdown of the socket */ |
@@ -1538,13 +1489,11 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1538 | break; | 1489 | break; |
1539 | case TCP_LAST_ACK: | 1490 | case TCP_LAST_ACK: |
1540 | set_bit(XPRT_CLOSING, &xprt->state); | 1491 | set_bit(XPRT_CLOSING, &xprt->state); |
1541 | xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); | ||
1542 | smp_mb__before_atomic(); | 1492 | smp_mb__before_atomic(); |
1543 | clear_bit(XPRT_CONNECTED, &xprt->state); | 1493 | clear_bit(XPRT_CONNECTED, &xprt->state); |
1544 | smp_mb__after_atomic(); | 1494 | smp_mb__after_atomic(); |
1545 | break; | 1495 | break; |
1546 | case TCP_CLOSE: | 1496 | case TCP_CLOSE: |
1547 | xs_tcp_cancel_linger_timeout(xprt); | ||
1548 | xs_sock_mark_closed(xprt); | 1497 | xs_sock_mark_closed(xprt); |
1549 | } | 1498 | } |
1550 | out: | 1499 | out: |
@@ -1667,6 +1616,40 @@ static unsigned short xs_get_random_port(void) | |||
1667 | } | 1616 | } |
1668 | 1617 | ||
1669 | /** | 1618 | /** |
1619 | * xs_sock_set_reuseport - set the socket's SO_REUSEPORT option | ||
1620 | * @sock: socket | ||
1621 | * | ||
1622 | * Note that this function has to be called on all sockets that share the | ||
1623 | * same port, and it must be called before binding. | ||
1624 | */ | ||
1625 | static void xs_sock_set_reuseport(struct socket *sock) | ||
1626 | { | ||
1627 | int opt = 1; | ||
1628 | |||
1629 | kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, | ||
1630 | (char *)&opt, sizeof(opt)); | ||
1631 | } | ||
1632 | |||
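A sketch of the ordering rule the comment above imposes; the wrapper is hypothetical, but it sequences the calls the same way xs_create_sock() does further down:

static int bind_with_reuseport_sketch(struct socket *sock,
				      struct sockaddr *addr, int addrlen)
{
	xs_sock_set_reuseport(sock);		/* must precede the bind */
	return kernel_bind(sock, addr, addrlen);
}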
1633 | static unsigned short xs_sock_getport(struct socket *sock) | ||
1634 | { | ||
1635 | struct sockaddr_storage buf; | ||
1636 | int buflen; | ||
1637 | unsigned short port = 0; | ||
1638 | |||
1639 | if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0) | ||
1640 | goto out; | ||
1641 | switch (buf.ss_family) { | ||
1642 | case AF_INET6: | ||
1643 | port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port); | ||
1644 | break; | ||
1645 | case AF_INET: | ||
1646 | port = ntohs(((struct sockaddr_in *)&buf)->sin_port); | ||
1647 | } | ||
1648 | out: | ||
1649 | return port; | ||
1650 | } | ||
1651 | |||
1652 | /** | ||
1670 | * xs_set_port - reset the port number in the remote endpoint address | 1653 | * xs_set_port - reset the port number in the remote endpoint address |
1671 | * @xprt: generic transport | 1654 | * @xprt: generic transport |
1672 | * @port: new port number | 1655 | * @port: new port number |
@@ -1680,6 +1663,12 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1680 | xs_update_peer_port(xprt); | 1663 | xs_update_peer_port(xprt); |
1681 | } | 1664 | } |
1682 | 1665 | ||
1666 | static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock) | ||
1667 | { | ||
1668 | if (transport->srcport == 0) | ||
1669 | transport->srcport = xs_sock_getport(sock); | ||
1670 | } | ||
1671 | |||
1683 | static unsigned short xs_get_srcport(struct sock_xprt *transport) | 1672 | static unsigned short xs_get_srcport(struct sock_xprt *transport) |
1684 | { | 1673 | { |
1685 | unsigned short port = transport->srcport; | 1674 | unsigned short port = transport->srcport; |
@@ -1833,7 +1822,8 @@ static void xs_dummy_setup_socket(struct work_struct *work) | |||
1833 | } | 1822 | } |
1834 | 1823 | ||
1835 | static struct socket *xs_create_sock(struct rpc_xprt *xprt, | 1824 | static struct socket *xs_create_sock(struct rpc_xprt *xprt, |
1836 | struct sock_xprt *transport, int family, int type, int protocol) | 1825 | struct sock_xprt *transport, int family, int type, |
1826 | int protocol, bool reuseport) | ||
1837 | { | 1827 | { |
1838 | struct socket *sock; | 1828 | struct socket *sock; |
1839 | int err; | 1829 | int err; |
@@ -1846,6 +1836,9 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, | |||
1846 | } | 1836 | } |
1847 | xs_reclassify_socket(family, sock); | 1837 | xs_reclassify_socket(family, sock); |
1848 | 1838 | ||
1839 | if (reuseport) | ||
1840 | xs_sock_set_reuseport(sock); | ||
1841 | |||
1849 | err = xs_bind(transport, sock); | 1842 | err = xs_bind(transport, sock); |
1850 | if (err) { | 1843 | if (err) { |
1851 | sock_release(sock); | 1844 | sock_release(sock); |
@@ -1903,7 +1896,6 @@ static int xs_local_setup_socket(struct sock_xprt *transport) | |||
1903 | struct socket *sock; | 1896 | struct socket *sock; |
1904 | int status = -EIO; | 1897 | int status = -EIO; |
1905 | 1898 | ||
1906 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
1907 | status = __sock_create(xprt->xprt_net, AF_LOCAL, | 1899 | status = __sock_create(xprt->xprt_net, AF_LOCAL, |
1908 | SOCK_STREAM, 0, &sock, 1); | 1900 | SOCK_STREAM, 0, &sock, 1); |
1909 | if (status < 0) { | 1901 | if (status < 0) { |
@@ -2044,10 +2036,9 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
2044 | struct socket *sock = transport->sock; | 2036 | struct socket *sock = transport->sock; |
2045 | int status = -EIO; | 2037 | int status = -EIO; |
2046 | 2038 | ||
2047 | /* Start by resetting any existing state */ | ||
2048 | xs_reset_transport(transport); | ||
2049 | sock = xs_create_sock(xprt, transport, | 2039 | sock = xs_create_sock(xprt, transport, |
2050 | xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP); | 2040 | xs_addr(xprt)->sa_family, SOCK_DGRAM, |
2041 | IPPROTO_UDP, false); | ||
2051 | if (IS_ERR(sock)) | 2042 | if (IS_ERR(sock)) |
2052 | goto out; | 2043 | goto out; |
2053 | 2044 | ||
@@ -2061,61 +2052,11 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
2061 | trace_rpc_socket_connect(xprt, sock, 0); | 2052 | trace_rpc_socket_connect(xprt, sock, 0); |
2062 | status = 0; | 2053 | status = 0; |
2063 | out: | 2054 | out: |
2055 | xprt_unlock_connect(xprt, transport); | ||
2064 | xprt_clear_connecting(xprt); | 2056 | xprt_clear_connecting(xprt); |
2065 | xprt_wake_pending_tasks(xprt, status); | 2057 | xprt_wake_pending_tasks(xprt, status); |
2066 | } | 2058 | } |
2067 | 2059 | ||
2068 | /* | ||
2069 | * We need to preserve the port number so the reply cache on the server can | ||
2070 | * find our cached RPC replies when we get around to reconnecting. | ||
2071 | */ | ||
2072 | static void xs_abort_connection(struct sock_xprt *transport) | ||
2073 | { | ||
2074 | int result; | ||
2075 | struct sockaddr any; | ||
2076 | |||
2077 | dprintk("RPC: disconnecting xprt %p to reuse port\n", transport); | ||
2078 | |||
2079 | /* | ||
2080 | * Disconnect the transport socket by doing a connect operation | ||
2081 | * with AF_UNSPEC. This should return immediately... | ||
2082 | */ | ||
2083 | memset(&any, 0, sizeof(any)); | ||
2084 | any.sa_family = AF_UNSPEC; | ||
2085 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); | ||
2086 | trace_rpc_socket_reset_connection(&transport->xprt, | ||
2087 | transport->sock, result); | ||
2088 | if (!result) | ||
2089 | xs_sock_reset_connection_flags(&transport->xprt); | ||
2090 | dprintk("RPC: AF_UNSPEC connect return code %d\n", result); | ||
2091 | } | ||
2092 | |||
2093 | static void xs_tcp_reuse_connection(struct sock_xprt *transport) | ||
2094 | { | ||
2095 | unsigned int state = transport->inet->sk_state; | ||
2096 | |||
2097 | if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) { | ||
2098 | /* we don't need to abort the connection if the socket | ||
2099 | * hasn't undergone a shutdown | ||
2100 | */ | ||
2101 | if (transport->inet->sk_shutdown == 0) | ||
2102 | return; | ||
2103 | dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n", | ||
2104 | __func__, transport->inet->sk_shutdown); | ||
2105 | } | ||
2106 | if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) { | ||
2107 | /* we don't need to abort the connection if the socket | ||
2108 | * hasn't undergone a shutdown | ||
2109 | */ | ||
2110 | if (transport->inet->sk_shutdown == 0) | ||
2111 | return; | ||
2112 | dprintk("RPC: %s: ESTABLISHED/SYN_SENT " | ||
2113 | "sk_shutdown set to %d\n", | ||
2114 | __func__, transport->inet->sk_shutdown); | ||
2115 | } | ||
2116 | xs_abort_connection(transport); | ||
2117 | } | ||
2118 | |||
2119 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | 2060 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |
2120 | { | 2061 | { |
2121 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 2062 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
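
[Editor's context] The two deleted helpers implemented the old approach: keep one socket alive indefinitely and "connect" it to AF_UNSPEC to break the association while preserving the local port. That trick is well-defined for dissolving a connected UDP socket but was always a grey area for TCP, which is why it needed the surrounding state checks; with SO_REUSEPORT a brand-new socket can simply bind the old port, so the whole path goes away. For reference, the retired trick in userspace form:

    /* the AF_UNSPEC disconnect trick: drop the association but keep
     * the descriptor and its local (address, port) binding */
    struct sockaddr any;

    memset(&any, 0, sizeof(any));
    any.sa_family = AF_UNSPEC;
    connect(fd, &any, sizeof(any));
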
@@ -2149,9 +2090,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2149 | sk->sk_allocation = GFP_ATOMIC; | 2090 | sk->sk_allocation = GFP_ATOMIC; |
2150 | 2091 | ||
2151 | /* socket options */ | 2092 | /* socket options */ |
2152 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | ||
2153 | sock_reset_flag(sk, SOCK_LINGER); | 2093 | sock_reset_flag(sk, SOCK_LINGER); |
2154 | tcp_sk(sk)->linger2 = 0; | ||
2155 | tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; | 2094 | tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; |
2156 | 2095 | ||
2157 | xprt_clear_connected(xprt); | 2096 | xprt_clear_connected(xprt); |
@@ -2174,6 +2113,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2174 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); | 2113 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); |
2175 | switch (ret) { | 2114 | switch (ret) { |
2176 | case 0: | 2115 | case 0: |
2116 | xs_set_srcport(transport, sock); | ||
2177 | case -EINPROGRESS: | 2117 | case -EINPROGRESS: |
2178 | /* SYN_SENT! */ | 2118 | /* SYN_SENT! */ |
2179 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) | 2119 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) |
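
[Editor's context] The new xs_set_srcport() call sits on the ret == 0 arm and deliberately falls through into the -EINPROGRESS bookkeeping: an immediate success (typical over loopback) is just a connect that finished early. Either way the local binding exists as soon as connect() has been issued, which is what makes recording the port here safe. The same switch shape in userspace, where remember_srcport() is a hypothetical stand-in:

    /* nonblocking connect: 0 and EINPROGRESS share the success path */
    int ret = connect(fd, addr, addrlen);

    if (ret == 0 || (ret < 0 && errno == EINPROGRESS)) {
        struct sockaddr_in sin;
        socklen_t len = sizeof(sin);

        /* the local binding is assigned when connect() is initiated */
        getsockname(fd, (struct sockaddr *)&sin, &len);
        remember_srcport(ntohs(sin.sin_port));  /* hypothetical helper */
    }
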
@@ -2200,25 +2140,13 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2200 | int status = -EIO; | 2140 | int status = -EIO; |
2201 | 2141 | ||
2202 | if (!sock) { | 2142 | if (!sock) { |
2203 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
2204 | sock = xs_create_sock(xprt, transport, | 2143 | sock = xs_create_sock(xprt, transport, |
2205 | xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP); | 2144 | xs_addr(xprt)->sa_family, SOCK_STREAM, |
2145 | IPPROTO_TCP, true); | ||
2206 | if (IS_ERR(sock)) { | 2146 | if (IS_ERR(sock)) { |
2207 | status = PTR_ERR(sock); | 2147 | status = PTR_ERR(sock); |
2208 | goto out; | 2148 | goto out; |
2209 | } | 2149 | } |
2210 | } else { | ||
2211 | int abort_and_exit; | ||
2212 | |||
2213 | abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT, | ||
2214 | &xprt->state); | ||
2215 | /* "close" the socket, preserving the local port */ | ||
2216 | set_bit(XPRT_CONNECTION_REUSE, &xprt->state); | ||
2217 | xs_tcp_reuse_connection(transport); | ||
2218 | clear_bit(XPRT_CONNECTION_REUSE, &xprt->state); | ||
2219 | |||
2220 | if (abort_and_exit) | ||
2221 | goto out_eagain; | ||
2222 | } | 2150 | } |
2223 | 2151 | ||
2224 | dprintk("RPC: worker connecting xprt %p via %s to " | 2152 | dprintk("RPC: worker connecting xprt %p via %s to " |
@@ -2245,6 +2173,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2245 | case 0: | 2173 | case 0: |
2246 | case -EINPROGRESS: | 2174 | case -EINPROGRESS: |
2247 | case -EALREADY: | 2175 | case -EALREADY: |
2176 | xprt_unlock_connect(xprt, transport); | ||
2248 | xprt_clear_connecting(xprt); | 2177 | xprt_clear_connecting(xprt); |
2249 | return; | 2178 | return; |
2250 | case -EINVAL: | 2179 | case -EINVAL: |
@@ -2254,13 +2183,15 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2254 | case -ECONNREFUSED: | 2183 | case -ECONNREFUSED: |
2255 | case -ECONNRESET: | 2184 | case -ECONNRESET: |
2256 | case -ENETUNREACH: | 2185 | case -ENETUNREACH: |
2186 | case -EADDRINUSE: | ||
2257 | case -ENOBUFS: | 2187 | case -ENOBUFS: |
2258 | /* retry with existing socket, after a delay */ | 2188 | /* retry with existing socket, after a delay */ |
2189 | xs_tcp_force_close(xprt); | ||
2259 | goto out; | 2190 | goto out; |
2260 | } | 2191 | } |
2261 | out_eagain: | ||
2262 | status = -EAGAIN; | 2192 | status = -EAGAIN; |
2263 | out: | 2193 | out: |
2194 | xprt_unlock_connect(xprt, transport); | ||
2264 | xprt_clear_connecting(xprt); | 2195 | xprt_clear_connecting(xprt); |
2265 | xprt_wake_pending_tasks(xprt, status); | 2196 | xprt_wake_pending_tasks(xprt, status); |
2266 | } | 2197 | } |
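
[Editor's context] -EADDRINUSE is newly possible here: with the source port pinned and SO_REUSEPORT set, connect() can fail this way while the previous connection's 4-tuple to the same server is still in use locally (for instance lingering in TIME_WAIT, which port reuse does not override for an identical 4-tuple). It is treated like the other transient network errors, but only after xs_tcp_force_close(), so the delayed retry starts from a fresh socket. A userspace analog, where make_bound_socket() is a hypothetical helper that redoes the SO_REUSEPORT-plus-bind setup:

    int fd = make_bound_socket(saved_srcport);   /* hypothetical */

    if (connect(fd, addr, addrlen) < 0 && errno == EADDRINUSE) {
        close(fd);   /* discard it; a failed connect taints the socket */
        /* back off, then retry with a fresh make_bound_socket();
         * the old 4-tuple normally clears within the FIN timeout */
    }
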
@@ -2283,6 +2214,11 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) | |||
2283 | { | 2214 | { |
2284 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 2215 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
2285 | 2216 | ||
2217 | WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); | ||
2218 | |||
2219 | /* Start by resetting any existing state */ | ||
2220 | xs_reset_transport(transport); | ||
2221 | |||
2286 | if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { | 2222 | if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { |
2287 | dprintk("RPC: xs_connect delayed xprt %p for %lu " | 2223 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
2288 | "seconds\n", | 2224 | "seconds\n", |
@@ -2559,7 +2495,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2559 | .buf_free = rpc_free, | 2495 | .buf_free = rpc_free, |
2560 | .send_request = xs_tcp_send_request, | 2496 | .send_request = xs_tcp_send_request, |
2561 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2497 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
2562 | .close = xs_tcp_close, | 2498 | .close = xs_tcp_shutdown, |
2563 | .destroy = xs_destroy, | 2499 | .destroy = xs_destroy, |
2564 | .print_stats = xs_tcp_print_stats, | 2500 | .print_stats = xs_tcp_print_stats, |
2565 | }; | 2501 | }; |
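
[Editor's context] Finally, the TCP ops table's ->close() method switches from xs_tcp_close() to xs_tcp_shutdown(): per this series, closing now requests an orderly shutdown of the connection (a FIN handshake) rather than releasing the socket outright, and the socket is only torn down once the state machine reaches TCP_CLOSE. In userspace terms the distinction is:

    shutdown(fd, SHUT_RDWR);  /* send FIN, peer reads EOF; descriptor
                                 and local port binding both survive */
    close(fd);                /* release everything; for the side that
                                 closed first, the port lingers in
                                 TIME_WAIT before becoming free */
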