about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author		Chuck Lever <chuck.lever@oracle.com>	2017-04-11 13:23:02 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2017-04-25 16:12:22 -0400
commit		91a10c52975a8c89e146a4f740e64cd147ba8e8a (patch)
tree		f9cf5ad426188a632479bfbd3890b02945d6e3ae
parent		fff09594edf5e9b8595a2cefdc07e54b70f81729 (diff)
xprtrdma: Use same device when mapping or syncing DMA buffers
When the underlying device driver is reloaded, ia->ri_device will be
replaced. All cached copies of that device pointer have to be updated
as well.

Commit 54cbd6b0c6b9 ("xprtrdma: Delay DMA mapping Send and Receive
buffers") added the rg_device field to each regbuf. As part of handling
a device removal, rpcrdma_dma_unmap_regbuf is invoked on all regbufs
for a transport.

Simply calling rpcrdma_dma_map_regbuf for each Receive buffer after
the driver has been reloaded should reinitialize rg_device correctly
for every case except rpcrdma_wc_receive, which still uses
rpcrdma_rep::rr_device.

Ensure the same device that was used to map a Receive buffer is also
used to sync it in rpcrdma_wc_receive by using rg_device there instead
of rr_device.

This is the only use of rr_device, so it can be removed.

The use of regbufs in the send path is also updated, for completeness.

Fixes: 54cbd6b0c6b9 ("xprtrdma: Delay DMA mapping Send and ... ")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	|  4 ++--
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	| 12 ++++++------
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	|  7 ++++++-
3 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a044be2d6ad7..e68131bed3ce 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -494,7 +494,7 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	}
 	sge->length = len;
 
-	ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
+	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
 				      sge->length, DMA_TO_DEVICE);
 	req->rl_send_wr.num_sge++;
 	return true;
@@ -523,7 +523,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	sge[sge_no].addr = rdmab_addr(rb);
 	sge[sge_no].length = xdr->head[0].iov_len;
 	sge[sge_no].lkey = rdmab_lkey(rb);
-	ib_dma_sync_single_for_device(device, sge[sge_no].addr,
+	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
 			sge[sge_no].length, DMA_TO_DEVICE);
 
 	/* If there is a Read chunk, the page list is being handled
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d4880d50d7be..c8813fb2163f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -180,7 +180,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	rep->rr_wc_flags = wc->wc_flags;
 	rep->rr_inv_rkey = wc->ex.invalidate_rkey;
 
-	ib_dma_sync_single_for_cpu(rep->rr_device,
+	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
 				   rdmab_addr(rep->rr_rdmabuf),
 				   rep->rr_len, DMA_FROM_DEVICE);
 
@@ -878,7 +878,6 @@ struct rpcrdma_rep *
 rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_rep *rep;
 	int rc;
 
@@ -894,7 +893,6 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 		goto out_free;
 	}
 
-	rep->rr_device = ia->ri_device;
 	rep->rr_cqe.done = rpcrdma_wc_receive;
 	rep->rr_rxprt = r_xprt;
 	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
@@ -1232,17 +1230,19 @@ rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
 bool
 __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 {
+	struct ib_device *device = ia->ri_device;
+
 	if (rb->rg_direction == DMA_NONE)
 		return false;
 
-	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
+	rb->rg_iov.addr = ib_dma_map_single(device,
 					    (void *)rb->rg_base,
 					    rdmab_length(rb),
 					    rb->rg_direction);
-	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
+	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
 		return false;
 
-	rb->rg_device = ia->ri_device;
+	rb->rg_device = device;
 	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
 	return true;
 }
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index af844fc30bd4..9d58260533fc 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -164,6 +164,12 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
 	return (struct rpcrdma_msg *)rb->rg_base;
 }
 
+static inline struct ib_device *
+rdmab_device(struct rpcrdma_regbuf *rb)
+{
+	return rb->rg_device;
+}
+
 #define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
 
 /* To ensure a transport can always make forward progress,
@@ -209,7 +215,6 @@ struct rpcrdma_rep {
 	unsigned int		rr_len;
 	int			rr_wc_flags;
 	u32			rr_inv_rkey;
-	struct ib_device	*rr_device;
 	struct rpcrdma_xprt	*rr_rxprt;
 	struct work_struct	rr_work;
 	struct list_head	rr_list;