author    Zhu Yanjun <yanjun.zhu@oracle.com>    2018-04-23 03:57:58 -0400
committer Doug Ledford <dledford@redhat.com>    2018-04-27 12:18:29 -0400
commit    e12ee8ce51435c4d24f437f10e0fce773505c674 (patch)
tree      24af7a11a40f19fd4ff0eec712083541ce5b2c2b /drivers/infiniband/sw
parent    0dff463a6a867072f2c779e2fed651b498901801 (diff)
IB/rxe: remove unused function variable
The functions rxe_mem_init_dma(), rxe_mem_init_user(), rxe_mem_init_fast() and copy_data() accept an rxe parameter that none of them ever use, so drop it and update all callers.

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
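For context, the change amounts to deleting a parameter that no function body reads and shrinking every call site by one argument. Below is a minimal standalone C sketch of that pattern; it is illustrative only, not rxe code, and the example_* names are invented for the sketch. Compiling the old form with -Wunused-parameter is one way such dead arguments get flagged.

/*
 * Minimal standalone sketch (not rxe code) of removing an unused
 * context parameter from a helper and updating its caller.
 */
#include <stdio.h>
#include <string.h>

struct example_pd { int id; };          /* stand-in for a protection domain */

/* Before (old shape, kept as a comment):
 * static int example_copy(struct example_dev *dev, struct example_pd *pd,
 *                         void *dst, const void *src, size_t len);
 * 'dev' was accepted but never read inside the function.
 */

/* After: the unused context argument is gone. */
static int example_copy(struct example_pd *pd, void *dst,
                        const void *src, size_t len)
{
        if (!pd)
                return -1;
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        struct example_pd pd = { .id = 1 };
        char buf[16];

        /* The caller passes one argument fewer, mirroring the
         * copy_data() call sites in the diff below. */
        if (example_copy(&pd, buf, "payload", sizeof("payload")))
                return 1;
        printf("copied: %s (pd %d)\n", buf, pd.id);
        return 0;
}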
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_comp.c    6
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_loc.h     8
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_mr.c     13
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_req.c     2
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_resp.c    3
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_verbs.c   6
6 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6cdc40ed8a9f..a285978aa7fe 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -355,10 +355,9 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
 				       struct rxe_pkt_info *pkt,
 				       struct rxe_send_wqe *wqe)
 {
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	int ret;
 
-	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, payload_addr(pkt),
 			payload_size(pkt), to_mem_obj, NULL);
 	if (ret)
@@ -374,12 +373,11 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
 					 struct rxe_pkt_info *pkt,
 					 struct rxe_send_wqe *wqe)
 {
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	int ret;
 
 	u64 atomic_orig = atmack_orig(pkt);
 
-	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, &atomic_orig,
 			sizeof(u64), to_mem_obj, NULL);
 	if (ret)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 2f8ab8eebcb1..a51ece596c43 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -106,20 +106,20 @@ enum copy_direction {
 	from_mem_obj,
 };
 
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
 		     int access, struct rxe_mem *mem);
 
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 		      u64 length, u64 iova, int access, struct ib_udata *udata,
 		      struct rxe_mem *mr);
 
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
 		      int max_pages, struct rxe_mem *mem);
 
 int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
 		 int length, enum copy_direction dir, u32 *crcp);
 
-int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+int copy_data(struct rxe_pd *pd, int access,
 	      struct rxe_dma_info *dma, void *addr, int length,
 	      enum copy_direction dir, u32 *crcp);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5c2684bf430f..dff605fdf60f 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -107,7 +107,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg)
 	}
 }
 
-static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
 {
 	int i;
 	int num_map;
@@ -145,7 +145,7 @@ err1:
 	return -ENOMEM;
 }
 
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
 		     int access, struct rxe_mem *mem)
 {
 	rxe_mem_init(access, mem);
@@ -158,7 +158,7 @@ int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
 	return 0;
 }
 
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 		      u64 length, u64 iova, int access, struct ib_udata *udata,
 		      struct rxe_mem *mem)
 {
@@ -184,7 +184,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
 
 	rxe_mem_init(access, mem);
 
-	err = rxe_mem_alloc(rxe, mem, num_buf);
+	err = rxe_mem_alloc(mem, num_buf);
 	if (err) {
 		pr_warn("err %d from rxe_mem_alloc\n", err);
 		ib_umem_release(umem);
@@ -236,7 +236,7 @@ err1:
 	return err;
 }
 
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
 		      int max_pages, struct rxe_mem *mem)
 {
 	int err;
@@ -246,7 +246,7 @@ int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
 	/* In fastreg, we also set the rkey */
 	mem->ibmr.rkey = mem->ibmr.lkey;
 
-	err = rxe_mem_alloc(rxe, mem, max_pages);
+	err = rxe_mem_alloc(mem, max_pages);
 	if (err)
 		goto err1;
 
@@ -434,7 +434,6 @@ err1:
  * under the control of a dma descriptor
  */
 int copy_data(
-	struct rxe_dev		*rxe,
 	struct rxe_pd		*pd,
 	int			access,
 	struct rxe_dma_info	*dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 7bdaf71b8221..957826dde94f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -490,7 +490,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 		wqe->dma.resid -= paylen;
 		wqe->dma.sge_offset += paylen;
 	} else {
-		err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+		err = copy_data(qp->pd, 0, &wqe->dma,
 				payload_addr(pkt), paylen,
 				from_mem_obj,
 				&crc);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ed402f028471..c45c1ff24497 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -511,9 +511,8 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
 			       int data_len)
 {
 	int err;
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
-	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
 			data_addr, data_len, to_mem_obj, NULL);
 	if (unlikely(err))
 		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2cb52fd48cf1..c5206148243c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1011,7 +1011,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 
 	rxe_add_ref(pd);
 
-	err = rxe_mem_init_dma(rxe, pd, access, mr);
+	err = rxe_mem_init_dma(pd, access, mr);
 	if (err)
 		goto err2;
 
@@ -1046,7 +1046,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 
 	rxe_add_ref(pd);
 
-	err = rxe_mem_init_user(rxe, pd, start, length, iova,
+	err = rxe_mem_init_user(pd, start, length, iova,
 				access, udata, mr);
 	if (err)
 		goto err3;
@@ -1094,7 +1094,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
 
 	rxe_add_ref(pd);
 
-	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+	err = rxe_mem_init_fast(pd, max_num_sg, mr);
 	if (err)
 		goto err2;
 