author     oulijun <oulijun@huawei.com>          2018-02-05 08:14:00 -0500
committer  Doug Ledford <dledford@redhat.com>    2018-02-05 10:48:48 -0500
commit     8b9b8d143b467ec9c65f87b7c2596dc2aabe6737 (patch)
tree       e7e8bfd3bbaaf03ee1435cf97c74447b141400c6
parent     e9d1e389273aac7fc50c53f0d4492dc190185882 (diff)
RDMA/hns: Fix the endian problem for hns
The hip06 and hip08 hardware runs on a little-endian ARM, so the annotations need to be revised to indicate that the HW uses little-endian data in the various DMA buffers, and the necessary swaps need to be flowed throughout. imm_data uses big-endian mode. The cpu_to_le32/le32_to_cpu swaps are no-ops on this platform, which makes the only substantive change the handling of imm_data, which is now mandatorily swapped. This also keeps the kernel driver matched with the userspace hns driver and resolves the warnings reported by sparse.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
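For orientation while reading the per-field changes below, here is a minimal sketch of the annotation pattern being applied. It is not taken from the driver; the struct and function names are made up. Fields the hardware consumes as little endian become __le32 and go through cpu_to_le32()/le32_to_cpu(), while immediate data stays __be32 and is copied through untouched.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical descriptor, for illustration only. */
struct example_wqe {
	__le32 msg_len;		/* HW reads this as little endian */
	__be32 imm_data;	/* immediate data stays in network byte order */
};

static void example_fill(struct example_wqe *wqe, u32 len, __be32 imm)
{
	/*
	 * The swap is a no-op on little-endian ARM, but it documents the
	 * DMA format and keeps sparse quiet.
	 */
	wqe->msg_len = cpu_to_le32(len);

	/* Already big endian; copied through without a swap. */
	wqe->imm_data = imm;
}

static u32 example_msg_len(const struct example_wqe *wqe)
{
	return le32_to_cpu(wqe->msg_len);
}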
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h  |   6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h  |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c   |  60
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h   | 258
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c   |  54
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h   | 283
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c    |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c      |  18
8 files changed, 363 insertions, 320 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index dd67fafd0c40..319cb74aebaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -43,15 +43,15 @@
 	__raw_writel((__force u32)cpu_to_le32(value), (addr))
 
 #define roce_get_field(origin, mask, shift) \
-	(((origin) & (mask)) >> (shift))
+	(((le32_to_cpu(origin)) & (mask)) >> (shift))
 
 #define roce_get_bit(origin, shift) \
 	roce_get_field((origin), (1ul << (shift)), (shift))
 
 #define roce_set_field(origin, mask, shift, val) \
 	do { \
-		(origin) &= (~(mask)); \
-		(origin) |= (((u32)(val) << (shift)) & (mask)); \
+		(origin) &= ~cpu_to_le32(mask); \
+		(origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \
 	} while (0)
 
 #define roce_set_bit(origin, shift, val) \
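As an aside (not part of the patch), a hypothetical caller of the reworked accessors could look like the sketch below. It assumes the roce_set_field()/roce_get_field() macros from hns_roce_common.h are in scope and uses a made-up field layout purely for illustration.

#include <linux/types.h>
#include <linux/bitops.h>	/* GENMASK() */

/* Made-up field occupying bits 4..11 of a __le32 context word. */
#define EX_FIELD_S	4
#define EX_FIELD_M	GENMASK(11, 4)

static void ex_pack(__le32 *ctx_word, u32 val)
{
	/* Mask, shift and value are CPU order; the macro swaps internally. */
	roce_set_field(*ctx_word, EX_FIELD_M, EX_FIELD_S, val);
}

static u32 ex_unpack(__le32 ctx_word)
{
	/* Returns the field back in CPU order. */
	return roce_get_field(ctx_word, EX_FIELD_M, EX_FIELD_S);
}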
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index edf4d80c5402..165a09b314f6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -466,7 +466,7 @@ struct hns_roce_qp {
 	struct ib_qp		ibqp;
 	struct hns_roce_buf	hr_buf;
 	struct hns_roce_wq	rq;
-	__le64			doorbell_qpn;
+	u32			doorbell_qpn;
 	__le32			sq_signal_bits;
 	u32			sq_next_wqe;
 	int			sq_max_wqes_per_wr;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 21ca9fa7c9d1..da13bd7c3ca9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -195,23 +195,47 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
 
-			ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
-			ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
-			ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;
-
-			ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
-			ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
-			ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
+			ud_sq_wqe->va0_l =
+				       cpu_to_le32((u32)wr->sg_list[0].addr);
+			ud_sq_wqe->va0_h =
+				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
+			ud_sq_wqe->l_key0 =
+				       cpu_to_le32(wr->sg_list[0].lkey);
+
+			ud_sq_wqe->va1_l =
+				       cpu_to_le32((u32)wr->sg_list[1].addr);
+			ud_sq_wqe->va1_h =
+				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
+			ud_sq_wqe->l_key1 =
+				       cpu_to_le32(wr->sg_list[1].lkey);
 			ind++;
 		} else if (ibqp->qp_type == IB_QPT_RC) {
+			u32 tmp_len = 0;
+
 			ctrl = wqe;
 			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
 			for (i = 0; i < wr->num_sge; i++)
-				ctrl->msg_length += wr->sg_list[i].length;
+				tmp_len += wr->sg_list[i].length;
+
+			ctrl->msg_length =
+			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
 
 			ctrl->sgl_pa_h = 0;
 			ctrl->flag = 0;
-			ctrl->imm_data = send_ieth(wr);
+
+			switch (wr->opcode) {
+			case IB_WR_SEND_WITH_IMM:
+			case IB_WR_RDMA_WRITE_WITH_IMM:
+				ctrl->imm_data = wr->ex.imm_data;
+				break;
+			case IB_WR_SEND_WITH_INV:
+				ctrl->inv_key =
+					cpu_to_le32(wr->ex.invalidate_rkey);
+				break;
+			default:
+				ctrl->imm_data = 0;
+				break;
+			}
 
 			/*Ctrl field, ctrl set type: sig, solic, imm, fence */
 			/* SO wait for conforming application scenarios */
@@ -258,8 +282,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		dseg = wqe;
 		if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
-			if (ctrl->msg_length >
+			if (le32_to_cpu(ctrl->msg_length) >
 			    hr_dev->caps.max_sq_inline) {
 				ret = -EINVAL;
 				*bad_wr = wr;
 				dev_err(dev, "inline len(1-%d)=%d, illegal",
@@ -273,7 +297,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				       wr->sg_list[i].length);
 				wqe += wr->sg_list[i].length;
 			}
-			ctrl->flag |= HNS_ROCE_WQE_INLINE;
+			ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
 		} else {
 			/*sqe num is two */
 			for (i = 0; i < wr->num_sge; i++)
@@ -306,8 +330,8 @@ out:
 			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
 		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
 
-		doorbell[0] = sq_db.u32_4;
-		doorbell[1] = sq_db.u32_8;
+		doorbell[0] = le32_to_cpu(sq_db.u32_4);
+		doorbell[1] = le32_to_cpu(sq_db.u32_8);
 
 		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
 		qp->sq_next_wqe = ind;
@@ -403,8 +427,8 @@ out:
 		roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
 			     1);
 
-		doorbell[0] = rq_db.u32_4;
-		doorbell[1] = rq_db.u32_8;
+		doorbell[0] = le32_to_cpu(rq_db.u32_4);
+		doorbell[1] = le32_to_cpu(rq_db.u32_8);
 
 		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
 	}
@@ -2261,7 +2285,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 						     CQE_BYTE_4_WQE_INDEX_M,
 						     CQE_BYTE_4_WQE_INDEX_S)&
 						    ((*cur_qp)->sq.wqe_cnt-1));
-		switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
+		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
 		case HNS_ROCE_WQE_OPCODE_SEND:
 			wc->opcode = IB_WC_SEND;
 			break;
@@ -2282,7 +2306,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 			wc->status = IB_WC_GENERAL_ERR;
 			break;
 		}
-		wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
+		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
 				IB_WC_WITH_IMM : 0);
 
 		wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index b44ddd239060..e9a2717ea7cd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -200,14 +200,14 @@
 #define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
 
 struct hns_roce_cq_context {
-	u32	cqc_byte_4;
-	u32	cq_bt_l;
-	u32	cqc_byte_12;
-	u32	cur_cqe_ba0_l;
-	u32	cqc_byte_20;
-	u32	cqe_tptr_addr_l;
-	u32	cur_cqe_ba1_l;
-	u32	cqc_byte_32;
+	__le32	cqc_byte_4;
+	__le32	cq_bt_l;
+	__le32	cqc_byte_12;
+	__le32	cur_cqe_ba0_l;
+	__le32	cqc_byte_20;
+	__le32	cqe_tptr_addr_l;
+	__le32	cur_cqe_ba1_l;
+	__le32	cqc_byte_32;
 };
 
 #define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
@@ -257,17 +257,17 @@ struct hns_roce_cq_context {
 	(((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
 
 struct hns_roce_cqe {
-	u32	cqe_byte_4;
+	__le32	cqe_byte_4;
 	union {
-		u32 r_key;
-		u32 immediate_data;
+		__le32 r_key;
+		__be32 immediate_data;
 	};
-	u32	byte_cnt;
-	u32	cqe_byte_16;
-	u32	cqe_byte_20;
-	u32	s_mac_l;
-	u32	cqe_byte_28;
-	u32	reserved;
+	__le32	byte_cnt;
+	__le32	cqe_byte_16;
+	__le32	cqe_byte_20;
+	__le32	s_mac_l;
+	__le32	cqe_byte_28;
+	__le32	reserved;
 };
 
 #define CQE_BYTE_4_OWNER_S 7
@@ -308,22 +308,22 @@ struct hns_roce_cqe {
 #define CQ_DB_REQ_NOT (1 << 16)
 
 struct hns_roce_v1_mpt_entry {
-	u32	mpt_byte_4;
-	u32	pbl_addr_l;
-	u32	mpt_byte_12;
-	u32	virt_addr_l;
-	u32	virt_addr_h;
-	u32	length;
-	u32	mpt_byte_28;
-	u32	pa0_l;
-	u32	mpt_byte_36;
-	u32	mpt_byte_40;
-	u32	mpt_byte_44;
-	u32	mpt_byte_48;
-	u32	pa4_l;
-	u32	mpt_byte_56;
-	u32	mpt_byte_60;
-	u32	mpt_byte_64;
+	__le32	mpt_byte_4;
+	__le32	pbl_addr_l;
+	__le32	mpt_byte_12;
+	__le32	virt_addr_l;
+	__le32	virt_addr_h;
+	__le32	length;
+	__le32	mpt_byte_28;
+	__le32	pa0_l;
+	__le32	mpt_byte_36;
+	__le32	mpt_byte_40;
+	__le32	mpt_byte_44;
+	__le32	mpt_byte_48;
+	__le32	pa4_l;
+	__le32	mpt_byte_56;
+	__le32	mpt_byte_60;
+	__le32	mpt_byte_64;
 };
 
 #define MPT_BYTE_4_KEY_STATE_S 0
@@ -408,30 +408,32 @@ struct hns_roce_v1_mpt_entry {
 	(((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
 
 struct hns_roce_wqe_ctrl_seg {
-	__be32 sgl_pa_h;
-	__be32 flag;
-	__be32 imm_data;
-	__be32 msg_length;
+	__le32 sgl_pa_h;
+	__le32 flag;
+	union {
+		__be32 imm_data;
+		__le32 inv_key;
+	};
+	__le32 msg_length;
 };
 
 struct hns_roce_wqe_data_seg {
-	__be64 addr;
-	__be32 lkey;
-	__be32 len;
+	__le64 addr;
+	__le32 lkey;
+	__le32 len;
 };
 
 struct hns_roce_wqe_raddr_seg {
-	__be32 rkey;
-	__be32 len;/* reserved */
-	__be64 raddr;
+	__le32 rkey;
+	__le32 len;/* reserved */
+	__le64 raddr;
 };
 
 struct hns_roce_rq_wqe_ctrl {
-
-	u32 rwqe_byte_4;
-	u32 rocee_sgl_ba_l;
-	u32 rwqe_byte_12;
-	u32 reserved[5];
+	__le32 rwqe_byte_4;
+	__le32 rocee_sgl_ba_l;
+	__le32 rwqe_byte_12;
+	__le32 reserved[5];
 };
 
 #define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
@@ -443,31 +445,31 @@ struct hns_roce_rq_wqe_ctrl {
 #define GID_LEN 16
 
 struct hns_roce_ud_send_wqe {
-	u32 dmac_h;
-	u32 u32_8;
-	u32 immediate_data;
+	__le32 dmac_h;
+	__le32 u32_8;
+	__le32 immediate_data;
 
-	u32 u32_16;
+	__le32 u32_16;
 	union {
 		unsigned char dgid[GID_LEN];
 		struct {
-			u32 u32_20;
-			u32 u32_24;
-			u32 u32_28;
-			u32 u32_32;
+			__le32 u32_20;
+			__le32 u32_24;
+			__le32 u32_28;
+			__le32 u32_32;
 		};
 	};
 
-	u32 u32_36;
-	u32 u32_40;
+	__le32 u32_36;
+	__le32 u32_40;
 
-	u32 va0_l;
-	u32 va0_h;
-	u32 l_key0;
+	__le32 va0_l;
+	__le32 va0_h;
+	__le32 l_key0;
 
-	u32 va1_l;
-	u32 va1_h;
-	u32 l_key1;
+	__le32 va1_l;
+	__le32 va1_h;
+	__le32 l_key1;
 };
 
 #define UD_SEND_WQE_U32_4_DMAC_0_S 0
@@ -535,16 +537,16 @@ struct hns_roce_ud_send_wqe {
 	(((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
 
 struct hns_roce_sqp_context {
-	u32 qp1c_bytes_4;
-	u32 sq_rq_bt_l;
-	u32 qp1c_bytes_12;
-	u32 qp1c_bytes_16;
-	u32 qp1c_bytes_20;
-	u32 cur_rq_wqe_ba_l;
-	u32 qp1c_bytes_28;
-	u32 qp1c_bytes_32;
-	u32 cur_sq_wqe_ba_l;
-	u32 qp1c_bytes_40;
+	__le32 qp1c_bytes_4;
+	__le32 sq_rq_bt_l;
+	__le32 qp1c_bytes_12;
+	__le32 qp1c_bytes_16;
+	__le32 qp1c_bytes_20;
+	__le32 cur_rq_wqe_ba_l;
+	__le32 qp1c_bytes_28;
+	__le32 qp1c_bytes_32;
+	__le32 cur_sq_wqe_ba_l;
+	__le32 qp1c_bytes_40;
 };
 
 #define QP1C_BYTES_4_QP_STATE_S 0
@@ -626,64 +628,64 @@ struct hns_roce_sqp_context {
 #define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
 
 struct hns_roce_qp_context {
-	u32 qpc_bytes_4;
-	u32 qpc_bytes_8;
-	u32 qpc_bytes_12;
-	u32 qpc_bytes_16;
-	u32 sq_rq_bt_l;
-	u32 qpc_bytes_24;
-	u32 irrl_ba_l;
-	u32 qpc_bytes_32;
-	u32 qpc_bytes_36;
-	u32 dmac_l;
-	u32 qpc_bytes_44;
-	u32 qpc_bytes_48;
+	__le32 qpc_bytes_4;
+	__le32 qpc_bytes_8;
+	__le32 qpc_bytes_12;
+	__le32 qpc_bytes_16;
+	__le32 sq_rq_bt_l;
+	__le32 qpc_bytes_24;
+	__le32 irrl_ba_l;
+	__le32 qpc_bytes_32;
+	__le32 qpc_bytes_36;
+	__le32 dmac_l;
+	__le32 qpc_bytes_44;
+	__le32 qpc_bytes_48;
 	u8 dgid[16];
-	u32 qpc_bytes_68;
-	u32 cur_rq_wqe_ba_l;
-	u32 qpc_bytes_76;
-	u32 rx_rnr_time;
-	u32 qpc_bytes_84;
-	u32 qpc_bytes_88;
+	__le32 qpc_bytes_68;
+	__le32 cur_rq_wqe_ba_l;
+	__le32 qpc_bytes_76;
+	__le32 rx_rnr_time;
+	__le32 qpc_bytes_84;
+	__le32 qpc_bytes_88;
 	union {
-		u32 rx_sge_len;
-		u32 dma_length;
+		__le32 rx_sge_len;
+		__le32 dma_length;
 	};
 	union {
-		u32 rx_sge_num;
-		u32 rx_send_pktn;
-		u32 r_key;
+		__le32 rx_sge_num;
+		__le32 rx_send_pktn;
+		__le32 r_key;
 	};
-	u32 va_l;
-	u32 va_h;
-	u32 qpc_bytes_108;
-	u32 qpc_bytes_112;
-	u32 rx_cur_sq_wqe_ba_l;
-	u32 qpc_bytes_120;
-	u32 qpc_bytes_124;
-	u32 qpc_bytes_128;
-	u32 qpc_bytes_132;
-	u32 qpc_bytes_136;
-	u32 qpc_bytes_140;
-	u32 qpc_bytes_144;
-	u32 qpc_bytes_148;
+	__le32 va_l;
+	__le32 va_h;
+	__le32 qpc_bytes_108;
+	__le32 qpc_bytes_112;
+	__le32 rx_cur_sq_wqe_ba_l;
+	__le32 qpc_bytes_120;
+	__le32 qpc_bytes_124;
+	__le32 qpc_bytes_128;
+	__le32 qpc_bytes_132;
+	__le32 qpc_bytes_136;
+	__le32 qpc_bytes_140;
+	__le32 qpc_bytes_144;
+	__le32 qpc_bytes_148;
 	union {
-		u32 rnr_retry;
-		u32 ack_time;
+		__le32 rnr_retry;
+		__le32 ack_time;
 	};
-	u32 qpc_bytes_156;
-	u32 pkt_use_len;
-	u32 qpc_bytes_164;
-	u32 qpc_bytes_168;
+	__le32 qpc_bytes_156;
+	__le32 pkt_use_len;
+	__le32 qpc_bytes_164;
+	__le32 qpc_bytes_168;
 	union {
-		u32 sge_use_len;
-		u32 pa_use_len;
+		__le32 sge_use_len;
+		__le32 pa_use_len;
 	};
-	u32 qpc_bytes_176;
-	u32 qpc_bytes_180;
-	u32 tx_cur_sq_wqe_ba_l;
-	u32 qpc_bytes_188;
-	u32 rvd21;
+	__le32 qpc_bytes_176;
+	__le32 qpc_bytes_180;
+	__le32 tx_cur_sq_wqe_ba_l;
+	__le32 qpc_bytes_188;
+	__le32 rvd21;
 };
 
 #define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
@@ -996,8 +998,8 @@ struct hns_roce_qp_context {
 #define HCR_GO_BIT 15
 
 struct hns_roce_rq_db {
-	u32    u32_4;
-	u32    u32_8;
+	__le32    u32_4;
+	__le32    u32_8;
 };
 
 #define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
@@ -1013,8 +1015,8 @@ struct hns_roce_rq_db {
 #define RQ_DOORBELL_U32_8_HW_SYNC_S 31
 
 struct hns_roce_sq_db {
-	u32    u32_4;
-	u32    u32_8;
+	__le32    u32_4;
+	__le32    u32_8;
 };
 
 #define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index fd9592a10ccf..db2ff352d75f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -63,7 +63,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int i;
 
 	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
-		if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
+		if (le32_to_cpu(rc_sq_wqe->msg_len) >
+		    hr_dev->caps.max_sq_inline) {
 			*bad_wr = wr;
 			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
 				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
@@ -136,6 +137,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	unsigned int ind;
 	void *wqe = NULL;
+	u32 tmp_len = 0;
 	bool loopback;
 	int ret = 0;
 	u8 *smac;
@@ -218,9 +220,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				       HNS_ROCE_V2_WQE_OP_SEND);
 
 			for (i = 0; i < wr->num_sge; i++)
-				ud_sq_wqe->msg_len += wr->sg_list[i].length;
+				tmp_len += wr->sg_list[i].length;
 
-			ud_sq_wqe->immtdata = send_ieth(wr);
+			ud_sq_wqe->msg_len =
+			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
+
+			switch (wr->opcode) {
+			case IB_WR_SEND_WITH_IMM:
+			case IB_WR_RDMA_WRITE_WITH_IMM:
+				ud_sq_wqe->immtdata = wr->ex.imm_data;
+				break;
+			default:
+				ud_sq_wqe->immtdata = 0;
+				break;
+			}
 
 			/* Set sig attr */
 			roce_set_bit(ud_sq_wqe->byte_4,
@@ -254,8 +267,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
 			ud_sq_wqe->qkey =
-			     cpu_to_be32(ud_wr(wr)->remote_qkey & 0x80000000) ?
-			     qp->qkey : ud_wr(wr)->remote_qkey;
+			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+			     qp->qkey : ud_wr(wr)->remote_qkey);
 			roce_set_field(ud_sq_wqe->byte_32,
 				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
 				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
@@ -264,7 +277,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			roce_set_field(ud_sq_wqe->byte_36,
 				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
 				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
-				       ah->av.vlan);
+				       le16_to_cpu(ah->av.vlan));
 			roce_set_field(ud_sq_wqe->byte_36,
 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
@@ -283,8 +296,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			roce_set_field(ud_sq_wqe->byte_40,
 				       V2_UD_SEND_WQE_BYTE_40_SL_M,
 				       V2_UD_SEND_WQE_BYTE_40_SL_S,
-				       ah->av.sl_tclass_flowlabel >>
+				      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
 				       HNS_ROCE_SL_SHIFT);
 			roce_set_field(ud_sq_wqe->byte_40,
 				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
 				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
@@ -311,9 +324,24 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			rc_sq_wqe = wqe;
 			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
 			for (i = 0; i < wr->num_sge; i++)
-				rc_sq_wqe->msg_len += wr->sg_list[i].length;
+				tmp_len += wr->sg_list[i].length;
+
+			rc_sq_wqe->msg_len =
+			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
 
-			rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+			switch (wr->opcode) {
+			case IB_WR_SEND_WITH_IMM:
+			case IB_WR_RDMA_WRITE_WITH_IMM:
+				rc_sq_wqe->immtdata = wr->ex.imm_data;
+				break;
+			case IB_WR_SEND_WITH_INV:
+				rc_sq_wqe->inv_key =
+					cpu_to_le32(wr->ex.invalidate_rkey);
+				break;
+			default:
+				rc_sq_wqe->immtdata = 0;
+				break;
+			}
 
 			roce_set_bit(rc_sq_wqe->byte_4,
 				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
@@ -451,7 +479,7 @@ out:
 		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
 			       V2_DB_PARAMETER_SL_S, qp->sl);
 
-		hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
+		hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
 
 		qp->sq_next_wqe = ind;
 		qp->next_sge = sge_ind;
@@ -513,7 +541,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		}
 
 		if (i < hr_qp->rq.max_gs) {
-			dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY);
+			dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
 			dseg[i].addr = 0;
 		}
 
@@ -546,7 +574,7 @@ out:
 		roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
 			       V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
 
-		hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
+		hns_roce_write64_k((__le32 *)&rq_db, hr_qp->rq.db_reg_l);
 	}
 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 960df095392a..2bf8a47e3de3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -224,22 +224,22 @@ enum hns_roce_sgid_type {
 };
 
 struct hns_roce_v2_cq_context {
-	u32	byte_4_pg_ceqn;
-	u32	byte_8_cqn;
-	u32	cqe_cur_blk_addr;
-	u32	byte_16_hop_addr;
-	u32	cqe_nxt_blk_addr;
-	u32	byte_24_pgsz_addr;
-	u32	byte_28_cq_pi;
-	u32	byte_32_cq_ci;
-	u32	cqe_ba;
-	u32	byte_40_cqe_ba;
-	u32	byte_44_db_record;
-	u32	db_record_addr;
-	u32	byte_52_cqe_cnt;
-	u32	byte_56_cqe_period_maxcnt;
-	u32	cqe_report_timer;
-	u32	byte_64_se_cqe_idx;
+	__le32	byte_4_pg_ceqn;
+	__le32	byte_8_cqn;
+	__le32	cqe_cur_blk_addr;
+	__le32	byte_16_hop_addr;
+	__le32	cqe_nxt_blk_addr;
+	__le32	byte_24_pgsz_addr;
+	__le32	byte_28_cq_pi;
+	__le32	byte_32_cq_ci;
+	__le32	cqe_ba;
+	__le32	byte_40_cqe_ba;
+	__le32	byte_44_db_record;
+	__le32	db_record_addr;
+	__le32	byte_52_cqe_cnt;
+	__le32	byte_56_cqe_period_maxcnt;
+	__le32	cqe_report_timer;
+	__le32	byte_64_se_cqe_idx;
 };
 #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
 #define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
@@ -328,66 +328,66 @@ enum hns_roce_v2_qp_state {
 };
 
 struct hns_roce_v2_qp_context {
-	u32	byte_4_sqpn_tst;
-	u32	wqe_sge_ba;
-	u32	byte_12_sq_hop;
-	u32	byte_16_buf_ba_pg_sz;
-	u32	byte_20_smac_sgid_idx;
-	u32	byte_24_mtu_tc;
-	u32	byte_28_at_fl;
+	__le32	byte_4_sqpn_tst;
+	__le32	wqe_sge_ba;
+	__le32	byte_12_sq_hop;
+	__le32	byte_16_buf_ba_pg_sz;
+	__le32	byte_20_smac_sgid_idx;
+	__le32	byte_24_mtu_tc;
+	__le32	byte_28_at_fl;
 	u8	dgid[GID_LEN_V2];
-	u32	dmac;
-	u32	byte_52_udpspn_dmac;
-	u32	byte_56_dqpn_err;
-	u32	byte_60_qpst_mapid;
-	u32	qkey_xrcd;
-	u32	byte_68_rq_db;
-	u32	rq_db_record_addr;
-	u32	byte_76_srqn_op_en;
-	u32	byte_80_rnr_rx_cqn;
-	u32	byte_84_rq_ci_pi;
-	u32	rq_cur_blk_addr;
-	u32	byte_92_srq_info;
-	u32	byte_96_rx_reqmsn;
-	u32	rq_nxt_blk_addr;
-	u32	byte_104_rq_sge;
-	u32	byte_108_rx_reqepsn;
-	u32	rq_rnr_timer;
-	u32	rx_msg_len;
-	u32	rx_rkey_pkt_info;
-	u64	rx_va;
-	u32	byte_132_trrl;
-	u32	trrl_ba;
-	u32	byte_140_raq;
-	u32	byte_144_raq;
-	u32	byte_148_raq;
-	u32	byte_152_raq;
-	u32	byte_156_raq;
-	u32	byte_160_sq_ci_pi;
-	u32	sq_cur_blk_addr;
-	u32	byte_168_irrl_idx;
-	u32	byte_172_sq_psn;
-	u32	byte_176_msg_pktn;
-	u32	sq_cur_sge_blk_addr;
-	u32	byte_184_irrl_idx;
-	u32	cur_sge_offset;
-	u32	byte_192_ext_sge;
-	u32	byte_196_sq_psn;
-	u32	byte_200_sq_max;
-	u32	irrl_ba;
-	u32	byte_208_irrl;
-	u32	byte_212_lsn;
-	u32	sq_timer;
-	u32	byte_220_retry_psn_msn;
-	u32	byte_224_retry_msg;
-	u32	rx_sq_cur_blk_addr;
-	u32	byte_232_irrl_sge;
-	u32	irrl_cur_sge_offset;
-	u32	byte_240_irrl_tail;
-	u32	byte_244_rnr_rxack;
-	u32	byte_248_ack_psn;
-	u32	byte_252_err_txcqn;
-	u32	byte_256_sqflush_rqcqe;
+	__le32	dmac;
+	__le32	byte_52_udpspn_dmac;
+	__le32	byte_56_dqpn_err;
+	__le32	byte_60_qpst_mapid;
+	__le32	qkey_xrcd;
+	__le32	byte_68_rq_db;
+	__le32	rq_db_record_addr;
+	__le32	byte_76_srqn_op_en;
+	__le32	byte_80_rnr_rx_cqn;
+	__le32	byte_84_rq_ci_pi;
+	__le32	rq_cur_blk_addr;
+	__le32	byte_92_srq_info;
+	__le32	byte_96_rx_reqmsn;
+	__le32	rq_nxt_blk_addr;
+	__le32	byte_104_rq_sge;
+	__le32	byte_108_rx_reqepsn;
+	__le32	rq_rnr_timer;
+	__le32	rx_msg_len;
+	__le32	rx_rkey_pkt_info;
+	__le64	rx_va;
+	__le32	byte_132_trrl;
+	__le32	trrl_ba;
+	__le32	byte_140_raq;
+	__le32	byte_144_raq;
+	__le32	byte_148_raq;
+	__le32	byte_152_raq;
+	__le32	byte_156_raq;
+	__le32	byte_160_sq_ci_pi;
+	__le32	sq_cur_blk_addr;
+	__le32	byte_168_irrl_idx;
+	__le32	byte_172_sq_psn;
+	__le32	byte_176_msg_pktn;
+	__le32	sq_cur_sge_blk_addr;
+	__le32	byte_184_irrl_idx;
+	__le32	cur_sge_offset;
+	__le32	byte_192_ext_sge;
+	__le32	byte_196_sq_psn;
+	__le32	byte_200_sq_max;
+	__le32	irrl_ba;
+	__le32	byte_208_irrl;
+	__le32	byte_212_lsn;
+	__le32	sq_timer;
+	__le32	byte_220_retry_psn_msn;
+	__le32	byte_224_retry_msg;
+	__le32	rx_sq_cur_blk_addr;
+	__le32	byte_232_irrl_sge;
+	__le32	irrl_cur_sge_offset;
+	__le32	byte_240_irrl_tail;
+	__le32	byte_244_rnr_rxack;
+	__le32	byte_248_ack_psn;
+	__le32	byte_252_err_txcqn;
+	__le32	byte_256_sqflush_rqcqe;
 };
 
 #define V2_QPC_BYTE_4_TST_S 0
@@ -761,17 +761,17 @@ struct hns_roce_v2_qp_context {
 #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
 
 struct hns_roce_v2_cqe {
-	u32	byte_4;
+	__le32	byte_4;
 	union {
 		__le32 rkey;
 		__be32 immtdata;
 	};
-	u32	byte_12;
-	u32	byte_16;
-	u32	byte_cnt;
+	__le32	byte_12;
+	__le32	byte_16;
+	__le32	byte_cnt;
 	u8	smac[4];
-	u32	byte_28;
-	u32	byte_32;
+	__le32	byte_28;
+	__le32	byte_32;
 };
 
 #define V2_CQE_BYTE_4_OPCODE_S 0
@@ -901,8 +901,8 @@ struct hns_roce_v2_mpt_entry {
 #define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
 
 struct hns_roce_v2_cq_db {
-	u32	byte_4;
-	u32	parameter;
+	__le32	byte_4;
+	__le32	parameter;
 };
 
 #define V2_CQ_DB_BYTE_4_TAG_S 0
@@ -920,18 +920,18 @@ struct hns_roce_v2_cq_db {
 #define V2_CQ_DB_PARAMETER_NOTIFY_S 24
 
 struct hns_roce_v2_ud_send_wqe {
-	u32	byte_4;
-	u32	msg_len;
-	u32	immtdata;
-	u32	byte_16;
-	u32	byte_20;
-	u32	byte_24;
-	u32	qkey;
-	u32	byte_32;
-	u32	byte_36;
-	u32	byte_40;
-	u32	dmac;
-	u32	byte_48;
+	__le32	byte_4;
+	__le32	msg_len;
+	__be32	immtdata;
+	__le32	byte_16;
+	__le32	byte_20;
+	__le32	byte_24;
+	__le32	qkey;
+	__le32	byte_32;
+	__le32	byte_36;
+	__le32	byte_40;
+	__le32	dmac;
+	__le32	byte_48;
 	u8	dgid[GID_LEN_V2];
 
 };
@@ -1004,13 +1004,16 @@ struct hns_roce_v2_ud_send_wqe {
 #define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
 
 struct hns_roce_v2_rc_send_wqe {
-	u32	byte_4;
-	u32	msg_len;
-	u32	inv_key_immtdata;
-	u32	byte_16;
-	u32	byte_20;
-	u32	rkey;
-	u64	va;
+	__le32	byte_4;
+	__le32	msg_len;
+	union {
+		__le32 inv_key;
+		__be32 immtdata;
+	};
+	__le32	byte_16;
+	__le32	byte_20;
+	__le32	rkey;
+	__le64	va;
 };
 
 #define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
@@ -1038,14 +1041,14 @@ struct hns_roce_v2_rc_send_wqe {
 #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
 
 struct hns_roce_v2_wqe_data_seg {
-	__be32    len;
-	__be32    lkey;
-	__be64    addr;
+	__le32    len;
+	__le32    lkey;
+	__le64    addr;
 };
 
 struct hns_roce_v2_db {
-	u32	byte_4;
-	u32	parameter;
+	__le32	byte_4;
+	__le32	parameter;
 };
 
 struct hns_roce_query_version {
@@ -1105,12 +1108,12 @@ struct hns_roce_pf_res {
 #define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
 
 struct hns_roce_vf_res_a {
-	u32 vf_id;
-	u32 vf_qpc_bt_idx_num;
-	u32 vf_srqc_bt_idx_num;
-	u32 vf_cqc_bt_idx_num;
-	u32 vf_mpt_bt_idx_num;
-	u32 vf_eqc_bt_idx_num;
+	__le32 vf_id;
+	__le32 vf_qpc_bt_idx_num;
+	__le32 vf_srqc_bt_idx_num;
+	__le32 vf_cqc_bt_idx_num;
+	__le32 vf_mpt_bt_idx_num;
+	__le32 vf_eqc_bt_idx_num;
 };
 
 #define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
@@ -1144,11 +1147,11 @@ struct hns_roce_vf_res_a {
 #define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
 
 struct hns_roce_vf_res_b {
-	u32 rsv0;
-	u32 vf_smac_idx_num;
-	u32 vf_sgid_idx_num;
-	u32 vf_qid_idx_sl_num;
-	u32 rsv[2];
+	__le32 rsv0;
+	__le32 vf_smac_idx_num;
+	__le32 vf_sgid_idx_num;
+	__le32 vf_qid_idx_sl_num;
+	__le32 rsv[2];
 };
 
 #define VF_RES_B_DATA_0_VF_ID_S 0
@@ -1180,11 +1183,11 @@ struct hns_roce_vf_res_b {
 #define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
 
 struct hns_roce_cfg_bt_attr {
-	u32 vf_qpc_cfg;
-	u32 vf_srqc_cfg;
-	u32 vf_cqc_cfg;
-	u32 vf_mpt_cfg;
-	u32 rsv[2];
+	__le32 vf_qpc_cfg;
+	__le32 vf_srqc_cfg;
+	__le32 vf_cqc_cfg;
+	__le32 vf_mpt_cfg;
+	__le32 rsv[2];
 };
 
 #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
@@ -1224,11 +1227,11 @@ struct hns_roce_cfg_bt_attr {
 #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
 
 struct hns_roce_cmq_desc {
-	u16 opcode;
-	u16 flag;
-	u16 retval;
-	u16 rsv;
-	u32 data[6];
+	__le16 opcode;
+	__le16 flag;
+	__le16 retval;
+	__le16 rsv;
+	__le32 data[6];
 };
 
 #define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
@@ -1274,18 +1277,18 @@ struct hns_roce_v2_priv {
 };
 
 struct hns_roce_eq_context {
-	u32	byte_4;
-	u32	byte_8;
-	u32	byte_12;
-	u32	eqe_report_timer;
-	u32	eqe_ba0;
-	u32	eqe_ba1;
-	u32	byte_28;
-	u32	byte_32;
-	u32	byte_36;
-	u32	nxt_eqe_ba0;
-	u32	nxt_eqe_ba1;
-	u32	rsv[5];
+	__le32	byte_4;
+	__le32	byte_8;
+	__le32	byte_12;
+	__le32	eqe_report_timer;
+	__le32	eqe_ba0;
+	__le32	eqe_ba1;
+	__le32	byte_28;
+	__le32	byte_32;
+	__le32	byte_36;
+	__le32	nxt_eqe_ba0;
+	__le32	nxt_eqe_ba1;
+	__le32	rsv[5];
 };
 
 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 46f065ddd6c0..eb9a69fc7bec 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -200,7 +200,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 
 	memset(props, 0, sizeof(*props));
 
-	props->sys_image_guid = hr_dev->sys_image_guid;
+	props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
 	props->max_mr_size = (u64)(~(0ULL));
 	props->page_size_cap = hr_dev->caps.page_size_cap;
 	props->vendor_id = hr_dev->vendor_id;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 4414cea9ef56..088973a05882 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -512,9 +512,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	hr_qp->ibqp.qp_type = init_attr->qp_type;
 
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
 	else
-		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
 
 	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
 				   !!init_attr->srq, hr_qp);
@@ -937,20 +937,6 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 }
 EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
 
-__be32 send_ieth(struct ib_send_wr *wr)
-{
-	switch (wr->opcode) {
-	case IB_WR_SEND_WITH_IMM:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		return cpu_to_le32(wr->ex.imm_data);
-	case IB_WR_SEND_WITH_INV:
-		return cpu_to_le32(wr->ex.invalidate_rkey);
-	default:
-		return 0;
-	}
-}
-EXPORT_SYMBOL_GPL(send_ieth);
-
 static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 {
 