author		Anton Altaparmakov <aia21@cantab.net>	2005-06-30 04:52:20 -0400
committer	Anton Altaparmakov <aia21@cantab.net>	2005-06-30 04:52:20 -0400
commit		c2d9b8387bce8b4a0fd402fab7dc1319d11a418d (patch)
tree		082cf7dd287f61635198011e61c3de1be130cc42 /drivers/infiniband/hw/mthca/mthca_qp.c
parent		2a322e4c08be4e7cb0c04b427ddaaa679fd88863 (diff)
parent		9b4311eedb17fa88f02e4876cd6aa9a08e383cd6 (diff)
Automerge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c | 139
1 file changed, 109 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ca73bab11a02..163a8ef4186f 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -357,6 +357,9 @@ static const struct {
 			[UD]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_PORT |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_PKEY_INDEX |
+				 IB_QP_PORT |
+				 IB_QP_ACCESS_FLAGS),
 			[RC]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_PORT |
 				 IB_QP_ACCESS_FLAGS),
@@ -378,6 +381,9 @@ static const struct {
 			[UD]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_PORT |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_PKEY_INDEX |
+				 IB_QP_PORT |
+				 IB_QP_ACCESS_FLAGS),
 			[RC]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_PORT |
 				 IB_QP_ACCESS_FLAGS),
@@ -388,6 +394,11 @@ static const struct {
 	[IB_QPS_RTR]   = {
 		.trans = MTHCA_TRANS_INIT2RTR,
 		.req_param = {
+			[UC]  = (IB_QP_AV |
+				 IB_QP_PATH_MTU |
+				 IB_QP_DEST_QPN |
+				 IB_QP_RQ_PSN |
+				 IB_QP_MAX_DEST_RD_ATOMIC),
 			[RC]  = (IB_QP_AV |
 				 IB_QP_PATH_MTU |
 				 IB_QP_DEST_QPN |
@@ -398,6 +409,9 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_ALT_PATH |
+				 IB_QP_ACCESS_FLAGS |
+				 IB_QP_PKEY_INDEX),
 			[RC]  = (IB_QP_ALT_PATH |
 				 IB_QP_ACCESS_FLAGS |
 				 IB_QP_PKEY_INDEX),
@@ -413,6 +427,8 @@ static const struct {
 		.trans = MTHCA_TRANS_RTR2RTS,
 		.req_param = {
 			[UD]  = IB_QP_SQ_PSN,
+			[UC]  = (IB_QP_SQ_PSN |
+				 IB_QP_MAX_QP_RD_ATOMIC),
 			[RC]  = (IB_QP_TIMEOUT |
 				 IB_QP_RETRY_CNT |
 				 IB_QP_RNR_RETRY |
@@ -423,6 +439,11 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_CUR_STATE |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_CUR_STATE |
+				 IB_QP_ALT_PATH |
+				 IB_QP_ACCESS_FLAGS |
+				 IB_QP_PKEY_INDEX |
+				 IB_QP_PATH_MIG_STATE),
 			[RC]  = (IB_QP_CUR_STATE |
 				 IB_QP_ALT_PATH |
 				 IB_QP_ACCESS_FLAGS |
@@ -442,6 +463,9 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_CUR_STATE |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_ACCESS_FLAGS |
+				 IB_QP_ALT_PATH |
+				 IB_QP_PATH_MIG_STATE),
 			[RC]  = (IB_QP_ACCESS_FLAGS |
 				 IB_QP_ALT_PATH |
 				 IB_QP_PATH_MIG_STATE |
@@ -462,6 +486,10 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_CUR_STATE |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_CUR_STATE |
+				 IB_QP_ALT_PATH |
+				 IB_QP_ACCESS_FLAGS |
+				 IB_QP_PATH_MIG_STATE),
 			[RC]  = (IB_QP_CUR_STATE |
 				 IB_QP_ALT_PATH |
 				 IB_QP_ACCESS_FLAGS |
@@ -476,6 +504,14 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_PKEY_INDEX |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_AV |
+				 IB_QP_MAX_QP_RD_ATOMIC |
+				 IB_QP_MAX_DEST_RD_ATOMIC |
+				 IB_QP_CUR_STATE |
+				 IB_QP_ALT_PATH |
+				 IB_QP_ACCESS_FLAGS |
+				 IB_QP_PKEY_INDEX |
+				 IB_QP_PATH_MIG_STATE),
 			[RC]  = (IB_QP_AV |
 				 IB_QP_TIMEOUT |
 				 IB_QP_RETRY_CNT |
@@ -501,6 +537,7 @@ static const struct {
 		.opt_param = {
 			[UD]  = (IB_QP_CUR_STATE |
 				 IB_QP_QKEY),
+			[UC]  = (IB_QP_CUR_STATE),
 			[RC]  = (IB_QP_CUR_STATE |
 				 IB_QP_MIN_RNR_TIMER),
 			[MLX] = (IB_QP_CUR_STATE |
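
Note: for readers unfamiliar with this table, mthca_modify_qp() looks up the entry for the (current state, new state) pair, requires every bit in req_param[transport] to be present in the caller's attr_mask, and rejects any bit outside req_param | opt_param. A minimal userspace sketch of that check, with the bit names and table contents abridged for illustration (this is not the driver's exact code):

#include <stdio.h>

/* Illustrative subset of the IB attribute bits used in the table above. */
enum {
	IB_QP_PKEY_INDEX   = 1 << 0,
	IB_QP_PORT         = 1 << 1,
	IB_QP_QKEY         = 1 << 2,
	IB_QP_ACCESS_FLAGS = 1 << 3,
};

enum { UD, UC, RC };			/* transport index, as in the diff */

struct trans_entry {
	int req_param[3];
	int opt_param[3];
};

/* One table slot, e.g. RESET->INIT, including the new UC row. */
static const struct trans_entry reset2init = {
	.req_param = {
		[UD] = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY,
		[UC] = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS,
		[RC] = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS,
	},
};

/* The validation mthca_modify_qp() performs with the req/opt masks. */
static int mask_ok(const struct trans_entry *e, int transport, int attr_mask)
{
	int req = e->req_param[transport];
	int opt = e->opt_param[transport];

	if ((attr_mask & req) != req)
		return 0;	/* a required attribute is missing */
	if (attr_mask & ~(req | opt))
		return 0;	/* an attribute invalid for this transition */
	return 1;
}

int main(void)
{
	int mask = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;

	printf("UC RESET->INIT, full mask:    %d\n", mask_ok(&reset2init, UC, mask));
	printf("UC RESET->INIT, missing port: %d\n",
	       mask_ok(&reset2init, UC, mask & ~IB_QP_PORT));
	return 0;
}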
@@ -552,7 +589,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	void *mailbox = NULL;
+	struct mthca_mailbox *mailbox;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
 	u32 req_param, opt_param;
@@ -609,10 +646,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		return -EINVAL;
 	}
 
-	mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
-	if (!mailbox)
-		return -ENOMEM;
-	qp_param = MAILBOX_ALIGN(mailbox);
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	qp_param = mailbox->buf;
 	qp_context = &qp_param->context;
 	memset(qp_param, 0, sizeof *qp_param);
 
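
Note: replacing kmalloc()/MAILBOX_ALIGN() with mthca_alloc_mailbox() also switches error reporting to the kernel's ERR_PTR convention, where the failure code travels inside the returned pointer rather than as NULL. A small userspace re-creation of that convention, for illustration only (the kernel's real helpers live in <linux/err.h>, and alloc_mailbox_demo() is a stand-in):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Errno values occupy the top page of the address space, so a valid
 * pointer and a small negative error code can share one return value. */
static inline void *ERR_PTR(long error) { return (void *) error; }
static inline long PTR_ERR(const void *ptr) { return (long) ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

/* Stand-in for mthca_alloc_mailbox(); always fails to show the path. */
static void *alloc_mailbox_demo(void)
{
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *mailbox = alloc_mailbox_demo();

	if (IS_ERR(mailbox)) {		/* matches the diff's error check */
		printf("alloc failed: %ld\n", PTR_ERR(mailbox));
		return 1;
	}
	return 0;
}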
@@ -683,7 +720,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (attr_mask & IB_QP_AV) {
 		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
 		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
-		qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
+		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
 		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
 			qp_context->pri_path.g_mylmc |= 1 << 7;
 			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
@@ -724,9 +761,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
 	}
 
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
-						       ffs(attr->max_dest_rd_atomic) - 1 : 0,
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
+						       ffs(attr->max_rd_atomic) - 1 : 0,
 						       7) << 21);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
 	}
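
Note: the ffs() expression packs the requested initiator depth into a 3-bit log2 field at bit 21 of params1. For the power-of-two depths IB uses, ffs(n) - 1 equals log2(n); zero maps to zero, and min(..., 7) caps the field. A runnable check of that arithmetic:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same encoding as the diff: min(n ? ffs(n) - 1 : 0, 7) << 21. */
static unsigned encode_rd_atomic(unsigned max_rd_atomic)
{
	unsigned f = max_rd_atomic ? (unsigned) ffs(max_rd_atomic) - 1 : 0;

	return (f < 7 ? f : 7) << 21;
}

int main(void)
{
	unsigned depths[] = { 0, 1, 2, 4, 8, 128, 256 };

	for (unsigned i = 0; i < sizeof depths / sizeof depths[0]; ++i)
		printf("max_rd_atomic=%3u -> params1 |= 0x%08x\n",
		       depths[i], encode_rd_atomic(depths[i]));
	return 0;	/* 128 encodes as 7; 256 is capped at 7 */
}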
@@ -764,10 +801,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp->atomic_rd_en = attr->qp_access_flags;
 	}
 
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
 		u8 rra_max;
 
-		if (qp->resp_depth && !attr->max_rd_atomic) {
+		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
 			/*
 			 * Lowering our responder resources to zero.
 			 * Turn off RDMA/atomics as responder.
@@ -778,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 							MTHCA_QP_OPTPAR_RAE);
 		}
 
-		if (!qp->resp_depth && attr->max_rd_atomic) {
+		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
 			/*
 			 * Increasing our responder resources from
 			 * zero.  Turn on RDMA/atomics as appropriate.
@@ -799,7 +836,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		}
 
 		for (rra_max = 0;
-		     1 << rra_max < attr->max_rd_atomic &&
+		     1 << rra_max < attr->max_dest_rd_atomic &&
 		     rra_max < dev->qp_table.rdb_shift;
 		     ++rra_max)
 			; /* nothing */
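
Note: the empty-bodied for loop computes the smallest rra_max with 1 << rra_max >= max_dest_rd_atomic, i.e. ceil(log2()) of the requested responder depth, clamped to the device's rdb_shift. A standalone version of the same loop (the rdb_shift value below is just an example for the clamp):

#include <stdio.h>

/* ceil(log2(n)) via the diff's counting loop, clamped to rdb_shift. */
static unsigned rra_max_for(unsigned max_dest_rd_atomic, unsigned rdb_shift)
{
	unsigned rra_max;

	for (rra_max = 0;
	     (1u << rra_max) < max_dest_rd_atomic && rra_max < rdb_shift;
	     ++rra_max)
		; /* nothing */
	return rra_max;
}

int main(void)
{
	unsigned depths[] = { 1, 2, 3, 4, 8, 100 };

	for (unsigned i = 0; i < sizeof depths / sizeof depths[0]; ++i)
		printf("max_dest_rd_atomic=%3u -> rra_max=%u\n",
		       depths[i], rra_max_for(depths[i], 4));
	return 0;	/* 3 -> 2, 8 -> 3, 100 clamps to 4 */
}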
@@ -807,7 +844,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_context->params2      |= cpu_to_be32(rra_max << 21);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
 
-		qp->resp_depth = attr->max_rd_atomic;
+		qp->resp_depth = attr->max_dest_rd_atomic;
 	}
 
 	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
@@ -835,7 +872,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
-			      qp->qpn, 0, qp_param, 0, &status);
+			      qp->qpn, 0, mailbox, 0, &status);
 	if (status) {
 		mthca_warn(dev, "modify QP %d returned status %02x.\n",
 			   state_table[cur_state][new_state].trans, status);
@@ -845,7 +882,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (!err)
 		qp->state = new_state;
 
-	kfree(mailbox);
+	mthca_free_mailbox(dev, mailbox);
 
 	if (is_sqp(dev, qp))
 		store_attrs(to_msqp(qp), attr, attr_mask);
@@ -934,7 +971,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 		mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
 			  size, shift);
 
-		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
+		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
+							  &t, GFP_KERNEL);
 		if (!qp->queue.direct.buf)
 			goto err_out;
 
@@ -973,7 +1011,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 		for (i = 0; i < npages; ++i) {
 			qp->queue.page_list[i].buf =
-				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+						   &t, GFP_KERNEL);
 			if (!qp->queue.page_list[i].buf)
 				goto err_out_free;
 
@@ -996,16 +1035,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 err_out_free:
 	if (qp->is_direct) {
-		pci_free_consistent(dev->pdev, size,
-				    qp->queue.direct.buf,
-				    pci_unmap_addr(&qp->queue.direct, mapping));
+		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+				  pci_unmap_addr(&qp->queue.direct, mapping));
 	} else
 		for (i = 0; i < npages; ++i) {
 			if (qp->queue.page_list[i].buf)
-				pci_free_consistent(dev->pdev, PAGE_SIZE,
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
 						    qp->queue.page_list[i].buf,
 						    pci_unmap_addr(&qp->queue.page_list[i],
 								   mapping));
 
 		}
 
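
Note: the allocation and free paths move from the PCI-specific wrappers to the generic DMA API; the mapping is mechanical. A sketch of the old-to-new call pairs in kernel context, assuming <linux/dma-mapping.h> (the helper names below are hypothetical and shown only to make the conversion explicit; this does not compile outside the kernel):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* pci_alloc_consistent(pdev, size, &dma) becomes
 * dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL):
 * the generic API takes a struct device and an explicit gfp_t. */
static void *alloc_coherent_buf(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

/* pci_free_consistent(pdev, size, buf, dma) becomes
 * dma_free_coherent(&pdev->dev, size, buf, dma). */
static void free_coherent_buf(struct pci_dev *pdev, size_t size,
			      void *buf, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, buf, dma);
}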
@@ -1073,11 +1111,12 @@ static void mthca_free_memfree(struct mthca_dev *dev,
 	if (mthca_is_memfree(dev)) {
 		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
 		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
-		mthca_table_put(dev, dev->qp_table.rdb_table,
-				qp->qpn << dev->qp_table.rdb_shift);
-		mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
-		mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
 	}
+
+	mthca_table_put(dev, dev->qp_table.rdb_table,
+			qp->qpn << dev->qp_table.rdb_shift);
+	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
 }
 
 static void mthca_wq_init(struct mthca_wq* wq)
@@ -1529,6 +1568,26 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		break;
 
+	case UC:
+		switch (wr->opcode) {
+		case IB_WR_RDMA_WRITE:
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			((struct mthca_raddr_seg *) wqe)->raddr =
+				cpu_to_be64(wr->wr.rdma.remote_addr);
+			((struct mthca_raddr_seg *) wqe)->rkey =
+				cpu_to_be32(wr->wr.rdma.rkey);
+			((struct mthca_raddr_seg *) wqe)->reserved = 0;
+			wqe += sizeof (struct mthca_raddr_seg);
+			size += sizeof (struct mthca_raddr_seg) / 16;
+			break;
+
+		default:
+			/* No extra segments required for sends */
+			break;
+		}
+
+		break;
+
 	case UD:
 		((struct mthca_tavor_ud_seg *) wqe)->lkey =
 			cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
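
Note: the new UC case reuses the RC remote-address segment: a 16-byte header (64-bit remote VA, 32-bit rkey, 32-bit reserved word) prepended to the data segments, which is why size advances by sizeof (struct mthca_raddr_seg) / 16; Tavor/Arbel WQE sizes are counted in 16-byte units. A userspace sketch of that layout and bookkeeping, with field widths inferred from the diff's cpu_to_be64/cpu_to_be32 conversions:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>	/* htobe64(), htobe32() */

/* Layout inferred from the diff: raddr, rkey, reserved = 16 bytes. */
struct mthca_raddr_seg {
	uint64_t raddr;		/* big-endian remote virtual address */
	uint32_t rkey;		/* big-endian remote memory key */
	uint32_t reserved;
};

int main(void)
{
	uint64_t wqe_buf[8];	/* uint64_t backing keeps wqe 8-byte aligned */
	uint8_t *wqe = (uint8_t *) wqe_buf;
	int size = 0;		/* WQE size in 16-byte units */

	/* Same three stores the UC RDMA-write path performs. */
	((struct mthca_raddr_seg *) wqe)->raddr    = htobe64(0x1000);
	((struct mthca_raddr_seg *) wqe)->rkey     = htobe32(0x42);
	((struct mthca_raddr_seg *) wqe)->reserved = 0;
	wqe  += sizeof (struct mthca_raddr_seg);
	size += sizeof (struct mthca_raddr_seg) / 16;

	printf("raddr seg = %zu bytes, size is now %d (16-byte units)\n",
	       sizeof (struct mthca_raddr_seg), size);
	return 0;
}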
@@ -1814,9 +1873,29 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					sizeof (struct mthca_atomic_seg);
 			break;
 
+		case IB_WR_RDMA_READ:
+		case IB_WR_RDMA_WRITE:
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			((struct mthca_raddr_seg *) wqe)->raddr =
+				cpu_to_be64(wr->wr.rdma.remote_addr);
+			((struct mthca_raddr_seg *) wqe)->rkey =
+				cpu_to_be32(wr->wr.rdma.rkey);
+			((struct mthca_raddr_seg *) wqe)->reserved = 0;
+			wqe += sizeof (struct mthca_raddr_seg);
+			size += sizeof (struct mthca_raddr_seg) / 16;
+			break;
+
+		default:
+			/* No extra segments required for sends */
+			break;
+		}
+
+		break;
+
+	case UC:
+		switch (wr->opcode) {
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-		case IB_WR_RDMA_READ:
 			((struct mthca_raddr_seg *) wqe)->raddr =
 				cpu_to_be64(wr->wr.rdma.remote_addr);
 			((struct mthca_raddr_seg *) wqe)->rkey =