author    Linus Torvalds <torvalds@g5.osdl.org>  2006-01-08 23:18:44 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-08 23:18:44 -0500
commit    5367f2d67c7d0bf1faae90e6e7b4e2ac3c9b5e0f (patch)
tree      cb2d487774571ec608bcf4241ec70fc19a0b3ad6 /drivers/infiniband
parent    64ca9004b819ab87648dbfc78f3ef49ee491343e (diff)
parent    4f8448dfe8d3804fadad90c9b77494238b4a4eae (diff)

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                 |  16
-rw-r--r--  drivers/infiniband/core/user_mad.c           |   4
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c         |  21
-rw-r--r--  drivers/infiniband/core/verbs.c              |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      |  12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       |  23
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c       |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c     |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c      |  54
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c  |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 265
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c      |   2
12 files changed, 250 insertions, 163 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 02110e00d145..3a611fe5497e 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -308,10 +308,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
 	int ret;
+	static int next_id;
 
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
-		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
+		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
 					(__force int *) &cm_id_priv->id.local_id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
@@ -684,6 +685,13 @@ retest:
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
 		break;
 	case IB_CM_REQ_SENT:
+		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
+			       &cm_id_priv->av.port->cm_dev->ca_guid,
+			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+			       NULL, 0);
+		break;
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
@@ -694,10 +702,8 @@ retest:
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-			       &cm_id_priv->av.port->cm_dev->ca_guid,
-			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
-			       NULL, 0);
+		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+			       NULL, 0, NULL, 0);
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index eb7f52537ccc..c908de8db5a9 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -197,8 +197,8 @@ static void send_handler(struct ib_mad_agent *agent,
 		memcpy(timeout->mad.data, packet->mad.data,
 		       sizeof (struct ib_mad_hdr));
 
-		if (!queue_packet(file, agent, timeout))
-			return;
+		if (queue_packet(file, agent, timeout))
+			kfree(timeout);
 	}
 out:
 	kfree(packet);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a57d021d435a..a02c5a05c984 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -489,6 +489,7 @@ err_idr:
 
 err_unreg:
 	ib_dereg_mr(mr);
+	atomic_dec(&pd->usecnt);
 
 err_up:
 	up(&ib_uverbs_idr_mutex);
@@ -593,13 +594,18 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (cmd.comp_vector >= file->device->num_comp_vectors)
 		return -EINVAL;
 
-	if (cmd.comp_channel >= 0)
-		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
-
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
 		return -ENOMEM;
 
+	if (cmd.comp_channel >= 0) {
+		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
+		if (!ev_file) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
 	uobj->uobject.user_handle = cmd.user_handle;
 	uobj->uobject.context = file->ucontext;
 	uobj->uverbs_file = file;
@@ -663,6 +669,8 @@ err_up:
 	ib_destroy_cq(cq);
 
 err:
+	if (ev_file)
+		ib_uverbs_release_ucq(file, ev_file, uobj);
 	kfree(uobj);
 	return ret;
 }
@@ -935,6 +943,11 @@ err_idr:
 
 err_destroy:
 	ib_destroy_qp(qp);
+	atomic_dec(&pd->usecnt);
+	atomic_dec(&attr.send_cq->usecnt);
+	atomic_dec(&attr.recv_cq->usecnt);
+	if (attr.srq)
+		atomic_dec(&attr.srq->usecnt);
 
 err_up:
 	up(&ib_uverbs_idr_mutex);
@@ -1448,6 +1461,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	attr.sl = cmd.attr.sl;
 	attr.src_path_bits = cmd.attr.src_path_bits;
 	attr.static_rate = cmd.attr.static_rate;
+	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
 	attr.port_num = cmd.attr.port_num;
 	attr.grh.flow_label = cmd.attr.grh.flow_label;
 	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
@@ -1729,6 +1743,7 @@ err_idr:
 
 err_destroy:
 	ib_destroy_srq(srq);
+	atomic_dec(&pd->usecnt);
 
 err_up:
 	up(&ib_uverbs_idr_mutex);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4c15e112736c..c857361be449 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -107,9 +107,9 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
 
 	if (wc->wc_flags & IB_WC_GRH) {
 		ah_attr.ah_flags = IB_AH_GRH;
-		ah_attr.grh.dgid = grh->dgid;
+		ah_attr.grh.dgid = grh->sgid;
 
-		ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
+		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
 					 &gid_index);
 		if (ret)
 			return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 9ed34587fc5c..22ac72bc20c3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -937,10 +937,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	if (err)
 		goto out;
 
-	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
-	dev_lim->max_srq_sz = (1 << field) - 1;
-	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
-	dev_lim->max_qp_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
 	dev_lim->reserved_qps = 1 << (field & 0xf);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
@@ -1056,6 +1052,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
 
 	if (mthca_is_memfree(dev)) {
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+		dev_lim->max_srq_sz = 1 << field;
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+		dev_lim->max_qp_sz = 1 << field;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
 		dev_lim->hca.arbel.resize_srq = field & 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
@@ -1087,6 +1087,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		mthca_dbg(dev, "Max ICM size %lld MB\n",
 			  (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
 	} else {
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+		dev_lim->max_srq_sz = (1 << field) - 1;
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+		dev_lim->max_qp_sz = (1 << field) - 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
 		dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
 		dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 4a8adcef2079..96f1a86bf049 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -128,12 +128,12 @@ struct mthca_err_cqe {
 	__be32 my_qpn;
 	u32 reserved1[3];
 	u8 syndrome;
-	u8 reserved2;
+	u8 vendor_err;
 	__be16 db_cnt;
-	u32 reserved3;
+	u32 reserved2;
 	__be32 wqe;
 	u8 opcode;
-	u8 reserved4[2];
+	u8 reserved3[2];
 	u8 owner;
 };
 
@@ -253,6 +253,15 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	wake_up(&cq->wait);
 }
 
+static inline int is_recv_cqe(struct mthca_cqe *cqe)
+{
+	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+	    MTHCA_ERROR_CQE_OPCODE_MASK)
+		return !(cqe->opcode & 0x01);
+	else
+		return !(cqe->is_send & 0x80);
+}
+
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq)
 {
@@ -296,7 +305,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	while ((int) --prod_index - (int) cq->cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
 		if (cqe->my_qpn == cpu_to_be32(qpn)) {
-			if (srq)
+			if (srq && is_recv_cqe(cqe))
 				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
 			++nfreed;
 		} else if (nfreed)
@@ -333,8 +342,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 	}
 
 	/*
-	 * For completions in error, only work request ID, status (and
-	 * freed resource count for RD) have to be set.
+	 * For completions in error, only work request ID, status, vendor error
+	 * (and freed resource count for RD) have to be set.
 	 */
 	switch (cqe->syndrome) {
 	case SYNDROME_LOCAL_LENGTH_ERR:
@@ -396,6 +405,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 		break;
 	}
 
+	entry->vendor_err = cqe->vendor_err;
+
 	/*
 	 * Mem-free HCAs always generate one CQE per WQE, even in the
 	 * error case, so we don't have to check the doorbell count, etc.
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 34d68e5a72d8..e8a948f087c0 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -484,8 +484,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 			    u8 intr,
 			    struct mthca_eq *eq)
 {
-	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
-		PAGE_SIZE;
+	int npages;
 	u64 *dma_list = NULL;
 	dma_addr_t t;
 	struct mthca_mailbox *mailbox;
@@ -496,6 +495,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 
 	eq->dev = dev;
 	eq->nent = roundup_pow_of_two(max(nent, 2));
+	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 6f94b25f3acd..8b00d9a0f6f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -261,6 +261,10 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
 	}
 
 	err = mthca_dev_lim(mdev, &dev_lim);
+	if (err) {
+		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		goto err_disable;
+	}
 
 	profile = default_profile;
 	profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 2fc449da418d..77bc6c746f43 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -111,7 +111,8 @@ static int find_mgm(struct mthca_dev *dev,
 			goto out;
 		if (status) {
 			mthca_err(dev, "READ_MGM returned status %02x\n", status);
-			return -EINVAL;
+			err = -EINVAL;
+			goto out;
 		}
 
 		if (!memcmp(mgm->gid, zero_gid, 16)) {
@@ -126,7 +127,7 @@ static int find_mgm(struct mthca_dev *dev,
 			goto out;
 
 		*prev = *index;
-		*index = be32_to_cpu(mgm->next_gid_index) >> 5;
+		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
 	} while (*index);
 
 	*index = -1;
@@ -153,8 +154,10 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
 
-	if (down_interruptible(&dev->mcg_table.sem))
-		return -EINTR;
+	if (down_interruptible(&dev->mcg_table.sem)) {
+		err = -EINTR;
+		goto err_sem;
+	}
 
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -181,9 +184,8 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
-
+		memset(mgm, 0, sizeof *mgm);
 		memcpy(mgm->gid, gid->raw, 16);
-		mgm->next_gid_index = 0;
 	}
 
 	for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
@@ -209,6 +211,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (status) {
 		mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
 		err = -EINVAL;
+		goto out;
 	}
 
 	if (!link)
@@ -223,7 +226,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		goto out;
 	}
 
-	mgm->next_gid_index = cpu_to_be32(index << 5);
+	mgm->next_gid_index = cpu_to_be32(index << 6);
 
 	err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
 	if (err)
@@ -234,7 +237,12 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
  out:
+	if (err && link && index != -1) {
+		BUG_ON(index < dev->limits.num_mgms);
+		mthca_free(&dev->mcg_table.alloc, index);
+	}
 	up(&dev->mcg_table.sem);
+ err_sem:
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -255,8 +263,10 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
 
-	if (down_interruptible(&dev->mcg_table.sem))
-		return -EINTR;
+	if (down_interruptible(&dev->mcg_table.sem)) {
+		err = -EINTR;
+		goto err_sem;
+	}
 
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -305,13 +315,11 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (i != 1)
 		goto out;
 
-	goto out;
-
 	if (prev == -1) {
 		/* Remove entry from MGM */
-		if (be32_to_cpu(mgm->next_gid_index) >> 5) {
-			err = mthca_READ_MGM(dev,
-					     be32_to_cpu(mgm->next_gid_index) >> 5,
-					     mailbox, &status);
+		int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
+		if (amgm_index_to_free) {
+			err = mthca_READ_MGM(dev, amgm_index_to_free,
+					     mailbox, &status);
 			if (err)
 				goto out;
@@ -332,9 +340,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
+		if (amgm_index_to_free) {
+			BUG_ON(amgm_index_to_free < dev->limits.num_mgms);
+			mthca_free(&dev->mcg_table.alloc, amgm_index_to_free);
+		}
 	} else {
 		/* Remove entry from AMGM */
-		index = be32_to_cpu(mgm->next_gid_index) >> 5;
+		int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
 		err = mthca_READ_MGM(dev, prev, mailbox, &status);
 		if (err)
 			goto out;
@@ -344,7 +356,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			goto out;
 		}
 
-		mgm->next_gid_index = cpu_to_be32(index << 5);
+		mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
 
 		err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
 		if (err)
@@ -354,10 +366,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
+		BUG_ON(index < dev->limits.num_mgms);
+		mthca_free(&dev->mcg_table.alloc, index);
 	}
 
  out:
 	up(&dev->mcg_table.sem);
+ err_sem:
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -365,11 +380,12 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
 {
 	int err;
+	int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
 
 	err = mthca_alloc_init(&dev->mcg_table.alloc,
-			       dev->limits.num_amgms,
-			       dev->limits.num_amgms - 1,
-			       0);
+			       table_size,
+			       table_size - 1,
+			       dev->limits.num_mgms);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index d72fe95cba08..9fb985a016e9 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -233,7 +233,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 		for (i = 0; i < chunk->npages; ++i) {
 			if (chunk->mem[i].length >= offset) {
 				page = chunk->mem[i].page;
-				break;
+				goto out;
 			}
 			offset -= chunk->mem[i].length;
 		}
@@ -485,6 +485,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 			put_page(db_tab->page[i].mem.page);
 		}
 	}
+
+	kfree(db_tab);
 }
 
 int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 7450550db736..564b6d51c394 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -383,12 +383,10 @@ static const struct {
 				[UC] = (IB_QP_CUR_STATE |
 					IB_QP_ALT_PATH |
 					IB_QP_ACCESS_FLAGS |
-					IB_QP_PKEY_INDEX |
 					IB_QP_PATH_MIG_STATE),
 				[RC] = (IB_QP_CUR_STATE |
 					IB_QP_ALT_PATH |
 					IB_QP_ACCESS_FLAGS |
-					IB_QP_PKEY_INDEX |
 					IB_QP_MIN_RNR_TIMER |
 					IB_QP_PATH_MIG_STATE),
 				[MLX] = (IB_QP_CUR_STATE |
@@ -476,9 +474,8 @@ static const struct {
 			.opt_param = {
 				[UD] = (IB_QP_CUR_STATE |
 					IB_QP_QKEY),
-				[UC] = IB_QP_CUR_STATE,
-				[RC] = (IB_QP_CUR_STATE |
-					IB_QP_MIN_RNR_TIMER),
+				[UC] = (IB_QP_CUR_STATE |
+					IB_QP_ACCESS_FLAGS),
 				[MLX] = (IB_QP_CUR_STATE |
 					IB_QP_QKEY),
 			}
@@ -522,6 +519,55 @@ static void init_port(struct mthca_dev *dev, int port)
 		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+				  int attr_mask)
+{
+	u8 dest_rd_atomic;
+	u32 access_flags;
+	u32 hw_access_flags = 0;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		dest_rd_atomic = attr->max_dest_rd_atomic;
+	else
+		dest_rd_atomic = qp->resp_depth;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		access_flags = attr->qp_access_flags;
+	else
+		access_flags = qp->atomic_rd_en;
+
+	if (!dest_rd_atomic)
+		access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+	if (access_flags & IB_ACCESS_REMOTE_READ)
+		hw_access_flags |= MTHCA_QP_BIT_RRE;
+	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+		hw_access_flags |= MTHCA_QP_BIT_RAE;
+	if (access_flags & IB_ACCESS_REMOTE_WRITE)
+		hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+	return cpu_to_be32(hw_access_flags);
+}
+
+static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+{
+	path->g_mylmc = ah->src_path_bits & 0x7f;
+	path->rlid = cpu_to_be16(ah->dlid);
+	path->static_rate = !!ah->static_rate;
+
+	if (ah->ah_flags & IB_AH_GRH) {
+		path->g_mylmc |= 1 << 7;
+		path->mgid_index = ah->grh.sgid_index;
+		path->hop_limit = ah->grh.hop_limit;
+		path->sl_tclass_flowlabel =
+			cpu_to_be32((ah->sl << 28) |
+				    (ah->grh.traffic_class << 20) |
+				    (ah->grh.flow_label));
+		memcpy(path->rgid, ah->grh.dgid.raw, 16);
+	} else
+		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+}
+
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
@@ -591,6 +637,26 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		return -EINVAL;
 	}
 
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+		return -EINVAL;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+		return -EINVAL;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+		return -EINVAL;
+	}
+
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -665,28 +731,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_RNR_RETRY) {
-		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
-		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
+		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
+			attr->rnr_retry << 5;
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
+							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
-		qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
-		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
-		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
-			qp_context->pri_path.g_mylmc |= 1 << 7;
-			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
-			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
-			qp_context->pri_path.sl_tclass_flowlabel =
-				cpu_to_be32((attr->ah_attr.sl << 28) |
-					    (attr->ah_attr.grh.traffic_class << 20) |
-					    (attr->ah_attr.grh.flow_label));
-			memcpy(qp_context->pri_path.rgid,
-			       attr->ah_attr.grh.dgid.raw, 16);
-		} else {
-			qp_context->pri_path.sl_tclass_flowlabel =
-				cpu_to_be32(attr->ah_attr.sl << 28);
-		}
+		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
 
@@ -695,7 +747,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
 	}
 
-	/* XXX alt_path */
+	if (attr_mask & IB_QP_ALT_PATH) {
+		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
+			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
+				  attr->alt_port_num);
+			return -EINVAL;
+		}
+
+		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
+							      attr->alt_port_num << 24);
+		qp_context->alt_path.ackto = attr->alt_timeout << 3;
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
+	}
 
 	/* leave rdd as 0 */
 	qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
@@ -703,9 +767,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
 	qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
 					  (MTHCA_FLIGHT_LIMIT << 24) |
-					  MTHCA_QP_BIT_SRE |
-					  MTHCA_QP_BIT_SWE |
-					  MTHCA_QP_BIT_SAE);
+					  MTHCA_QP_BIT_SWE);
 	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
 		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
 	if (attr_mask & IB_QP_RETRY_CNT) {
@@ -714,9 +776,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
-						       ffs(attr->max_rd_atomic) - 1 : 0,
-						       7) << 21);
+		if (attr->max_rd_atomic) {
+			qp_context->params1 |=
+				cpu_to_be32(MTHCA_QP_BIT_SRE |
+					    MTHCA_QP_BIT_SAE);
+			qp_context->params1 |=
+				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
+		}
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
 	}
 
@@ -729,71 +795,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
 	}
 
-	if (attr_mask & IB_QP_ACCESS_FLAGS) {
-		qp_context->params2 |=
-			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-				    MTHCA_QP_BIT_RWE : 0);
-
-		/*
-		 * Only enable RDMA reads and atomics if we have
-		 * responder resources set to a non-zero value.
-		 */
-		if (qp->resp_depth) {
-			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
-					    MTHCA_QP_BIT_RRE : 0);
-			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
-					    MTHCA_QP_BIT_RAE : 0);
-		}
-
-		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-							MTHCA_QP_OPTPAR_RRE |
-							MTHCA_QP_OPTPAR_RAE);
-
-		qp->atomic_rd_en = attr->qp_access_flags;
-	}
-
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		u8 rra_max;
-
-		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
-			/*
-			 * Lowering our responder resources to zero.
-			 * Turn off reads RDMA and atomics as responder.
-			 * (RRE/RAE in params2 already zero)
-			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
-								MTHCA_QP_OPTPAR_RAE);
-		}
-
-		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
-			/*
-			 * Increasing our responder resources from
-			 * zero. Turn on RDMA reads and atomics as
-			 * appropriate.
-			 */
+		if (attr->max_dest_rd_atomic)
 			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
-					    MTHCA_QP_BIT_RRE : 0);
-			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
-					    MTHCA_QP_BIT_RAE : 0);
-
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
-								MTHCA_QP_OPTPAR_RAE);
-		}
+				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
 
-		for (rra_max = 0;
-		     1 << rra_max < attr->max_dest_rd_atomic &&
-		     rra_max < dev->qp_table.rdb_shift;
-		     ++rra_max)
-			; /* nothing */
-
-		qp_context->params2 |= cpu_to_be32(rra_max << 21);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
+	}
 
-		qp->resp_depth = attr->max_dest_rd_atomic;
+	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+							MTHCA_QP_OPTPAR_RRE |
+							MTHCA_QP_OPTPAR_RAE);
 	}
 
 	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
@@ -835,8 +849,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		err = -EINVAL;
 	}
 
-	if (!err)
+	if (!err) {
 		qp->state = new_state;
+		if (attr_mask & IB_QP_ACCESS_FLAGS)
+			qp->atomic_rd_en = attr->qp_access_flags;
+		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+			qp->resp_depth = attr->max_dest_rd_atomic;
+	}
 
 	mthca_free_mailbox(dev, mailbox);
 
@@ -885,18 +904,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	return err;
 }
 
-static void mthca_adjust_qp_caps(struct mthca_dev *dev,
-				 struct mthca_pd *pd,
-				 struct mthca_qp *qp)
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
 {
-	int max_data_size;
-
 	/*
 	 * Calculate the maximum size of WQE s/g segments, excluding
 	 * the next segment and other non-data segments.
 	 */
-	max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
-		sizeof (struct mthca_next_seg);
+	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
 
 	switch (qp->transport) {
 	case MLX:
@@ -915,11 +929,24 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
 		break;
 	}
 
+	return max_data_size;
+}
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
 	/* We don't support inline data for kernel QPs (yet). */
-	if (!pd->ibpd.uobject)
-		qp->max_inline_data = 0;
-	else
-		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+				 struct mthca_pd *pd,
+				 struct mthca_qp *qp)
+{
+	int max_data_size = mthca_max_data_size(dev, qp,
+						min(dev->limits.max_desc_sz,
+						    1 << qp->sq.wqe_shift));
+
+	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
 
 	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
 			      max_data_size / sizeof (struct mthca_data_seg));
@@ -1186,13 +1213,23 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 }
 
 static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
-			     struct mthca_qp *qp)
+			     struct mthca_pd *pd, struct mthca_qp *qp)
 {
+	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
 	/* Sanity check QP size before proceeding */
 	if (cap->max_send_wr > dev->limits.max_wqes ||
 	    cap->max_recv_wr > dev->limits.max_wqes ||
 	    cap->max_send_sge > dev->limits.max_sg ||
-	    cap->max_recv_sge > dev->limits.max_sg)
+	    cap->max_recv_sge > dev->limits.max_sg ||
+	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
+		return -EINVAL;
+
+	/*
+	 * For MLX transport we need 2 extra S/G entries:
+	 * one for the header and one for the checksum at the end
+	 */
+	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
 		return -EINVAL;
 
 	if (mthca_is_memfree(dev)) {
@@ -1211,14 +1248,6 @@ static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
 				    MTHCA_INLINE_CHUNK_SIZE) /
 				   sizeof (struct mthca_data_seg));
 
-	/*
-	 * For MLX transport we need 2 extra S/G entries:
-	 * one for the header and one for the checksum at the end
-	 */
-	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
-	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
-		return -EINVAL;
-
 	return 0;
 }
 
@@ -1233,7 +1262,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 {
 	int err;
 
-	err = mthca_set_qp_size(dev, cap, qp);
+	err = mthca_set_qp_size(dev, cap, pd, qp);
 	if (err)
 		return err;
 
@@ -1276,7 +1305,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
 	int err;
 
-	err = mthca_set_qp_size(dev, cap, &sqp->qp);
+	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f7d234295efe..e7e153d9c4c6 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -201,7 +201,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	if (mthca_is_memfree(dev))
 		srq->max = roundup_pow_of_two(srq->max + 1);
 
-	ds = min(64UL,
+	ds = max(64UL,
 		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
 				    srq->max_gs * sizeof (struct mthca_data_seg)));
 	srq->wqe_shift = long_log2(ds);