author     Linus Torvalds <torvalds@linux-foundation.org>  2008-05-23 14:11:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-05-23 14:11:44 -0400
commit     c2448278e3be3a43fe0343e138444742af1e99ae
tree       f48201f00ebfb03e13c26e92514e9f2d118923e9  /drivers/infiniband
parent     e6b027a398bcfbb8897353a88a2edfcc97fd604d
parent     5a4f2b675210718aceb4abf41617a3af31bba718
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/mad: Fix kernel crash when .process_mad() returns SUCCESS|CONSUMED
  IPoIB: Test for NULL broadcast object in ipoib_mcast_join_finish()
MAINTAINERS: Add cxgb3 and iw_cxgb3 NIC and iWARP driver entries
IB/mlx4: Fix creation of kernel QP with max number of send s/g entries
IB/mthca: Fix max_sge value returned by query_device
RDMA/cxgb3: Fix uninitialized variable warning in iwch_post_send()
IB/mlx4: Fix uninitialized-var warning in mlx4_ib_post_send()
IB/ipath: Fix UC receive completion opcode for RDMA WRITE with immediate
IB/ipath: Fix printk format for ipath_sdma_status
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/core/mad.c                   |  4
 -rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c           |  2
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c        |  4
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c          |  4
 -rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 | 15
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c        | 14
 -rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  6
 7 files changed, 36 insertions, 13 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index fbe16d5250a4..1adf2efd3cb3 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -747,7 +747,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 		kmem_cache_free(ib_mad_cache, mad_priv);
-		break;
+		kfree(local);
+		ret = 1;
+		goto out;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 79dbe5beae52..992613799228 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -229,7 +229,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	u8 t3_wr_flit_cnt;
+	u8 uninitialized_var(t3_wr_flit_cnt);
 	enum t3_wr_opcode t3_wr_opcode = 0;
 	enum t3_wr_flags t3_wr_flags;
 	struct iwch_qp *qhp;
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 3697449c1ba4..0a8c1b8091a2 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -345,7 +345,7 @@ resched:
 	 * state change
 	 */
 	if (jiffies > dd->ipath_sdma_abort_jiffies) {
-		ipath_dbg("looping with status 0x%016llx\n",
+		ipath_dbg("looping with status 0x%08lx\n",
 			  dd->ipath_sdma_status);
 		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
 	}
@@ -615,7 +615,7 @@ void ipath_restart_sdma(struct ipath_devdata *dd)
 	}
 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 	if (!needed) {
-		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
+		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
 			dd->ipath_sdma_status);
 		goto bail;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 7fd18e833907..0596ec16fcbd 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -407,12 +407,11 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			dev->n_pkt_drops++;
 			goto done;
 		}
-		/* XXX Need to free SGEs */
+		wc.opcode = IB_WC_RECV;
 	last_imm:
 		ipath_copy_sge(&qp->r_sge, data, tlen);
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
-		wc.opcode = IB_WC_RECV;
 		wc.qp = &qp->ibqp;
 		wc.src_qp = qp->remote_qpn;
 		wc.slid = qp->remote_ah_attr.dlid;
@@ -514,6 +513,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			goto done;
 		}
 		wc.byte_len = qp->r_len;
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
 		goto last_imm;
 
 	case OP(RDMA_WRITE_LAST):
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8e02ecfec188..a80df22deae8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -333,6 +333,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
 		send_wqe_overhead(type, qp->flags);
 
+	if (s > dev->dev->caps.max_sq_desc_sz)
+		return -EINVAL;
+
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
 	 * request can include multiple units of 1 << wqe_shift.  This
@@ -372,9 +375,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
 
 	for (;;) {
-		if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
-			return -EINVAL;
-
 		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
 
 		/*
@@ -395,7 +395,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 			++qp->sq.wqe_shift;
 	}
 
-	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
+			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
 			 send_wqe_overhead(type, qp->flags)) /
 		sizeof (struct mlx4_wqe_data_seg);
 
@@ -411,7 +412,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
 	cap->max_send_wr  = qp->sq.max_post =
 		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
-	cap->max_send_sge = qp->sq.max_gs;
+	cap->max_send_sge = min(qp->sq.max_gs,
+				min(dev->dev->caps.max_sq_sg,
+				    dev->dev->caps.max_rq_sg));
 	/* We don't support inline sends for kernel QPs (yet) */
 	cap->max_inline_data = 0;
 
@@ -1457,7 +1460,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
-	unsigned seglen;
+	unsigned uninitialized_var(seglen);
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
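[Editor's note] A minimal stand-alone sketch of the clamping the mlx4 hunks above perform when sizing a kernel QP's send queue. All numeric values below (descriptor size, WQE overhead, span, device sg caps) are assumed example values, not real ConnectX limits; only the 16-byte data-segment size is taken from struct mlx4_wqe_data_seg.

/* Illustrative only -- example values, not device data. */
#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_sq_desc_sz = 512;  /* assumed dev->caps.max_sq_desc_sz, bytes */
	int wqe_span       = 1024; /* assumed sq_max_wqes_per_wr << wqe_shift */
	int wqe_overhead   = 64;   /* assumed send_wqe_overhead() for this QP type */
	int data_seg_sz    = 16;   /* sizeof(struct mlx4_wqe_data_seg) */
	int max_sq_sg      = 32;   /* assumed dev->caps.max_sq_sg */
	int max_rq_sg      = 32;   /* assumed dev->caps.max_rq_sg */

	/* qp->sq.max_gs: the usable span is now capped by the descriptor size */
	int max_gs = (min_i(max_sq_desc_sz, wqe_span) - wqe_overhead) / data_seg_sz;

	/* cap->max_send_sge: further clamped by the device's sg limits */
	int max_send_sge = min_i(max_gs, min_i(max_sq_sg, max_rq_sg));

	printf("max_gs = %d, max_send_sge = %d\n", max_gs, max_send_sge);
	return 0;
}

With these numbers max_gs comes out as (512 - 64) / 16 = 28, so max_send_sge is reported as 28 rather than the raw device sg cap.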
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9ebadd6e0cfb..200cf13fc9bb 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -45,6 +45,7 @@
 #include "mthca_cmd.h"
 #include "mthca_profile.h"
 #include "mthca_memfree.h"
+#include "mthca_wqe.h"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
@@ -200,7 +201,18 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 	mdev->limits.gid_table_len      = dev_lim->max_gids;
 	mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
 	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
-	mdev->limits.max_sg             = dev_lim->max_sg;
+	/*
+	 * Need to allow for worst case send WQE overhead and check
+	 * whether max_desc_sz imposes a lower limit than max_sg; UD
+	 * send has the biggest overhead.
+	 */
+	mdev->limits.max_sg             = min_t(int, dev_lim->max_sg,
+					      (dev_lim->max_desc_sz -
+					       sizeof (struct mthca_next_seg) -
+					       (mthca_is_memfree(mdev) ?
+						sizeof (struct mthca_arbel_ud_seg) :
+						sizeof (struct mthca_tavor_ud_seg))) /
+						sizeof (struct mthca_data_seg));
 	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
 	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
 	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
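[Editor's note] A rough worked example of the mthca max_sg clamp added above. The segment sizes and firmware limits below are stand-in values chosen only to show the arithmetic; the real sizeof values and max_desc_sz depend on the HCA.

/* Illustrative only -- assumed example sizes, not real mthca values. */
#include <stdio.h>

int main(void)
{
	int fw_max_sg   = 64;    /* assumed dev_lim->max_sg from firmware */
	int max_desc_sz = 1008;  /* assumed dev_lim->max_desc_sz, bytes */
	int next_seg    = 16;    /* stand-in for sizeof(struct mthca_next_seg) */
	int ud_seg      = 48;    /* stand-in for the worst-case UD segment size */
	int data_seg    = 16;    /* stand-in for sizeof(struct mthca_data_seg) */

	/* How many data segments actually fit in one send descriptor */
	int fit_in_desc = (max_desc_sz - next_seg - ud_seg) / data_seg;

	/* query_device now reports the smaller of the two limits */
	int max_sg = fw_max_sg < fit_in_desc ? fw_max_sg : fit_in_desc;

	printf("max_sg reported: %d (firmware said %d)\n", max_sg, fw_max_sg);
	return 0;
}

Here (1008 - 16 - 48) / 16 = 59, so the reported max_sg is clamped from 64 down to 59, which is the whole point of the fix: never advertise more s/g entries than a descriptor can hold.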
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d00a2c174aee..3f663fb852c1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -194,7 +194,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 	/* Set the cached Q_Key before we attach if it's the broadcast group */
 	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
 		    sizeof (union ib_gid))) {
+		spin_lock_irq(&priv->lock);
+		if (!priv->broadcast) {
+			spin_unlock_irq(&priv->lock);
+			return -EAGAIN;
+		}
 		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+		spin_unlock_irq(&priv->lock);
 		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
 	}
 