author    David S. Miller <davem@davemloft.net>  2017-01-16 15:08:29 -0500
committer David S. Miller <davem@davemloft.net>  2017-01-16 15:08:29 -0500
commit    617125e759673873e6503481d7dabaee6ded7af8 (patch)
tree      11646877fd1106de5be2bb343c74c131f59f1549
parent    b618ab4561d40664492cf9f9507f19a1c8272970 (diff)
parent    9577b174cd0323d287c994ef0891db71666d0765 (diff)
Merge branch 'mlx4-core-fixes'
Tariq Toukan says:

====================
mlx4 core fixes

This patchset contains bug fixes from Jack to the mlx4 Core driver.

Patch 1 solves a race in the flow of CQ free.
Patch 2 moves some qp context flags update to the correct qp transition.
Patch 3 eliminates warnings from the path of SRQ_LIMIT that flood the
message log, and keeps them only in the path of SRQ_CATAS_ERROR.

Series generated against net commit:
1a717fcf8bbe Merge tag 'mac80211-for-davem-2017-01-13' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c               | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c               | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |  5
3 files changed, 37 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
 	struct mlx4_cq *cq;
 
+	rcu_read_lock();
 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
 			       cqn & (dev->caps.num_cqs - 1));
+	rcu_read_unlock();
+
 	if (!cq) {
 		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	++cq->arm_sn;
 
 	cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
 	struct mlx4_cq *cq;
 
-	spin_lock(&cq_table->lock);
-
+	rcu_read_lock();
 	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-
-	spin_unlock(&cq_table->lock);
+	rcu_read_unlock();
 
 	if (!cq) {
-		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	cq->event(cq, event_type);
-
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
 }
 
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	if (err)
 		return err;
 
-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);
 	if (err)
 		goto err_icm;
 
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	return 0;
 
 err_radix:
-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);
 
 err_icm:
 	mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	if (err)
 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
+	spin_lock(&cq_table->lock);
+	radix_tree_delete(&cq_table->tree, cq->cqn);
+	spin_unlock(&cq_table->lock);
+
 	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
 	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
 	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
 		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
-	spin_lock_irq(&cq_table->lock);
-	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
-
 	if (atomic_dec_and_test(&cq->refcount))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
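
The cq.c hunks above reorder teardown: the CQ is now unpublished from the radix tree before synchronize_irq(), so a concurrent mlx4_cq_completion() or mlx4_cq_event() either finds the CQ (and the free path waits the handler out) or misses it cleanly, and the per-lookup refcount dance can go away. A minimal user-space analogue of that ordering, using C11 atomics instead of the kernel's RCU/IRQ primitives; all names here are illustrative, not mlx4's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cq {
	atomic_int refcount;             /* the owner's reference, as in mlx4_cq */
	void (*comp)(struct cq *);
};

static _Atomic(struct cq *) table;       /* stands in for the radix tree */
static atomic_int in_handler;            /* crude stand-in for synchronize_irq() */

static void completion_handler(void)     /* interrupt-handler analogue */
{
	atomic_fetch_add(&in_handler, 1);
	struct cq *cq = atomic_load(&table);     /* radix_tree_lookup analogue */
	if (cq)
		cq->comp(cq);            /* safe: cq_free drains handlers first */
	atomic_fetch_sub(&in_handler, 1);
}

static void cq_free(struct cq *cq)
{
	atomic_store(&table, NULL);      /* 1. unpublish first (the fix) */
	while (atomic_load(&in_handler))
		;                        /* 2. kernel: synchronize_irq() */
	if (atomic_fetch_sub(&cq->refcount, 1) == 1)
		free(cq);                /* 3. last reference gone: reclaim */
}

static void on_comp(struct cq *cq) { (void)cq; puts("completion"); }

int main(void)
{
	struct cq *cq = malloc(sizeof(*cq));
	atomic_init(&cq->refcount, 1);
	cq->comp = on_comp;
	atomic_store(&table, cq);
	completion_handler();            /* finds the CQ, runs ->comp */
	cq_free(cq);                     /* unpublish, drain, reclaim */
	completion_handler();            /* lookup fails: the "bogus CQ" path */
	return 0;
}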
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..0509996957d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				 __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+				 __func__, be32_to_cpu(eqe->event.srq.srqn),
+				 eq->eqn);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					eq->eqn, eq->cons_index, ret);
 				break;
 			}
-			mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
-				  __func__, slave,
-				  be32_to_cpu(eqe->event.srq.srqn),
-				  eqe->type, eqe->subtype);
+			if (eqe->type ==
+			    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+					  __func__, slave,
+					  be32_to_cpu(eqe->event.srq.srqn),
+					  eqe->type, eqe->subtype);
 
 			if (!ret && slave != dev->caps.function) {
-				mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
-					  __func__, eqe->type,
-					  eqe->subtype, slave);
+				if (eqe->type ==
+				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+						  __func__, eqe->type,
+						  eqe->subtype, slave);
 				mlx4_slave_event(dev, slave, eqe);
 				break;
 			}
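
The eq.c change leans on intentional switch fallthrough: SRQ_LIMIT shares SRQ_CATAS_ERROR's forwarding path, while the mlx4_warn() calls are now gated on eqe->type so only the catastrophic event stays noisy and SRQ_LIMIT is logged at debug level. A stripped-down sketch of that shape; the event codes and logging calls are illustrative, not the mlx4 values:

#include <stdio.h>

enum { EV_SRQ_LIMIT = 1, EV_SRQ_CATAS_ERROR = 2 };

static void handle_srq_event(int type, unsigned int srqn)
{
	switch (type) {
	case EV_SRQ_LIMIT:
		/* debug-level only: this event fires routinely under load */
		fprintf(stderr, "debug: SRQ_LIMIT srq=0x%x\n", srqn);
		/* fall through: same forwarding path as the fatal event */
	case EV_SRQ_CATAS_ERROR:
		if (type == EV_SRQ_CATAS_ERROR)
			fprintf(stderr, "warn: SRQ catastrophic error srq=0x%x\n",
				srqn);
		/* ... forward the event to the owning slave ... */
		break;
	}
}

int main(void)
{
	handle_srq_event(EV_SRQ_LIMIT, 0x2a);        /* quiet path */
	handle_srq_event(EV_SRQ_CATAS_ERROR, 0x2a);  /* warns */
	return 0;
}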
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 56185a0b827d..1822382212ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 		put_res(dev, slave, srqn, RES_SRQ);
 		qp->srq = srq;
 	}
+
+	/* Save param3 for dynamic changes from VST back to VGT */
+	qp->param3 = qpc->param3;
 	put_res(dev, slave, rcqn, RES_CQ);
 	put_res(dev, slave, mtt_base, RES_MTT);
 	res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
 	int qpn = vhcr->in_modifier & 0x7fffff;
 	struct res_qp *qp;
 	u8 orig_sched_queue;
-	__be32 orig_param3 = qpc->param3;
 	u8 orig_vlan_control = qpc->pri_path.vlan_control;
 	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
 	u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
 	 */
 	if (!err) {
 		qp->sched_queue = orig_sched_queue;
-		qp->param3 = orig_param3;
 		qp->vlan_control = orig_vlan_control;
 		qp->fvl_rx = orig_fvl_rx;
 		qp->pri_path_fl = orig_pri_path_fl;