author	Saeed Mahameed <saeedm@mellanox.com>	2018-02-01 07:37:07 -0500
committer	Saeed Mahameed <saeedm@mellanox.com>	2018-02-15 03:30:02 -0500
commit	3ac7afdbcf243d6c79c1569d9e29aef0096e4743 (patch)
tree	2c06e76ba2670c5996c848fa81cc25582301cf3d
parent	f105b45bf77ced96e516e1cd771c41bb7e8c830b (diff)
net/mlx5: Move CQ completion and event forwarding logic to eq.c
Since the CQ tree is now per-EQ, CQ completion and event forwarding have
become EQ-specific logic. Move that logic to eq.c and make the functions
static.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Gal Pressman <galp@mellanox.com>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/cq.c	45
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eq.c	49
-rw-r--r--	include/linux/mlx5/driver.h	2
3 files changed, 47 insertions(+), 49 deletions(-)
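The moved lookup helper documents a contract ("caller must eventually call mlx5_cq_put on the returned cq") that rests on per-CQ reference counting. As a rough orientation sketch (not part of this patch; the real helpers live alongside struct mlx5_core_cq in include/linux/mlx5/cq.h and may differ in detail), the hold/put pair can be pictured as:

#include <linux/mlx5/cq.h>	/* struct mlx5_core_cq */

/* Sketch only, not from this patch: assumes cq->refcount is a
 * refcount_t and cq->free is a struct completion, as in the mlx5
 * core of this era.
 */
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
{
	refcount_inc(&cq->refcount);
}

static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
{
	/* on the last put, wake a waiter (e.g. the destroy path) */
	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

With something like this in place, mlx5_eq_cq_get() can safely hand a CQ pointer to the interrupt path even while the same CQ is being torn down on another CPU.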
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 06dc7bd302ed..669ed16938b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -85,51 +85,6 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
 	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
 
-/* caller must eventually call mlx5_cq_put on the returned cq */
-static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
-{
-	struct mlx5_cq_table *table = &eq->cq_table;
-	struct mlx5_core_cq *cq = NULL;
-
-	spin_lock(&table->lock);
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (likely(cq))
-		mlx5_cq_hold(cq);
-	spin_unlock(&table->lock);
-
-	return cq;
-}
-
-void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn)
-{
-	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
-	if (unlikely(!cq)) {
-		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	++cq->arm_sn;
-
-	cq->comp(cq);
-
-	mlx5_cq_put(cq);
-}
-
-void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
-{
-	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
-	if (unlikely(!cq)) {
-		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	cq->event(cq, event_type);
-
-	mlx5_cq_put(cq);
-}
-
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c1f0468e95bd..7e442b38a8ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev,
 	}
 }
 
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_cq_table *table = &eq->cq_table;
+	struct mlx5_core_cq *cq = NULL;
+
+	spin_lock(&table->lock);
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (likely(cq))
+		mlx5_cq_hold(cq);
+	spin_unlock(&table->lock);
+
+	return cq;
+}
+
+static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	++cq->arm_sn;
+
+	cq->comp(cq);
+
+	mlx5_cq_put(cq);
+}
+
+static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	cq->event(cq, event_type);
+
+	mlx5_cq_put(cq);
+}
+
 static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 {
 	struct mlx5_eq *eq = eq_ptr;
@@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 		switch (eqe->type) {
 		case MLX5_EVENT_TYPE_COMP:
 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
-			mlx5_cq_completion(eq, cqn);
+			mlx5_eq_cq_completion(eq, cqn);
 			break;
 		case MLX5_EVENT_TYPE_DCT_DRAINED:
 			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
@@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
 				       cqn, eqe->data.cq_err.syndrome);
-			mlx5_cq_event(eq, cqn, eqe->type);
+			mlx5_eq_cq_event(eq, cqn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
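The reason the now-static handlers take and drop a reference becomes visible at teardown: the CQ may only be freed once any in-flight handler on another CPU has dropped its hold. A hypothetical destroy-side sketch (the function name and exact sequence are assumptions for illustration; this patch does not touch the destroy path):

/* Hypothetical sketch, not from this patch. */
static void example_destroy_cq(struct mlx5_core_cq *cq)
{
	/* drop the initial reference taken when the CQ was created */
	mlx5_cq_put(cq);

	/* if mlx5_eq_cq_completion()/mlx5_eq_cq_event() still holds a
	 * reference on another CPU, block until its mlx5_cq_put()
	 * completes cq->free; only then is freeing the CQ safe */
	wait_for_completion(&cq->free);
}

This is also why mlx5_eq_cq_get() takes its hold under table->lock: the radix tree lookup and the refcount increment are atomic with respect to the destroy side, which presumably removes the CQ from the tree under the same lock.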
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 96e003db2bcd..09e2f3e8753c 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1049,12 +1049,10 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name,
 		       enum mlx5_eq_type type);