diff options
author | Saeed Mahameed <saeedm@mellanox.com> | 2018-02-01 07:28:17 -0500 |
---|---|---|
committer | Saeed Mahameed <saeedm@mellanox.com> | 2018-02-15 03:30:00 -0500 |
commit | d5c07157dd4f5ab9123eaab7db572ca360c19a55 (patch) | |
tree | 50946aadcd5e581e55784f1ab4e9bca424ac762e | |
parent | d2ff4fa575000058def5f5c602784e233211d4e7 (diff) |
net/mlx5: EQ add/del CQ API
Add API to add/del a CQ to/from an EQ's CQ table, to be used in cq.c upon CQ
creation/destruction, as CQ table is now private to eq.c.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Gal Pressman <galp@mellanox.com>
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/cq.c | 60 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/eq.c | 34 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4 |
3 files changed, 53 insertions, 45 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 9feeb555e937..f6e478d05ecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c | |||
@@ -137,22 +137,17 @@ void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) | |||
137 | int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | 137 | int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, |
138 | u32 *in, int inlen) | 138 | u32 *in, int inlen) |
139 | { | 139 | { |
140 | int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); | ||
141 | u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; | ||
140 | u32 out[MLX5_ST_SZ_DW(create_cq_out)]; | 142 | u32 out[MLX5_ST_SZ_DW(create_cq_out)]; |
141 | u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; | 143 | u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; |
142 | u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; | 144 | struct mlx5_eq *eq; |
143 | int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), | ||
144 | c_eqn); | ||
145 | struct mlx5_eq *eq, *async_eq; | ||
146 | struct mlx5_cq_table *table; | ||
147 | int err; | 145 | int err; |
148 | 146 | ||
149 | async_eq = &dev->priv.eq_table.async_eq; | ||
150 | eq = mlx5_eqn2eq(dev, eqn); | 147 | eq = mlx5_eqn2eq(dev, eqn); |
151 | if (IS_ERR(eq)) | 148 | if (IS_ERR(eq)) |
152 | return PTR_ERR(eq); | 149 | return PTR_ERR(eq); |
153 | 150 | ||
154 | table = &eq->cq_table; | ||
155 | |||
156 | memset(out, 0, sizeof(out)); | 151 | memset(out, 0, sizeof(out)); |
157 | MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); | 152 | MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); |
158 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 153 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
@@ -172,18 +167,14 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | |||
172 | INIT_LIST_HEAD(&cq->tasklet_ctx.list); | 167 | INIT_LIST_HEAD(&cq->tasklet_ctx.list); |
173 | 168 | ||
174 | /* Add to comp EQ CQ tree to recv comp events */ | 169 | /* Add to comp EQ CQ tree to recv comp events */ |
175 | spin_lock_irq(&table->lock); | 170 | err = mlx5_eq_add_cq(eq, cq); |
176 | err = radix_tree_insert(&table->tree, cq->cqn, cq); | ||
177 | spin_unlock_irq(&table->lock); | ||
178 | if (err) | 171 | if (err) |
179 | goto err_cmd; | 172 | goto err_cmd; |
180 | 173 | ||
181 | /* Add to async EQ CQ tree to recv Async events */ | 174 | /* Add to async EQ CQ tree to recv async events */ |
182 | spin_lock_irq(&async_eq->cq_table.lock); | 175 | err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq); |
183 | err = radix_tree_insert(&async_eq->cq_table.tree, cq->cqn, cq); | ||
184 | spin_unlock_irq(&async_eq->cq_table.lock); | ||
185 | if (err) | 176 | if (err) |
186 | goto err_cq_table; | 177 | goto err_cq_add; |
187 | 178 | ||
188 | cq->pid = current->pid; | 179 | cq->pid = current->pid; |
189 | err = mlx5_debug_cq_add(dev, cq); | 180 | err = mlx5_debug_cq_add(dev, cq); |
@@ -195,10 +186,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | |||
195 | 186 | ||
196 | return 0; | 187 | return 0; |
197 | 188 | ||
198 | err_cq_table: | 189 | err_cq_add: |
199 | spin_lock_irq(&table->lock); | 190 | mlx5_eq_del_cq(eq, cq); |
200 | radix_tree_delete(&table->tree, cq->cqn); | ||
201 | spin_unlock_irq(&table->lock); | ||
202 | err_cmd: | 191 | err_cmd: |
203 | memset(din, 0, sizeof(din)); | 192 | memset(din, 0, sizeof(din)); |
204 | memset(dout, 0, sizeof(dout)); | 193 | memset(dout, 0, sizeof(dout)); |
@@ -211,36 +200,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq); | |||
211 | 200 | ||
212 | int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) | 201 | int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) |
213 | { | 202 | { |
214 | struct mlx5_cq_table *asyn_eq_cq_table = &dev->priv.eq_table.async_eq.cq_table; | ||
215 | struct mlx5_cq_table *table = &cq->eq->cq_table; | ||
216 | u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; | 203 | u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; |
217 | u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; | 204 | u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; |
218 | struct mlx5_core_cq *tmp; | ||
219 | int err; | 205 | int err; |
220 | 206 | ||
221 | spin_lock_irq(&asyn_eq_cq_table->lock); | 207 | err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq); |
222 | tmp = radix_tree_delete(&asyn_eq_cq_table->tree, cq->cqn); | 208 | if (err) |
223 | spin_unlock_irq(&asyn_eq_cq_table->lock); | 209 | return err; |
224 | if (!tmp) { | ||
225 | mlx5_core_warn(dev, "cq 0x%x not found in async eq cq tree\n", cq->cqn); | ||
226 | return -EINVAL; | ||
227 | } | ||
228 | if (tmp != cq) { | ||
229 | mlx5_core_warn(dev, "corruption on cqn 0x%x in async eq cq tree\n", cq->cqn); | ||
230 | return -EINVAL; | ||
231 | } | ||
232 | 210 | ||
233 | spin_lock_irq(&table->lock); | 211 | err = mlx5_eq_del_cq(cq->eq, cq); |
234 | tmp = radix_tree_delete(&table->tree, cq->cqn); | 212 | if (err) |
235 | spin_unlock_irq(&table->lock); | 213 | return err; |
236 | if (!tmp) { | ||
237 | mlx5_core_warn(dev, "cq 0x%x not found in comp eq cq tree\n", cq->cqn); | ||
238 | return -EINVAL; | ||
239 | } | ||
240 | if (tmp != cq) { | ||
241 | mlx5_core_warn(dev, "corruption on cqn 0x%x in comp eq cq tree\n", cq->cqn); | ||
242 | return -EINVAL; | ||
243 | } | ||
244 | 214 | ||
245 | MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); | 215 | MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); |
246 | MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); | 216 | MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 328403ebf2f5..c1f0468e95bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -704,6 +704,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
704 | } | 704 | } |
705 | EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); | 705 | EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); |
706 | 706 | ||
707 | int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) | ||
708 | { | ||
709 | struct mlx5_cq_table *table = &eq->cq_table; | ||
710 | int err; | ||
711 | |||
712 | spin_lock_irq(&table->lock); | ||
713 | err = radix_tree_insert(&table->tree, cq->cqn, cq); | ||
714 | spin_unlock_irq(&table->lock); | ||
715 | |||
716 | return err; | ||
717 | } | ||
718 | |||
719 | int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) | ||
720 | { | ||
721 | struct mlx5_cq_table *table = &eq->cq_table; | ||
722 | struct mlx5_core_cq *tmp; | ||
723 | |||
724 | spin_lock_irq(&table->lock); | ||
725 | tmp = radix_tree_delete(&table->tree, cq->cqn); | ||
726 | spin_unlock_irq(&table->lock); | ||
727 | |||
728 | if (!tmp) { | ||
729 | mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn); | ||
730 | return -ENOENT; | ||
731 | } | ||
732 | |||
733 | if (tmp != cq) { | ||
734 | mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn); | ||
735 | return -EINVAL; | ||
736 | } | ||
737 | |||
738 | return 0; | ||
739 | } | ||
740 | |||
707 | int mlx5_eq_init(struct mlx5_core_dev *dev) | 741 | int mlx5_eq_init(struct mlx5_core_dev *dev) |
708 | { | 742 | { |
709 | int err; | 743 | int err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 394552f36fcf..54a1cbfb1b5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
39 | #include <linux/if_link.h> | 39 | #include <linux/if_link.h> |
40 | #include <linux/firmware.h> | 40 | #include <linux/firmware.h> |
41 | #include <linux/mlx5/cq.h> | ||
41 | 42 | ||
42 | #define DRIVER_NAME "mlx5_core" | 43 | #define DRIVER_NAME "mlx5_core" |
43 | #define DRIVER_VERSION "5.0-0" | 44 | #define DRIVER_VERSION "5.0-0" |
@@ -115,6 +116,9 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, | |||
115 | u32 element_id); | 116 | u32 element_id); |
116 | int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); | 117 | int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); |
117 | u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); | 118 | u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); |
119 | |||
120 | int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); | ||
121 | int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); | ||
118 | struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); | 122 | struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); |
119 | u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); | 123 | u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); |
120 | void mlx5_cq_tasklet_cb(unsigned long data); | 124 | void mlx5_cq_tasklet_cb(unsigned long data); |