author     Jack Morgenstein <jackm@dev.mellanox.co.il>  2011-12-12 23:13:36 -0500
committer  David S. Miller <davem@davemloft.net>        2011-12-13 13:56:06 -0500
commit     d7233386b21775a8b099d7d5dcc36d1e4642b896 (patch)
tree       774f01c6ad53590ff7dff2562aaf37ba2ed7e581
parent     fe9a2603c530fbf1e5d798901cec8d5b79976533 (diff)
mlx4_core: cq modifications for SRIOV
CQs are resources which are allocated and tracked by the PF driver. In multifunction mode, the allocation and ICM mapping are done in the resource tracker (a later patch in this sequence).

To accomplish this, we have "work" functions whose names start with "__", and "request" functions (same name, without the "__" prefix). If we are operating in multifunction mode, the request function sends comm-channel commands (ALLOC_RES or FREE_RES) to the PF; the PF driver's comm-channel handler ultimately invokes the "work" (__) function and returns the result. If we are not in multifunction mode, the work handler is invoked immediately.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
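For readers new to this pattern, here is a minimal, standalone sketch of the request/work split described above. It is illustration only, not driver code: the names fake_cq_alloc(), __fake_cq_alloc() and fake_comm_channel_alloc_res() are invented here, and the stub comm-channel call simply invokes the work function directly, the way the PF-side handler ultimately would. The real pair added by this patch is mlx4_cq_alloc_icm()/__mlx4_cq_alloc_icm() in the diff below.

/*
 * Minimal standalone sketch of the request/work split (not mlx4 code).
 * All identifiers here are invented for illustration.
 */
#include <stdio.h>
#include <errno.h>

static int is_mfunc;            /* pretend: are we running in multifunction mode? */
static int next_free_cqn = 64;  /* pretend PF-side resource bitmap */

/* "__" work function: does the real allocation; runs in the PF driver. */
static int __fake_cq_alloc(int *cqn)
{
	if (next_free_cqn < 0)
		return -ENOMEM;
	*cqn = next_free_cqn++;
	return 0;
}

/* Stand-in for the ALLOC_RES comm-channel round trip handled by the PF. */
static int fake_comm_channel_alloc_res(int *cqn)
{
	/* The PF-side handler ultimately invokes the "__" work function. */
	return __fake_cq_alloc(cqn);
}

/* "request" function (same name, no "__"): the only entry point callers use. */
static int fake_cq_alloc(int *cqn)
{
	if (is_mfunc)
		return fake_comm_channel_alloc_res(cqn);
	return __fake_cq_alloc(cqn);
}

int main(void)
{
	int cqn, err;

	for (is_mfunc = 0; is_mfunc <= 1; is_mfunc++) {
		err = fake_cq_alloc(&cqn);
		printf("mfunc=%d -> err=%d cqn=%d\n", is_mfunc, err, cqn);
	}
	return 0;
}

Keeping the request function as the sole entry point is what lets mlx4_cq_alloc() and mlx4_cq_free() in the diff below call mlx4_cq_alloc_icm()/mlx4_cq_free_icm() without caring whether they are running on the PF or on a VF.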
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c  116
1 file changed, 89 insertions, 27 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index ebd0eb234f14..dd9211f1d500 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
-#include <linux/gfp.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/cq.h>
@@ -81,7 +81,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
 			       cqn & (dev->caps.num_cqs - 1));
 	if (!cq) {
-		mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
@@ -117,8 +117,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num)
 {
-	return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -131,8 +132,8 @@ static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+	return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
 			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
@@ -188,6 +189,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
+static int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+	int err;
+
+	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+	if (*cqn == -1)
+		return -ENOMEM;
+
+	err = mlx4_table_get(dev, &cq_table->table, *cqn);
+	if (err)
+		goto err_out;
+
+	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+	if (err)
+		goto err_put;
+	return 0;
+
+err_put:
+	mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+	return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
+		else {
+			*cqn = get_param_l(&out_param);
+			return 0;
+		}
+	}
+	return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+static void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+	mlx4_table_put(dev, &cq_table->table, cqn);
+	mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, cqn);
+		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+	} else
+		__mlx4_cq_free_icm(dev, cqn);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
 		  unsigned vector, int collapsed)
@@ -204,23 +277,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 
 	cq->vector = vector;
 
-	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-	if (cq->cqn == -1)
-		return -ENOMEM;
-
-	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
-	if (err)
-		goto err_out;
-
-	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
 	if (err)
-		goto err_put;
+		return err;
 
 	spin_lock_irq(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
 	spin_unlock_irq(&cq_table->lock);
 	if (err)
-		goto err_cmpt_put;
+		goto err_icm;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -259,14 +324,8 @@ err_radix:
 	radix_tree_delete(&cq_table->tree, cq->cqn);
 	spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+	mlx4_cq_free_icm(dev, cq->cqn);
 
 	return err;
 }
@@ -292,8 +351,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	complete(&cq->free);
 	wait_for_completion(&cq->free);
 
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+	mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -304,6 +362,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 	spin_lock_init(&cq_table->lock);
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
 			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +375,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	/* Nothing to do to clean up radix_tree */
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }