 drivers/net/ethernet/mellanox/mlx4/qp.c | 235 ++++++++++++++++++++++++-------
 1 file changed, 181 insertions(+), 54 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e721f4cd34f8..d0489740563e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
 	spin_unlock(&qp_table->lock);
 
 	if (!qp) {
-		mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
 		return;
 	}
 
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
 	complete(&qp->free);
 }
 
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-		   int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+	return qp->qpn >= dev->caps.sqp_start &&
+		qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+		     struct mlx4_qp_context *context,
+		     enum mlx4_qp_optpar optpar,
+		     int sqd_event, struct mlx4_qp *qp, int native)
 {
 	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
 		[MLX4_QP_STATE_RST] = {
@@ -110,17 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		}
 	};
 
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
 	int ret = 0;
+	u8 port;
 
 	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 	    !op[cur_state][new_state])
 		return -EINVAL;
 
-	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-		return mlx4_cmd(dev, 0, qp->qpn, 2,
-				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A,
-				MLX4_CMD_WRAPPED);
+	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+		    cur_state != MLX4_QP_STATE_RST &&
+		    is_qp0(dev, qp)) {
+			port = (qp->qpn & 1) + 1;
+			priv->mfunc.master.qp0_state[port].qp0_active = 0;
+		}
+		return ret;
+	}
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
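
Note: the 2RST branch above is where the master learns that a qp0 has
gone down: is_qp0() bounds the QPN to the two qp0 QPNs (one per port),
and (qp->qpn & 1) + 1 turns the low bit of the QPN into a port number.
A standalone sketch of that mapping (plain userspace C, not part of
the patch; the sqp_start value is made up, and the mapping assumes
sqp_start is even, which the driver's special-QP layout implies):

    #include <stdio.h>

    int main(void)
    {
        unsigned int sqp_start = 0x40;  /* hypothetical example value */
        unsigned int qpn;

        /* qp0 of port 1 and port 2 occupy two consecutive QPNs
         * starting at sqp_start, per is_qp0() above. */
        for (qpn = sqp_start; qpn <= sqp_start + 1; qpn++)
            printf("qp0 qpn 0x%x -> port %u\n", qpn, (qpn & 1) + 1);
        return 0;
    }
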
@@ -133,19 +151,34 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 	}
 
+	port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+		context->pri_path.sched_queue = (context->pri_path.sched_queue &
+						 0xc3);
+
 	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
 	memcpy(mailbox->buf + 8, context, sizeof *context);
 
 	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
 		cpu_to_be32(qp->qpn);
 
-	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+	ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+		       qp->qpn | (!!sqd_event << 31),
 		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
-		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C,
-		       MLX4_CMD_WRAPPED);
+		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return ret;
 }
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+		   struct mlx4_qp_context *context,
+		   enum mlx4_qp_optpar optpar,
+		   int sqd_event, struct mlx4_qp *qp)
+{
+	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+				optpar, sqd_event, qp, 0);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
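
Note: for Ethernet ports, __mlx4_qp_modify() now masks
pri_path.sched_queue with 0xc3 (binary 11000011), clearing bits 2..5
while preserving bit 6, the same bit the port computation reads. A
quick standalone check of that bit arithmetic (hypothetical
sched_queue value, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int sched_queue = 0x7c;  /* hypothetical: bit 6 set, bits 2..5 dirty */
        unsigned int port = ((sched_queue >> 6) & 1) + 1;

        printf("port   = %u\n", port);                    /* prints 2 */
        printf("masked = 0x%02x\n", sched_queue & 0xc3);  /* prints 0x40 */
        return 0;
    }
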
@@ -152,15 +185,36 @@
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+static int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+				   int *base)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	int qpn;
 
-	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-	if (qpn == -1)
+	*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+	if (*base == -1)
 		return -ENOMEM;
 
-	*base = qpn;
 	return 0;
 }
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+	u64 in_param;
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, cnt);
+		set_param_h(&in_param, align);
+		err = mlx4_cmd_imm(dev, in_param, &out_param,
+				   RES_QP, RES_OP_RESERVE,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
+
+		*base = get_param_l(&out_param);
+		return 0;
+	}
+	return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
 
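
Note: set_param_l()/set_param_h() and get_param_l() are driver helpers
defined elsewhere; from their use here they evidently pack cnt into
the low 32 bits and align into the high 32 bits of the u64 immediate
that travels over the command channel. A userspace sketch under that
assumption (the helper bodies below are guesses for illustration, not
the driver's actual code):

    #include <assert.h>
    #include <stdint.h>

    static void set_param_l(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0xffffffff00000000ULL) | val;
    }

    static void set_param_h(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0x00000000ffffffffULL) | ((uint64_t) val << 32);
    }

    static uint32_t get_param_l(uint64_t *arg)
    {
        return (uint32_t) (*arg & 0xffffffff);
    }

    int main(void)
    {
        uint64_t in_param = 0;

        set_param_l(&in_param, 64);  /* cnt */
        set_param_h(&in_param, 2);   /* align */
        assert(in_param == (((uint64_t) 2 << 32) | 64));
        assert(get_param_l(&in_param) == 64);
        return 0;
    }
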
@@ -167,11 +221,30 @@
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+static void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	if (base_qpn < dev->caps.sqp_start + 8)
-		return;
 
+	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+		return;
 	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
 }
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, base_qpn);
+		set_param_h(&in_param, cnt);
+		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err) {
+			mlx4_warn(dev, "Failed to release qp range"
+				  " base:%d cnt:%d\n", base_qpn, cnt);
+		}
+	} else
+		__mlx4_qp_release_range(dev, base_qpn, cnt);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
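
Note: mlx4_is_qp_reserved() is introduced elsewhere in this series;
judging from the open-coded `base_qpn < dev->caps.sqp_start + 8` test
it replaces, a matching predicate would look like the hypothetical
stand-in below (names and values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct caps { uint32_t sqp_start; };

    /* Hypothetical stand-in: the first sqp_start + 8 QPNs (the
     * firmware/special-QP region) must never go back to the bitmap. */
    static int is_qp_reserved(const struct caps *caps, uint32_t qpn)
    {
        return qpn < caps->sqp_start + 8;
    }

    int main(void)
    {
        struct caps caps = { .sqp_start = 0x40 };  /* hypothetical */

        printf("%d %d\n", is_qp_reserved(&caps, 0x41),   /* 1: reserved */
               is_qp_reserved(&caps, 0x80));             /* 0: releasable */
        return 0;
    }
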
@@ -178,60 +251,43 @@
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+static int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
 	int err;
 
-	if (!qpn)
-		return -EINVAL;
-
-	qp->qpn = qpn;
-
-	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
 	if (err)
 		goto err_put_qp;
 
-	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
 	if (err)
 		goto err_put_auxc;
 
-	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
 	if (err)
 		goto err_put_altc;
 
-	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
 	if (err)
 		goto err_put_rdmarc;
 
-	spin_lock_irq(&qp_table->lock);
-	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-	spin_unlock_irq(&qp_table->lock);
-	if (err)
-		goto err_put_cmpt;
-
-	atomic_set(&qp->refcount, 1);
-	init_completion(&qp->free);
-
 	return 0;
 
-err_put_cmpt:
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
 err_put_rdmarc:
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
 
 err_put_altc:
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->altc_table, qpn);
 
 err_put_auxc:
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
 
 err_put_qp:
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->qp_table, qpn);
 
 err_out:
 	return err;
 }
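
Note: __mlx4_qp_alloc_icm() keeps the usual acquire-in-order,
release-in-reverse goto ladder, but the radix-tree insertion (and its
err_put_cmpt unwind label) has moved out to mlx4_qp_alloc() below. A
self-contained sketch of the ladder pattern (toy resource names, not
the driver's tables):

    #include <stdio.h>

    static int get(const char *name)  { printf("get %s\n", name); return 0; }
    static void put(const char *name) { printf("put %s\n", name); }

    static int alloc_all(void)
    {
        int err;

        /* acquire in order; on failure, release in reverse order */
        if ((err = get("qp")))
            goto err_out;
        if ((err = get("auxc")))
            goto err_put_qp;
        if ((err = get("altc")))
            goto err_put_auxc;
        return 0;

    err_put_auxc:
        put("auxc");
    err_put_qp:
        put("qp");
    err_out:
        return err;
    }

    int main(void)
    {
        return alloc_all();
    }
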
@@ -238,3 +294,75 @@
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+	u64 param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&param, qpn);
+		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+				    MLX4_CMD_WRAPPED);
+	}
+	return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+static void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+	mlx4_table_put(dev, &qp_table->altc_table, qpn);
+	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+	mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, qpn);
+		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			     MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+	} else
+		__mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	int err;
+
+	if (!qpn)
+		return -EINVAL;
+
+	qp->qpn = qpn;
+
+	err = mlx4_qp_alloc_icm(dev, qpn);
+	if (err)
+		return err;
+
+	spin_lock_irq(&qp_table->lock);
+	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+				(dev->caps.num_qps - 1), qp);
+	spin_unlock_irq(&qp_table->lock);
+	if (err)
+		goto err_icm;
+
+	atomic_set(&qp->refcount, 1);
+	init_completion(&qp->free);
+
+	return 0;
+
+err_icm:
+	mlx4_qp_free_icm(dev, qpn);
+	return err;
+}
+
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
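
Note: mlx4_qp_alloc() keys the radix tree by
qp->qpn & (dev->caps.num_qps - 1), and the lookup path applies the
same mask; the AND is a cheap modulo that is only correct because
num_qps is a power of two. A quick standalone check of that
arithmetic (both constants are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t num_qps = 1 << 17;   /* hypothetical power of two */
        uint32_t qpn = 0x00c00040;    /* hypothetical 24-bit QPN */

        /* insert and lookup must agree on the radix-tree key,
         * so both sides apply the same mask */
        assert((qpn & (num_qps - 1)) == qpn % num_qps);
        return 0;
    }
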
@@ -250,17 +378,11 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
 	if (atomic_dec_and_test(&qp->refcount))
 		complete(&qp->free);
 	wait_for_completion(&qp->free);
 
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+	mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
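
Note: mlx4_qp_free() still performs the refcount/completion handshake
(the last reference dropper signals the completion that teardown
waits on) and now delegates the ICM table puts to mlx4_qp_free_icm().
A userspace sketch of the handshake shape (C11 atomics standing in
for the kernel's atomic_t and struct completion; single-threaded, so
the wait is implicit):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcount;
        int freed;               /* stands in for struct completion */
    };

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            o->freed = 1;        /* ~ complete(&qp->free) */
    }

    int main(void)
    {
        struct obj o;

        atomic_init(&o.refcount, 2);  /* 1 initial ref + 1 in-flight event */
        o.freed = 0;

        obj_put(&o);   /* event handler drops its reference */
        obj_put(&o);   /* teardown drops the initial reference */
        printf("safe to free: %d\n", o.freed);  /* prints 1 */
        return 0;
    }
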
@@ -278,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	/*
 	 * We reserve 2 extra QPs per port for the special QPs.  The
@@ -329,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
+
 	mlx4_CONF_SPECIAL_QP(dev, 0);
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }