Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 375
 1 file changed, 350 insertions, 25 deletions

diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e5863dfa60a..7416ef20c203 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -100,7 +101,9 @@ struct mlx4_eq_context {
 			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
-			       (1ull << MLX4_EVENT_TYPE_CMD))
+			       (1ull << MLX4_EVENT_TYPE_CMD) | \
+			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -123,13 +126,157 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+	struct mlx4_eqe *eqe =
+		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+	return (!!(eqe->owner & 0x80) ^
+		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+		eqe : NULL;
+}
+
+/* dummies for now */
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+				    enum mlx4_resource type,
+				    int res_id, int *slave)
+{
+	return -ENOENT;
+}
+/* end dummies */
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+	struct mlx4_mfunc_master_ctx *master =
+		container_of(work, struct mlx4_mfunc_master_ctx,
+			     slave_event_work);
+	struct mlx4_mfunc *mfunc =
+		container_of(master, struct mlx4_mfunc, master);
+	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+	struct mlx4_eqe *eqe;
+	u8 slave;
+	int i;
+
+	for (eqe = next_slave_event_eqe(slave_eq); eqe;
+	     eqe = next_slave_event_eqe(slave_eq)) {
+		slave = eqe->slave_id;
+
+		/* All active slaves need to receive the event */
+		if (slave == ALL_SLAVES) {
+			for (i = 0; i < dev->num_slaves; i++) {
+				if (i != dev->caps.function &&
+				    master->slave_state[i].active)
+					if (mlx4_GEN_EQE(dev, i, eqe))
+						mlx4_warn(dev, "Failed to "
+							  "generate event "
+							  "for slave %d\n", i);
+			}
+		} else {
+			if (mlx4_GEN_EQE(dev, slave, eqe))
+				mlx4_warn(dev, "Failed to generate event "
+					  "for slave %d\n", slave);
+		}
+		++slave_eq->cons;
+	}
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+	struct mlx4_eqe *s_eqe =
+		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+	if ((!!(s_eqe->owner & 0x80)) ^
+	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+			  "No free EQE on slave events queue\n", slave);
+		return;
+	}
+
+	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+	s_eqe->slave_id = slave;
+	/* ensure all information is written before setting the ownership bit */
+	wmb();
+	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+	++slave_eq->prod;
+
+	queue_work(priv->mfunc.master.comm_wq,
+		   &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+			     struct mlx4_eqe *eqe)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_slave =
+		&priv->mfunc.master.slave_state[slave];
+
+	if (!s_slave->active) {
+		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+		return;
+	}
+
+	slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+	struct mlx4_mfunc_master_ctx *master =
+		container_of(work, struct mlx4_mfunc_master_ctx,
+			     slave_flr_event_work);
+	struct mlx4_mfunc *mfunc =
+		container_of(master, struct mlx4_mfunc, master);
+	struct mlx4_priv *priv =
+		container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	int i;
+	int err;
+
+	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+	for (i = 0 ; i < dev->num_slaves; i++) {
+
+		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+				 "clean slave: %d\n", i);
+
+			mlx4_delete_all_resources_for_slave(dev, i);
+			/*return the slave to running mode*/
+			spin_lock(&priv->mfunc.master.slave_state_lock);
+			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+			slave_state[i].is_slave_going_down = 0;
+			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			/*notify the FW:*/
+			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+			if (err)
+				mlx4_warn(dev, "Failed to notify FW on "
					  "FLR done (slave:%d)\n", i);
+		}
+	}
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_eqe *eqe;
 	int cqn;
 	int eqes_found = 0;
 	int set_ci = 0;
 	int port;
+	int slave = 0;
+	int ret;
+	u32 flr_slave;
+	u8 update_slave_state;
+	int i;
 
 	while ((eqe = next_eqe_sw(eq))) {
 		/*
@@ -152,14 +299,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-				      eqe->type);
+			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* forward only to slave owning the QP */
+				ret = mlx4_get_slave_from_resource_id(dev,
+					RES_QP,
+					be32_to_cpu(eqe->event.qp.qpn)
+					& 0xffffff, &slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_dbg(dev, "QP event %02x(%02x) on "
+						 "EQ %d at index %u: could "
+						 "not get slave id (%d)\n",
+						 eqe->type, eqe->subtype,
+						 eq->eqn, eq->cons_index, ret);
+					break;
+				}
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+
+			}
+			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+				      0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
+			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				  __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-				       eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* forward only to slave owning the SRQ */
+				ret = mlx4_get_slave_from_resource_id(dev,
+					RES_SRQ,
+					be32_to_cpu(eqe->event.srq.srqn)
+					& 0xffffff,
+					&slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_warn(dev, "SRQ event %02x(%02x) "
+						  "on EQ %d at index %u: could"
+						  " not get slave id (%d)\n",
+						  eqe->type, eqe->subtype,
+						  eq->eqn, eq->cons_index, ret);
+					break;
+				}
+				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+					  " event: %02x(%02x)\n", __func__,
+					  slave,
+					  be32_to_cpu(eqe->event.srq.srqn),
+					  eqe->type, eqe->subtype);
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_warn(dev, "%s: sending event "
+						  "%02x(%02x) to slave:%d\n",
+						  __func__, eqe->type,
+						  eqe->subtype, slave);
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+			}
+			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+				       0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_CMD:
@@ -172,13 +373,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
 			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
 			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+				mlx4_dispatch_event(dev,
+						    MLX4_DEV_EVENT_PORT_DOWN,
 						    port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+				if (mlx4_is_master(dev))
+					/*change the state of all slave's port
+					* to down:*/
+					for (i = 0; i < dev->num_slaves; i++) {
+						mlx4_dbg(dev, "%s: Sending "
+							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+							 " to slave: %d, port:%d\n",
+							 __func__, i, port);
+						if (i == dev->caps.function)
+							continue;
+						mlx4_slave_event(dev, i, eqe);
+					}
 			} else {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+				mlx4_dispatch_event(dev,
+						    MLX4_DEV_EVENT_PORT_UP,
 						    port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+				if (mlx4_is_master(dev)) {
+					for (i = 0; i < dev->num_slaves; i++) {
+						if (i == dev->caps.function)
+							continue;
+						mlx4_slave_event(dev, i, eqe);
+					}
+				}
 			}
 			break;
 
@@ -187,7 +410,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 				  eqe->event.cq_err.syndrome == 1 ?
 				  "overrun" : "access violation",
 				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+			if (mlx4_is_master(dev)) {
+				ret = mlx4_get_slave_from_resource_id(dev,
+					RES_CQ,
+					be32_to_cpu(eqe->event.cq_err.cqn)
+					& 0xffffff, &slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_dbg(dev, "CQ event %02x(%02x) on "
+						 "EQ %d at index %u: could "
+						 "not get slave id (%d)\n",
+						 eqe->type, eqe->subtype,
+						 eq->eqn, eq->cons_index, ret);
+					break;
+				}
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+			}
+			mlx4_cq_event(dev,
+				      be32_to_cpu(eqe->event.cq_err.cqn)
+				      & 0xffffff,
 				      eqe->type);
 			break;
 
@@ -195,13 +439,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
 			break;
 
+		case MLX4_EVENT_TYPE_COMM_CHANNEL:
+			if (!mlx4_is_master(dev)) {
+				mlx4_warn(dev, "Received comm channel event "
+					  "for non master device\n");
+				break;
+			}
+			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+			       eqe->event.comm_channel_arm.bit_vec,
+			       sizeof eqe->event.comm_channel_arm.bit_vec);
+			queue_work(priv->mfunc.master.comm_wq,
+				   &priv->mfunc.master.comm_work);
+			break;
+
+		case MLX4_EVENT_TYPE_FLR_EVENT:
+			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+			if (!mlx4_is_master(dev)) {
+				mlx4_warn(dev, "Non-master function received "
+					  "FLR event\n");
+				break;
+			}
+
+			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+			if (flr_slave > dev->num_slaves) {
+				mlx4_warn(dev,
+					  "Got FLR for unknown function: %d\n",
+					  flr_slave);
+				update_slave_state = 0;
+			} else
+				update_slave_state = 1;
+
+			spin_lock(&priv->mfunc.master.slave_state_lock);
+			if (update_slave_state) {
+				priv->mfunc.master.slave_state[flr_slave].active = false;
+				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+			}
+			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			queue_work(priv->mfunc.master.comm_wq,
+				   &priv->mfunc.master.slave_flr_event_work);
+			break;
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
-			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+				  "index %u. owner=%x, nent=0x%x, slave=%x, "
+				  "ownership=%s\n",
+				  eqe->type, eqe->subtype, eq->eqn,
+				  eq->cons_index, eqe->owner, eq->nent,
+				  eqe->slave_id,
+				  !!(eqe->owner & 0x80) ^
+				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
 			break;
-		}
+		};
 
 		++eq->cons_index;
 		eqes_found = 1;
@@ -251,6 +542,36 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
 	return IRQ_HANDLED;
 }
 
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			struct mlx4_vhcr *vhcr,
+			struct mlx4_cmd_mailbox *inbox,
+			struct mlx4_cmd_mailbox *outbox,
+			struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_event_eq_info *event_eq =
+		&priv->mfunc.master.slave_state[slave].event_eq;
+	u32 in_modifier = vhcr->in_modifier;
+	u32 eqn = in_modifier & 0x1FF;
+	u64 in_param = vhcr->in_param;
+	int err = 0;
+
+	if (slave == dev->caps.function)
+		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_NATIVE);
+	if (!err) {
+		if (in_modifier >> 31) {
+			/* unmap */
+			event_eq->event_type &= ~in_param;
+		} else {
+			event_eq->eqn = eqn;
+			event_eq->event_type = in_param;
+		}
+	}
+	return err;
+}
+
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 			int eq_num)
 {
@@ -262,16 +583,16 @@ static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int eq_num)
 {
-	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-			MLX4_CMD_TIME_CLASS_A,
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
 			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int eq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-			    MLX4_CMD_TIME_CLASS_A,
+	return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
 			    MLX4_CMD_WRAPPED);
 }
 
@@ -549,14 +870,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		priv->eq_table.uar_map[i] = NULL;
 
-	err = mlx4_map_clr_int(dev);
-	if (err)
-		goto err_out_bitmap;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_map_clr_int(dev);
+		if (err)
+			goto err_out_bitmap;
 
-	priv->eq_table.clr_mask =
-		swab32(1 << (priv->eq_table.inta_pin & 31));
-	priv->eq_table.clr_int = priv->clr_base +
-		(priv->eq_table.inta_pin < 32 ? 4 : 0);
+		priv->eq_table.clr_mask =
+			swab32(1 << (priv->eq_table.inta_pin & 31));
+		priv->eq_table.clr_int = priv->clr_base +
+			(priv->eq_table.inta_pin < 32 ? 4 : 0);
+	}
 
 	priv->eq_table.irq_names =
 		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -664,7 +987,8 @@ err_out_unmap:
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 		--i;
 	}
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 	mlx4_free_irqs(dev);
 
 err_out_bitmap:
@@ -689,7 +1013,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 
 	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		if (priv->eq_table.uar_map[i])
@@ -712,7 +1037,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
 	err = mlx4_NOP(dev);
 	/* When not in MSI_X, there is only one irq to check */
-	if (!(dev->flags & MLX4_FLAG_MSI_X))
+	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
 		return err;
 
 	/* A loop over all completion vectors, for each vector we will check