aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/mthca/mthca_qp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c448
1 file changed, 183 insertions, 265 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fba608ed7df2..f673c461e30b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2,7 +2,7 @@
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * 6 *
7 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -286,207 +286,6 @@ static int to_mthca_st(int transport)
286 } 286 }
287} 287}
288 288
/*
 * QP state transition table, indexed [current state][next state].
 * Each entry holds the mthca firmware transition opcode (MTHCA_TRANS_*)
 * plus, per transport type (UD/UC/RC/MLX), the attribute masks that are
 * required and optional for that transition.  Entries left zero denote
 * MTHCA_TRANS_INVALID, i.e. a transition the driver rejects.
 */
static const struct {
	int trans;                 /* MTHCA_TRANS_* firmware opcode */
	u32 req_param[NUM_TRANS];  /* attributes that MUST accompany the transition */
	u32 opt_param[NUM_TRANS];  /* attributes that MAY accompany the transition */
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV       |
					 IB_QP_PATH_MTU |
					 IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN),
				[RC]  = (IB_QP_AV                  |
					 IB_QP_PATH_MTU            |
					 IB_QP_DEST_QPN            |
					 IB_QP_RQ_PSN              |
					 IB_QP_MAX_DEST_RD_ATOMIC  |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = IB_QP_SQ_PSN,
				[RC]  = (IB_QP_TIMEOUT   |
					 IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_SQ_PSN    |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS |
					 IB_QP_ALT_PATH     |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS   |
					 IB_QP_ALT_PATH       |
					 IB_QP_PATH_MIG_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_AV           |
					 IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX   |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV                  |
					 IB_QP_TIMEOUT             |
					 IB_QP_RETRY_CNT           |
					 IB_QP_RNR_RETRY           |
					 IB_QP_MAX_QP_RD_ATOMIC    |
					 IB_QP_MAX_DEST_RD_ATOMIC  |
					 IB_QP_CUR_STATE           |
					 IB_QP_ALT_PATH            |
					 IB_QP_ACCESS_FLAGS        |
					 IB_QP_PKEY_INDEX          |
					 IB_QP_MIN_RNR_TIMER       |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
489
490static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr, 289static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
491 int attr_mask) 290 int attr_mask)
492{ 291{
@@ -549,6 +348,141 @@ static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
549 return cpu_to_be32(hw_access_flags); 348 return cpu_to_be32(hw_access_flags);
550} 349}
551 350
351static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
352{
353 switch (mthca_state) {
354 case MTHCA_QP_STATE_RST: return IB_QPS_RESET;
355 case MTHCA_QP_STATE_INIT: return IB_QPS_INIT;
356 case MTHCA_QP_STATE_RTR: return IB_QPS_RTR;
357 case MTHCA_QP_STATE_RTS: return IB_QPS_RTS;
358 case MTHCA_QP_STATE_DRAINING:
359 case MTHCA_QP_STATE_SQD: return IB_QPS_SQD;
360 case MTHCA_QP_STATE_SQE: return IB_QPS_SQE;
361 case MTHCA_QP_STATE_ERR: return IB_QPS_ERR;
362 default: return -1;
363 }
364}
365
366static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
367{
368 switch (mthca_mig_state) {
369 case 0: return IB_MIG_ARMED;
370 case 1: return IB_MIG_REARM;
371 case 3: return IB_MIG_MIGRATED;
372 default: return -1;
373 }
374}
375
376static int to_ib_qp_access_flags(int mthca_flags)
377{
378 int ib_flags = 0;
379
380 if (mthca_flags & MTHCA_QP_BIT_RRE)
381 ib_flags |= IB_ACCESS_REMOTE_READ;
382 if (mthca_flags & MTHCA_QP_BIT_RWE)
383 ib_flags |= IB_ACCESS_REMOTE_WRITE;
384 if (mthca_flags & MTHCA_QP_BIT_RAE)
385 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
386
387 return ib_flags;
388}
389
390static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
391 struct mthca_qp_path *path)
392{
393 memset(ib_ah_attr, 0, sizeof *path);
394 ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
395 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
396 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
397 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
398 ib_ah_attr->static_rate = path->static_rate & 0x7;
399 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
400 if (ib_ah_attr->ah_flags) {
401 ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
402 ib_ah_attr->grh.hop_limit = path->hop_limit;
403 ib_ah_attr->grh.traffic_class =
404 (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
405 ib_ah_attr->grh.flow_label =
406 be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
407 memcpy(ib_ah_attr->grh.dgid.raw,
408 path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
409 }
410}
411
/*
 * mthca_query_qp() - read a QP's current attributes back from the HCA.
 *
 * Issues a QUERY_QP firmware command into a freshly allocated mailbox
 * and decodes the returned hardware QP context into the caller's
 * ib_qp_attr and ib_qp_init_attr.  qp_attr_mask is accepted for the
 * verbs interface but not consulted: all fields are filled in.
 *
 * Returns 0 on success or a negative errno (mailbox allocation
 * failure, command failure, or non-zero firmware status).
 */
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		/* command completed but firmware reported an error status */
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	/* hardware QP state lives in the top nibble of the flags word */
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state 	     = to_ib_qp_state(mthca_state);
	qp_attr->cur_qp_state 	     = qp_attr->qp_state;
	qp_attr->path_mtu 	     = context->mtu_msgmax >> 5;
	/* migration state is encoded in bits 12:11 of flags */
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey 		     = be32_to_cpu(context->qkey);
	/* PSNs and the remote QPN are 24-bit fields */
	qp_attr->rq_psn 	     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn 	     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num 	     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));
	/* capacity limits come from the driver's own bookkeeping, not HW */
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
	to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);

	/* P_Key index occupies the low 7 bits of port_pkey */
	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining    = mthca_state == MTHCA_QP_STATE_DRAINING;

	/* RDMA/atomic depths are stored as log2 in 3-bit fields */
	qp_attr->max_rd_atomic  = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer 	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->port_num 	    = qp_attr->ah_attr.port_num;
	/* ackto stores the timeout in its upper 5 bits */
	qp_attr->timeout 	    = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt 	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry 	    = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_port_num 	    = qp_attr->alt_ah_attr.port_num;
	qp_attr->alt_timeout 	    = context->alt_path.ackto >> 3;
	qp_init_attr->cap 	    = qp_attr->cap;

out:
	/* mailbox is freed on both success and error paths */
	mthca_free_mailbox(dev, mailbox);
	return err;
}
485
552static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path) 486static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
553{ 487{
554 path->g_mylmc = ah->src_path_bits & 0x7f; 488 path->g_mylmc = ah->src_path_bits & 0x7f;
@@ -559,9 +493,9 @@ static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
559 path->g_mylmc |= 1 << 7; 493 path->g_mylmc |= 1 << 7;
560 path->mgid_index = ah->grh.sgid_index; 494 path->mgid_index = ah->grh.sgid_index;
561 path->hop_limit = ah->grh.hop_limit; 495 path->hop_limit = ah->grh.hop_limit;
562 path->sl_tclass_flowlabel = 496 path->sl_tclass_flowlabel =
563 cpu_to_be32((ah->sl << 28) | 497 cpu_to_be32((ah->sl << 28) |
564 (ah->grh.traffic_class << 20) | 498 (ah->grh.traffic_class << 20) |
565 (ah->grh.flow_label)); 499 (ah->grh.flow_label));
566 memcpy(path->rgid, ah->grh.dgid.raw, 16); 500 memcpy(path->rgid, ah->grh.dgid.raw, 16);
567 } else 501 } else
@@ -576,18 +510,12 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
576 struct mthca_mailbox *mailbox; 510 struct mthca_mailbox *mailbox;
577 struct mthca_qp_param *qp_param; 511 struct mthca_qp_param *qp_param;
578 struct mthca_qp_context *qp_context; 512 struct mthca_qp_context *qp_context;
579 u32 req_param, opt_param; 513 u32 sqd_event = 0;
580 u8 status; 514 u8 status;
581 int err; 515 int err;
582 516
583 if (attr_mask & IB_QP_CUR_STATE) { 517 if (attr_mask & IB_QP_CUR_STATE) {
584 if (attr->cur_qp_state != IB_QPS_RTR && 518 cur_state = attr->cur_qp_state;
585 attr->cur_qp_state != IB_QPS_RTS &&
586 attr->cur_qp_state != IB_QPS_SQD &&
587 attr->cur_qp_state != IB_QPS_SQE)
588 return -EINVAL;
589 else
590 cur_state = attr->cur_qp_state;
591 } else { 519 } else {
592 spin_lock_irq(&qp->sq.lock); 520 spin_lock_irq(&qp->sq.lock);
593 spin_lock(&qp->rq.lock); 521 spin_lock(&qp->rq.lock);
@@ -596,44 +524,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
596 spin_unlock_irq(&qp->sq.lock); 524 spin_unlock_irq(&qp->sq.lock);
597 } 525 }
598 526
599 if (attr_mask & IB_QP_STATE) { 527 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
600 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
601 return -EINVAL;
602 new_state = attr->qp_state;
603 } else
604 new_state = cur_state;
605
606 if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
607 mthca_dbg(dev, "Illegal QP transition "
608 "%d->%d\n", cur_state, new_state);
609 return -EINVAL;
610 }
611
612 req_param = state_table[cur_state][new_state].req_param[qp->transport];
613 opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
614 528
615 if ((req_param & attr_mask) != req_param) { 529 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
616 mthca_dbg(dev, "QP transition " 530 mthca_dbg(dev, "Bad QP transition (transport %d) "
617 "%d->%d missing req attr 0x%08x\n", 531 "%d->%d with attr 0x%08x\n",
618 cur_state, new_state, 532 qp->transport, cur_state, new_state,
619 req_param & ~attr_mask); 533 attr_mask);
620 return -EINVAL; 534 return -EINVAL;
621 } 535 }
622 536
623 if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) { 537 if ((attr_mask & IB_QP_PKEY_INDEX) &&
624 mthca_dbg(dev, "QP transition (transport %d) "
625 "%d->%d has extra attr 0x%08x\n",
626 qp->transport,
627 cur_state, new_state,
628 attr_mask & ~(req_param | opt_param |
629 IB_QP_STATE));
630 return -EINVAL;
631 }
632
633 if ((attr_mask & IB_QP_PKEY_INDEX) &&
634 attr->pkey_index >= dev->limits.pkey_table_len) { 538 attr->pkey_index >= dev->limits.pkey_table_len) {
635 mthca_dbg(dev, "PKey index (%u) too large. max is %d\n", 539 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
636 attr->pkey_index,dev->limits.pkey_table_len-1); 540 attr->pkey_index, dev->limits.pkey_table_len-1);
637 return -EINVAL; 541 return -EINVAL;
638 } 542 }
639 543
@@ -733,7 +637,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
733 if (attr_mask & IB_QP_RNR_RETRY) { 637 if (attr_mask & IB_QP_RNR_RETRY) {
734 qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = 638 qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
735 attr->rnr_retry << 5; 639 attr->rnr_retry << 5;
736 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | 640 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
737 MTHCA_QP_OPTPAR_ALT_RNR_RETRY); 641 MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
738 } 642 }
739 643
@@ -748,14 +652,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
748 } 652 }
749 653
750 if (attr_mask & IB_QP_ALT_PATH) { 654 if (attr_mask & IB_QP_ALT_PATH) {
655 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
656 mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
657 attr->alt_pkey_index, dev->limits.pkey_table_len-1);
658 return -EINVAL;
659 }
660
751 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { 661 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
752 mthca_dbg(dev, "Alternate port number (%u) is invalid\n", 662 mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
753 attr->alt_port_num); 663 attr->alt_port_num);
754 return -EINVAL; 664 return -EINVAL;
755 } 665 }
756 666
757 mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path); 667 mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
758 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | 668 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
759 attr->alt_port_num << 24); 669 attr->alt_port_num << 24);
760 qp_context->alt_path.ackto = attr->alt_timeout << 3; 670 qp_context->alt_path.ackto = attr->alt_timeout << 3;
761 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); 671 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
@@ -841,11 +751,16 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
841 qp_context->srqn = cpu_to_be32(1 << 24 | 751 qp_context->srqn = cpu_to_be32(1 << 24 |
842 to_msrq(ibqp->srq)->srqn); 752 to_msrq(ibqp->srq)->srqn);
843 753
844 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 754 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
845 qp->qpn, 0, mailbox, 0, &status); 755 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
756 attr->en_sqd_async_notify)
757 sqd_event = 1 << 31;
758
759 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
760 mailbox, sqd_event, &status);
846 if (status) { 761 if (status) {
847 mthca_warn(dev, "modify QP %d returned status %02x.\n", 762 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
848 state_table[cur_state][new_state].trans, status); 763 cur_state, new_state, status);
849 err = -EINVAL; 764 err = -EINVAL;
850 } 765 }
851 766
@@ -1078,10 +993,10 @@ static int mthca_map_memfree(struct mthca_dev *dev,
1078 if (ret) 993 if (ret)
1079 goto err_qpc; 994 goto err_qpc;
1080 995
1081 ret = mthca_table_get(dev, dev->qp_table.rdb_table, 996 ret = mthca_table_get(dev, dev->qp_table.rdb_table,
1082 qp->qpn << dev->qp_table.rdb_shift); 997 qp->qpn << dev->qp_table.rdb_shift);
1083 if (ret) 998 if (ret)
1084 goto err_eqpc; 999 goto err_eqpc;
1085 1000
1086 } 1001 }
1087 1002
@@ -1393,7 +1308,8 @@ void mthca_free_qp(struct mthca_dev *dev,
1393 wait_event(qp->wait, !atomic_read(&qp->refcount)); 1308 wait_event(qp->wait, !atomic_read(&qp->refcount));
1394 1309
1395 if (qp->state != IB_QPS_RESET) 1310 if (qp->state != IB_QPS_RESET)
1396 mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status); 1311 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1312 NULL, 0, &status);
1397 1313
1398 /* 1314 /*
1399 * If this is a userspace QP, the buffers, MR, CQs and so on 1315 * If this is a userspace QP, the buffers, MR, CQs and so on
@@ -1699,7 +1615,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1699 mthca_opcode[wr->opcode]); 1615 mthca_opcode[wr->opcode]);
1700 wmb(); 1616 wmb();
1701 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1617 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1702 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size); 1618 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
1619 ((wr->send_flags & IB_SEND_FENCE) ?
1620 MTHCA_NEXT_FENCE : 0));
1703 1621
1704 if (!size0) { 1622 if (!size0) {
1705 size0 = size; 1623 size0 = size;
@@ -2061,7 +1979,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2061 mthca_opcode[wr->opcode]); 1979 mthca_opcode[wr->opcode]);
2062 wmb(); 1980 wmb();
2063 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1981 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
2064 cpu_to_be32(MTHCA_NEXT_DBD | size); 1982 cpu_to_be32(MTHCA_NEXT_DBD | size |
1983 ((wr->send_flags & IB_SEND_FENCE) ?
1984 MTHCA_NEXT_FENCE : 0));
2065 1985
2066 if (!size0) { 1986 if (!size0) {
2067 size0 = size; 1987 size0 = size;
@@ -2115,7 +2035,7 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2115 int i; 2035 int i;
2116 void *wqe; 2036 void *wqe;
2117 2037
2118 spin_lock_irqsave(&qp->rq.lock, flags); 2038 spin_lock_irqsave(&qp->rq.lock, flags);
2119 2039
2120 /* XXX check that state is OK to post receive */ 2040 /* XXX check that state is OK to post receive */
2121 2041
@@ -2182,8 +2102,8 @@ out:
2182 return err; 2102 return err;
2183} 2103}
2184 2104
2185int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2105void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2186 int index, int *dbd, __be32 *new_wqe) 2106 int index, int *dbd, __be32 *new_wqe)
2187{ 2107{
2188 struct mthca_next_seg *next; 2108 struct mthca_next_seg *next;
2189 2109
@@ -2193,7 +2113,7 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2193 */ 2113 */
2194 if (qp->ibqp.srq) { 2114 if (qp->ibqp.srq) {
2195 *new_wqe = 0; 2115 *new_wqe = 0;
2196 return 0; 2116 return;
2197 } 2117 }
2198 2118
2199 if (is_send) 2119 if (is_send)
@@ -2207,8 +2127,6 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2207 (next->ee_nds & cpu_to_be32(0x3f)); 2127 (next->ee_nds & cpu_to_be32(0x3f));
2208 else 2128 else
2209 *new_wqe = 0; 2129 *new_wqe = 0;
2210
2211 return 0;
2212} 2130}
2213 2131
2214int __devinit mthca_init_qp_table(struct mthca_dev *dev) 2132int __devinit mthca_init_qp_table(struct mthca_dev *dev)