author     Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-03 13:49:45 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-03 13:49:45 -0400
commit     026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (patch)
tree       2624a44924c625c367f3cebf937853b9da2de282 /drivers/infiniband/hw/ipath/ipath_qp.c
parent     9f2fa466383ce100b90fe52cb4489d7a26bf72a9 (diff)
parent     29454dde27d8e340bb1987bad9aa504af7081eba (diff)

Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')

-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c  108

1 file changed, 54 insertions, 54 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 9f8855d970c8..83e557be591e 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
 #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
@@ -332,10 +333,11 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
+	clear_bit(IPATH_S_BUSY, &qp->s_flags);
 	qp->s_hdrwords = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
-	atomic_set(&qp->msn, 0);
+	qp->r_msn = 0;
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
@@ -344,7 +346,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->s_nak_state = 0;
+	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+	qp->r_nak_state = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -362,10 +365,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
  * @qp: the QP to put into an error state
  *
  * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
+ * QP s_lock should be held and interrupts disabled.
  */
 
-static void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
@@ -408,12 +411,14 @@ static void ipath_error_qp(struct ipath_qp *qp)
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 
 	wc.opcode = IB_WC_RECV;
+	spin_lock(&qp->r_rq.lock);
 	while (qp->r_rq.tail != qp->r_rq.head) {
 		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
 		if (++qp->r_rq.tail >= qp->r_rq.size)
 			qp->r_rq.tail = 0;
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
+	spin_unlock(&qp->r_rq.lock);
 }
 
 /**
@@ -433,8 +438,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&qp->r_rq.lock, flags);
-	spin_lock(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -446,7 +450,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	if (attr_mask & IB_QP_AV)
 		if (attr->ah_attr.dlid == 0 ||
-		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
 			goto inval;
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -505,34 +509,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
-		qp->s_min_rnr_timer = attr->min_rnr_timer;
+		qp->r_min_rnr_timer = attr->min_rnr_timer;
 
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
-	if (attr_mask & IB_QP_PKEY_INDEX)
-		qp->s_pkey_index = attr->pkey_index;
-
 	qp->state = new_state;
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
-	/*
-	 * If QP1 changed to the RTS state, try to move to the link to INIT
-	 * even if it was ACTIVE so the SM will reinitialize the SMA's
-	 * state.
-	 */
-	if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
-		struct ipath_ibdev *dev = to_idev(ibqp->device);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-	}
 	ret = 0;
 	goto bail;
 
 inval:
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 	ret = -EINVAL;
 
 bail:
@@ -566,7 +555,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->sq_draining = 0;
 	attr->max_rd_atomic = 1;
 	attr->max_dest_rd_atomic = 1;
-	attr->min_rnr_timer = qp->s_min_rnr_timer;
+	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
 	attr->timeout = 0;
 	attr->retry_cnt = qp->s_retry_cnt;
@@ -593,21 +582,17 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  * @qp: the queue pair to compute the AETH for
  *
  * Returns the AETH.
- *
- * The QP s_lock should be held.
  */
 __be32 ipath_compute_aeth(struct ipath_qp *qp)
 {
-	u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+	u32 aeth = qp->r_msn & IPATH_MSN_MASK;
 
-	if (qp->s_nak_state) {
-		aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
-	} else if (qp->ibqp.srq) {
+	if (qp->ibqp.srq) {
 		/*
 		 * Shared receive queues don't generate credits.
 		 * Set the credit field to the invalid value.
 		 */
-		aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
 	} else {
 		u32 min, max, x;
 		u32 credits;
@@ -637,7 +622,7 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
 			else
 				min = x;
 		}
-		aeth |= x << IPS_AETH_CREDIT_SHIFT;
+		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
 	}
 	return cpu_to_be32(aeth);
 }
@@ -663,12 +648,22 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	size_t sz;
 	struct ib_qp *ret;
 
-	if (init_attr->cap.max_send_sge > 255 ||
-	    init_attr->cap.max_recv_sge > 255) {
+	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	if (init_attr->cap.max_send_sge +
+	    init_attr->cap.max_recv_sge +
+	    init_attr->cap.max_send_wr +
+	    init_attr->cap.max_recv_wr == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
@@ -686,18 +681,26 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	case IB_QPT_GSI:
 		qp = kmalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
+			vfree(swq);
 			ret = ERR_PTR(-ENOMEM);
 			goto bail;
 		}
-		qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-		sz = sizeof(struct ipath_sge) *
-			init_attr->cap.max_recv_sge +
-			sizeof(struct ipath_rwqe);
-		qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
-		if (!qp->r_rq.wq) {
-			kfree(qp);
-			ret = ERR_PTR(-ENOMEM);
-			goto bail;
+		if (init_attr->srq) {
+			qp->r_rq.size = 0;
+			qp->r_rq.max_sge = 0;
+			qp->r_rq.wq = NULL;
+		} else {
+			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+			sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+				sizeof(struct ipath_rwqe);
+			qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+			if (!qp->r_rq.wq) {
+				kfree(qp);
+				vfree(swq);
+				ret = ERR_PTR(-ENOMEM);
+				goto bail;
+			}
 		}
 
 		/*
@@ -708,9 +711,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
-		tasklet_init(&qp->s_task,
-			     init_attr->qp_type == IB_QPT_RC ?
-			     ipath_do_rc_send : ipath_do_uc_send,
+		tasklet_init(&qp->s_task, ipath_do_ruc_send,
 			     (unsigned long)qp);
 		INIT_LIST_HEAD(&qp->piowait);
 		INIT_LIST_HEAD(&qp->timerwait);
@@ -718,7 +719,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		qp->s_wq = swq;
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
-		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
 		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
 			1 << IPATH_S_SIGNAL_REQ_WR : 0;
 		dev = to_idev(ibpd->device);
@@ -888,18 +888,18 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
  */
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 {
-	u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
 
 	/*
 	 * If the credit is invalid, we can send
 	 * as many packets as we like. Otherwise, we have to
 	 * honor the credit field.
 	 */
-	if (credit == IPS_AETH_CREDIT_INVAL) {
+	if (credit == IPATH_AETH_CREDIT_INVAL)
 		qp->s_lsn = (u32) -1;
-	} else if (qp->s_lsn != (u32) -1) {
+	else if (qp->s_lsn != (u32) -1) {
 		/* Compute new LSN (i.e., MSN + credit) */
-		credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
 		if (ipath_cmp24(credit, qp->s_lsn) > 0)
 			qp->s_lsn = credit;
 	}