author    Bryan O'Sullivan <bos@pathscale.com>  2006-08-25 14:24:32 -0400
committer Roland Dreier <rolandd@cisco.com>     2006-09-22 18:22:31 -0400
commit    34b2aafea38efdf02cd8107a6e1057e2a297c447 (patch)
tree      fc800510f947696156df70cf6608f8283bab868c /drivers/infiniband/hw/ipath/ipath_verbs.c
parent    b1c1b6a30eac88665a35a207cc5e6233090b9d65 (diff)
IB/ipath: simplify layering code
A lot of ipath layer code was only called in one place. Now that the
ipath_core and ib_ipath drivers are merged, it's more sensible to simply
inline the simple stuff that the layer code was doing.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
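The shape of the change, in a minimal sketch: before the merge, ib_ipath reached
chip state through one-line accessors exported by the layer code; afterwards it
reads the fields directly. The accessor body below is an assumption inferred from
the inlined result (the deleted helper lived in the layer code, which this file's
diff does not show):

	/* Before: a trivial accessor exported by the ipath layer ... */
	u16 ipath_layer_get_lid(struct ipath_devdata *dd)
	{
		return dd->ipath_lid;	/* assumed body, inferred from the inlined form */
	}

	/* ... and its call site in ipath_verbs.c: */
	lid = ipath_layer_get_lid(dev->dd);

	/* After: the call site reads the field directly. */
	lid = dev->dd->ipath_lid;

The same substitution recurs throughout the diff below: ipath_layer_get_guid()
becomes dd->ipath_guid, ipath_layer_get_pcirev() becomes dd->ipath_pcirev, and so
on, while the non-trivial helpers (counter snapshots, the verbs timer) move into
ipath_verbs.c as real functions.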
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c  525
1 file changed, 486 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 15edec9227e4..3c47620e9887 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -33,15 +33,13 @@
 
 #include <rdma/ib_mad.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
 #include <linux/utsname.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-/* Not static, because we don't want the compiler removing it */
-const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
-
 static unsigned int ib_ipath_qp_table_size = 251;
 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -109,10 +107,6 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
 		   uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@pathscale.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
-
 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = 0,
 	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
@@ -125,6 +119,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_ERR] = 0,
 };
 
+struct ipath_ucontext {
+	struct ib_ucontext ibucontext;
+};
+
+static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
+						  *ibucontext)
+{
+	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
+}
+
 /*
  * Translate ib_wr_opcode into ib_wc_opcode.
  */
@@ -400,7 +404,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
 	lid = be16_to_cpu(hdr->lrh[1]);
 	if (lid < IPATH_MULTICAST_LID_BASE) {
 		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
-		if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
+		if (unlikely(lid != dev->dd->ipath_lid)) {
 			dev->rcv_errors++;
 			goto bail;
 		}
@@ -511,19 +515,19 @@ void ipath_ib_timer(struct ipath_ibdev *dev)
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
 	    --dev->pma_sample_start == 0) {
 		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-		ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
-					      &dev->ipath_rword,
-					      &dev->ipath_spkts,
-					      &dev->ipath_rpkts,
-					      &dev->ipath_xmit_wait);
+		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
+					&dev->ipath_rword,
+					&dev->ipath_spkts,
+					&dev->ipath_rpkts,
+					&dev->ipath_xmit_wait);
 	}
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
 		if (dev->pma_sample_interval == 0) {
 			u64 ta, tb, tc, td, te;
 
 			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-			ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
-						      &tc, &td, &te);
+			ipath_snapshot_counters(dev->dd, &ta, &tb,
+						&tc, &td, &te);
 
 			dev->ipath_sword = ta - dev->ipath_sword;
 			dev->ipath_rword = tb - dev->ipath_rword;
@@ -553,6 +557,362 @@ void ipath_ib_timer(struct ipath_ibdev *dev)
 	}
 }
 
+static void update_sge(struct ipath_sge_state *ss, u32 length)
+{
+	struct ipath_sge *sge = &ss->sge;
+
+	sge->vaddr += length;
+	sge->length -= length;
+	sge->sge_length -= length;
+	if (sge->sge_length == 0) {
+		if (--ss->num_sge)
+			*sge = *ss->sg_list++;
+	} else if (sge->length == 0 && sge->mr != NULL) {
+		if (++sge->n >= IPATH_SEGSZ) {
+			if (++sge->m >= sge->mr->mapsz)
+				return;
+			sge->n = 0;
+		}
+		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+	}
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#endif
+
+static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
+		    u32 length)
+{
+	u32 extra = 0;
+	u32 data = 0;
+	u32 last;
+
+	while (1) {
+		u32 len = ss->sge.length;
+		u32 off;
+
+		BUG_ON(len == 0);
+		if (len > length)
+			len = length;
+		if (len > ss->sge.sge_length)
+			len = ss->sge.sge_length;
+		/* If the source address is not aligned, try to align it. */
+		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+		if (off) {
+			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+					    ~(sizeof(u32) - 1));
+			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+			u32 y;
+
+			y = sizeof(u32) - off;
+			if (len > y)
+				len = y;
+			if (len + extra >= sizeof(u32)) {
+				data |= set_upper_bits(v, extra *
+						       BITS_PER_BYTE);
+				len = sizeof(u32) - extra;
+				if (len == length) {
+					last = data;
+					break;
+				}
+				__raw_writel(data, piobuf);
+				piobuf++;
+				extra = 0;
+				data = 0;
+			} else {
+				/* Clear unused upper bytes */
+				data |= clear_upper_bytes(v, len, extra);
+				if (len == length) {
+					last = data;
+					break;
+				}
+				extra += len;
+			}
+		} else if (extra) {
+			/* Source address is aligned. */
+			u32 *addr = (u32 *) ss->sge.vaddr;
+			int shift = extra * BITS_PER_BYTE;
+			int ushift = 32 - shift;
+			u32 l = len;
+
+			while (l >= sizeof(u32)) {
+				u32 v = *addr;
+
+				data |= set_upper_bits(v, shift);
+				__raw_writel(data, piobuf);
+				data = get_upper_bits(v, ushift);
+				piobuf++;
+				addr++;
+				l -= sizeof(u32);
+			}
+			/*
+			 * We still have 'extra' number of bytes leftover.
+			 */
+			if (l) {
+				u32 v = *addr;
+
+				if (l + extra >= sizeof(u32)) {
+					data |= set_upper_bits(v, shift);
+					len -= l + extra - sizeof(u32);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					__raw_writel(data, piobuf);
+					piobuf++;
+					extra = 0;
+					data = 0;
+				} else {
+					/* Clear unused upper bytes */
+					data |= clear_upper_bytes(v, l,
+								  extra);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					extra += l;
+				}
+			} else if (len == length) {
+				last = data;
+				break;
+			}
+		} else if (len == length) {
+			u32 w;
+
+			/*
+			 * Need to round up for the last dword in the
+			 * packet.
+			 */
+			w = (len + 3) >> 2;
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+			piobuf += w - 1;
+			last = ((u32 *) ss->sge.vaddr)[w - 1];
+			break;
+		} else {
+			u32 w = len >> 2;
+
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
+			piobuf += w;
+
+			extra = len & (sizeof(u32) - 1);
+			if (extra) {
+				u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+				/* Clear unused upper bytes */
+				data = clear_upper_bytes(v, extra, 0);
+			}
+		}
+		update_sge(ss, len);
+		length -= len;
+	}
+	/* Update address before sending packet. */
+	update_sge(ss, length);
+	/* must flush early everything before trigger word */
+	ipath_flush_wc();
+	__raw_writel(last, piobuf);
+	/* be sure trigger word is written */
+	ipath_flush_wc();
+}
+
+/**
+ * ipath_verbs_send - send a packet
+ * @dd: the infinipath device
+ * @hdrwords: the number of words in the header
+ * @hdr: the packet header
+ * @len: the length of the packet in bytes
+ * @ss: the SGE to send
+ */
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
+{
+	u32 __iomem *piobuf;
+	u32 plen;
+	int ret;
+
+	/* +1 is for the qword padding of pbc */
+	plen = hdrwords + ((len + 3) >> 2) + 1;
+	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
+		ipath_dbg("packet len 0x%x too long, failing\n", plen);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/* Get a PIO buffer to use. */
+	piobuf = ipath_getpiobuf(dd, NULL);
+	if (unlikely(piobuf == NULL)) {
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	/*
+	 * Write len to control qword, no flags.
+	 * We have to flush after the PBC for correctness on some cpus
+	 * or WC buffer can be written out of order.
+	 */
+	writeq(plen, piobuf);
+	ipath_flush_wc();
+	piobuf += 2;
+	if (len == 0) {
+		/*
+		 * If there is just the header portion, must flush before
+		 * writing last word of header for correctness, and after
+		 * the last header word (trigger word).
+		 */
+		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+
+	__iowrite32_copy(piobuf, hdr, hdrwords);
+	piobuf += hdrwords;
+
+	/* The common case is aligned and contained in one segment. */
+	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+		u32 w;
+		u32 *addr = (u32 *) ss->sge.vaddr;
+
+		/* Update address before sending packet. */
+		update_sge(ss, len);
+		/* Need to round up for the last dword in the packet. */
+		w = (len + 3) >> 2;
+		__iowrite32_copy(piobuf, addr, w - 1);
+		/* must flush early everything before trigger word */
+		ipath_flush_wc();
+		__raw_writel(addr[w - 1], piobuf + w - 1);
+		/* be sure trigger word is written */
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+	copy_io(piobuf, ss, len);
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_get_counters - get various chip counters
+ * @dd: the infinipath device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	cntrs->symbol_error_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+	cntrs->link_error_recovery_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+	/*
+	 * The link downed counter counts when the other side downs the
+	 * connection. We add in the number of times we downed the link
+	 * due to local link integrity errors to compensate.
+	 */
+	cntrs->link_downed_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
+	cntrs->port_rcv_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
+	cntrs->port_rcv_remphys_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
+	cntrs->port_xmit_discards =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
+	cntrs->port_xmit_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	cntrs->port_rcv_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	cntrs->port_xmit_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	cntrs->port_rcv_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
 /**
  * ipath_ib_piobufavail - callback when a PIO buffer is available
  * @arg: the device pointer
@@ -595,9 +955,9 @@ static int ipath_query_device(struct ib_device *ibdev,
 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
 		IB_DEVICE_SYS_IMAGE_GUID;
 	props->page_size_cap = PAGE_SIZE;
-	props->vendor_id = ipath_layer_get_vendorid(dev->dd);
-	props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
-	props->hw_ver = ipath_layer_get_pcirev(dev->dd);
+	props->vendor_id = dev->dd->ipath_vendorid;
+	props->vendor_part_id = dev->dd->ipath_deviceid;
+	props->hw_ver = dev->dd->ipath_pcirev;
 
 	props->sys_image_guid = dev->sys_image_guid;
 
@@ -618,7 +978,7 @@ static int ipath_query_device(struct ib_device *ibdev,
 	props->max_srq_sge = ib_ipath_max_srq_sges;
 	/* props->local_ca_ack_delay */
 	props->atomic_cap = IB_ATOMIC_HCA;
-	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
+	props->max_pkeys = ipath_get_npkeys(dev->dd);
 	props->max_mcast_grp = ib_ipath_max_mcast_grps;
 	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -643,12 +1003,17 @@ const u8 ipath_cvt_physportstate[16] = {
 	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
 };
 
+u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
+{
+	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+}
+
 static int ipath_query_port(struct ib_device *ibdev,
 			    u8 port, struct ib_port_attr *props)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	enum ib_mtu mtu;
-	u16 lid = ipath_layer_get_lid(dev->dd);
+	u16 lid = dev->dd->ipath_lid;
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
@@ -656,16 +1021,16 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->lmc = dev->mkeyprot_resv_lmc & 7;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
-	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+	ibcstat = dev->dd->ipath_lastibcstat;
 	props->state = ((ibcstat >> 4) & 0x3) + 1;
 	/* See phys_state_show() */
 	props->phys_state = ipath_cvt_physportstate[
-		ipath_layer_get_lastibcstat(dev->dd) & 0xf];
+		dev->dd->ipath_lastibcstat & 0xf];
 	props->port_cap_flags = dev->port_cap_flags;
 	props->gid_tbl_len = 1;
 	props->max_msg_sz = 0x80000000;
-	props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
-	props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
+	props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
+	props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
 		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
 	props->active_width = IB_WIDTH_4X;
@@ -675,7 +1040,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->init_type_reply = 0;
 
 	props->max_mtu = IB_MTU_4096;
-	switch (ipath_layer_get_ibmtu(dev->dd)) {
+	switch (dev->dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -734,7 +1099,7 @@ static int ipath_modify_port(struct ib_device *ibdev,
 	dev->port_cap_flags |= props->set_port_cap_mask;
 	dev->port_cap_flags &= ~props->clr_port_cap_mask;
 	if (port_modify_mask & IB_PORT_SHUTDOWN)
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
 		dev->qkey_violations = 0;
 	return 0;
@@ -751,7 +1116,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port,
 		goto bail;
 	}
 	gid->global.subnet_prefix = dev->gid_prefix;
-	gid->global.interface_id = ipath_layer_get_guid(dev->dd);
+	gid->global.interface_id = dev->dd->ipath_guid;
 
 	ret = 0;
 
@@ -902,25 +1267,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 	return 0;
 }
 
+/**
+ * ipath_get_npkeys - return the size of the PKEY table for port 0
+ * @dd: the infinipath device
+ */
+unsigned ipath_get_npkeys(struct ipath_devdata *dd)
+{
+	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
+}
+
+/**
+ * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * @dd: the infinipath device
+ * @index: the PKEY index
+ */
+unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
+{
+	unsigned ret;
+
+	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
+		ret = 0;
+	else
+		ret = dd->ipath_pd[0]->port_pkeys[index];
+
+	return ret;
+}
+
 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			    u16 *pkey)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	int ret;
 
-	if (index >= ipath_layer_get_npkeys(dev->dd)) {
+	if (index >= ipath_get_npkeys(dev->dd)) {
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	*pkey = ipath_layer_get_pkey(dev->dd, index);
+	*pkey = ipath_get_pkey(dev->dd, index);
 	ret = 0;
 
 bail:
 	return ret;
 }
 
-
 /**
  * ipath_alloc_ucontext - allocate a ucontest
  * @ibdev: the infiniband device
@@ -953,6 +1343,63 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context)
 
 static int ipath_verbs_register_sysfs(struct ib_device *dev);
 
+static void __verbs_timer(unsigned long arg)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+	/*
+	 * If port 0 receive packet interrupts are not available, or
+	 * can be missed, poll the receive queue
+	 */
+	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+		ipath_kreceive(dd);
+
+	/* Handle verbs layer timeouts. */
+	ipath_ib_timer(dd->verbs_dev);
+
+	mod_timer(&dd->verbs_timer, jiffies + 1);
+}
+
+static int enable_timer(struct ipath_devdata *dd)
+{
+	/*
+	 * Early chips had a design flaw where the chip and kernel idea
+	 * of the tail register don't always agree, and therefore we won't
+	 * get an interrupt on the next packet received.
+	 * If the board supports per packet receive interrupts, use it.
+	 * Otherwise, the timer function periodically checks for packets
+	 * to cover this case.
+	 * Either way, the timer is needed for verbs layer related
+	 * processing.
+	 */
+	if (dd->ipath_flags & IPATH_GPIO_INTR) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+				 0x2074076542310ULL);
+		/* Enable GPIO bit 2 interrupt */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 (u64) (1 << 2));
+	}
+
+	init_timer(&dd->verbs_timer);
+	dd->verbs_timer.function = __verbs_timer;
+	dd->verbs_timer.data = (unsigned long)dd;
+	dd->verbs_timer.expires = jiffies + 1;
+	add_timer(&dd->verbs_timer);
+
+	return 0;
+}
+
+static int disable_timer(struct ipath_devdata *dd)
+{
+	/* Disable GPIO bit 2 interrupt */
+	if (dd->ipath_flags & IPATH_GPIO_INTR)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+	del_timer_sync(&dd->verbs_timer);
+
+	return 0;
+}
+
 /**
  * ipath_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
@@ -960,7 +1407,7 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev);
  */
 int ipath_register_ib_device(struct ipath_devdata *dd)
 {
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
 	int ret;
@@ -1020,7 +1467,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
-	ipath_layer_get_counters(dd, &cntrs);
+	ipath_get_counters(dd, &cntrs);
 	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
 	idev->z_link_error_recovery_counter =
 		cntrs.link_error_recovery_counter;
@@ -1044,14 +1491,14 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	 * device types in the system, we can't be sure this is unique.
 	 */
 	if (!sys_image_guid)
-		sys_image_guid = ipath_layer_get_guid(dd);
+		sys_image_guid = dd->ipath_guid;
 	idev->sys_image_guid = sys_image_guid;
 	idev->ib_unit = dd->ipath_unit;
 	idev->dd = dd;
 
 	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
 	dev->owner = THIS_MODULE;
-	dev->node_guid = ipath_layer_get_guid(dd);
+	dev->node_guid = dd->ipath_guid;
 	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1085,7 +1532,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
 	dev->node_type = IB_NODE_CA;
 	dev->phys_port_cnt = 1;
-	dev->dma_device = ipath_layer_get_device(dd);
+	dev->dma_device = &dd->pcidev->dev;
 	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
@@ -1139,7 +1586,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	if (ipath_verbs_register_sysfs(dev))
 		goto err_class;
 
-	ipath_layer_enable_timer(dd);
+	enable_timer(dd);
 
 	goto bail;
 
@@ -1164,7 +1611,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
 	struct ib_device *ibdev = &dev->ibdev;
 
-	ipath_layer_disable_timer(dev->dd);
+	disable_timer(dev->dd);
 
 	ib_unregister_device(ibdev);
 
@@ -1197,7 +1644,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 
-	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1206,7 +1653,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
-	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");