Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 687
1 file changed, 556 insertions(+), 131 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index d70a9b6b5239..b8381c5e72bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -33,15 +33,13 @@
 
 #include <rdma/ib_mad.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
 #include <linux/utsname.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-/* Not static, because we don't want the compiler removing it */
-const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
-
 static unsigned int ib_ipath_qp_table_size = 251;
 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -52,10 +50,6 @@ module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
 MODULE_PARM_DESC(lkey_table_size,
 		   "LKEY table size in bits (2^n, 1 <= n <= 23)");
 
-unsigned int ib_ipath_debug; /* debug mask */
-module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "Verbs debug mask");
-
 static unsigned int ib_ipath_max_pds = 0xFFFF;
 module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_pds,
@@ -79,6 +73,10 @@ module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
 		   S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
 
+unsigned int ib_ipath_max_qps = 16384;
+module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
 unsigned int ib_ipath_max_sges = 0x60;
 module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
@@ -109,9 +107,9 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
 		  uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@pathscale.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
+static unsigned int ib_ipath_disable_sma;
+module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ib_ipath_disable_sma, "Disable the SMA");
 
 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = 0,
@@ -125,6 +123,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_ERR] = 0,
 };
 
+struct ipath_ucontext {
+	struct ib_ucontext ibucontext;
+};
+
+static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
+						  *ibucontext)
+{
+	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
+}
+
 /*
  * Translate ib_wr_opcode into ib_wc_opcode.
  */
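
The new to_iucontext() above is the kernel's standard container_of() downcast: given a pointer to the embedded struct ib_ucontext, it recovers the address of the enclosing ipath_ucontext. A minimal userspace sketch of the same pattern (struct names here are illustrative, not driver types):

#include <stddef.h>
#include <stdio.h>

/* Userspace rendering of the kernel's container_of(): recover the
 * address of the outer struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer {
	int tag;
	struct inner in;	/* embedded, like ibucontext above */
};

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *ip = &o.in;
	struct outer *op = container_of(ip, struct outer, in);

	printf("%d\n", op->tag);	/* prints 42 */
	return 0;
}
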
@@ -277,11 +285,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			      struct ib_recv_wr **bad_wr)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
+	struct ipath_rwq *wq = qp->r_rq.wq;
 	unsigned long flags;
 	int ret;
 
 	/* Check that state is OK to post receive. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) {
+	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
 		*bad_wr = wr;
 		ret = -EINVAL;
 		goto bail;
@@ -290,59 +299,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	for (; wr; wr = wr->next) {
 		struct ipath_rwqe *wqe;
 		u32 next;
-		int i, j;
+		int i;
 
-		if (wr->num_sge > qp->r_rq.max_sge) {
+		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
 			*bad_wr = wr;
 			ret = -ENOMEM;
 			goto bail;
 		}
 
 		spin_lock_irqsave(&qp->r_rq.lock, flags);
-		next = qp->r_rq.head + 1;
+		next = wq->head + 1;
 		if (next >= qp->r_rq.size)
 			next = 0;
-		if (next == qp->r_rq.tail) {
+		if (next == wq->tail) {
 			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 			*bad_wr = wr;
 			ret = -ENOMEM;
 			goto bail;
 		}
 
-		wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head);
+		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
 		wqe->wr_id = wr->wr_id;
-		wqe->sg_list[0].mr = NULL;
-		wqe->sg_list[0].vaddr = NULL;
-		wqe->sg_list[0].length = 0;
-		wqe->sg_list[0].sge_length = 0;
-		wqe->length = 0;
-		for (i = 0, j = 0; i < wr->num_sge; i++) {
-			/* Check LKEY */
-			if (to_ipd(qp->ibqp.pd)->user &&
-			    wr->sg_list[i].lkey == 0) {
-				spin_unlock_irqrestore(&qp->r_rq.lock,
-						       flags);
-				*bad_wr = wr;
-				ret = -EINVAL;
-				goto bail;
-			}
-			if (wr->sg_list[i].length == 0)
-				continue;
-			if (!ipath_lkey_ok(
-				&to_idev(qp->ibqp.device)->lk_table,
-				&wqe->sg_list[j], &wr->sg_list[i],
-				IB_ACCESS_LOCAL_WRITE)) {
-				spin_unlock_irqrestore(&qp->r_rq.lock,
-						       flags);
-				*bad_wr = wr;
-				ret = -EINVAL;
-				goto bail;
-			}
-			wqe->length += wr->sg_list[i].length;
-			j++;
-		}
-		wqe->num_sge = j;
-		qp->r_rq.head = next;
+		wqe->num_sge = wr->num_sge;
+		for (i = 0; i < wr->num_sge; i++)
+			wqe->sg_list[i] = wr->sg_list[i];
+		wq->head = next;
 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 	}
 	ret = 0;
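
The rewritten receive path treats the queue as a classic single-producer ring: the poster advances head, the consumer advances tail, and the ring is declared full when advancing head would land on tail, so one slot is always sacrificed to distinguish full from empty. A minimal, self-contained sketch of that convention (names hypothetical, not the driver's types):

#include <stdbool.h>

/* Hypothetical ring with the same full/empty convention as the code
 * above: empty when head == tail, full when (head + 1) % size == tail. */
struct ring {
	unsigned head;	/* next slot the producer will fill */
	unsigned tail;	/* next slot the consumer will drain */
	unsigned size;	/* number of slots */
};

static bool ring_post(struct ring *r)
{
	unsigned next = r->head + 1;

	if (next >= r->size)
		next = 0;
	if (next == r->tail)
		return false;	/* full: mirrors the -ENOMEM path above */
	/* ... copy the work request into slot r->head ... */
	r->head = next;
	return true;
}
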
@@ -377,6 +358,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
 	switch (qp->ibqp.qp_type) {
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
+		if (ib_ipath_disable_sma)
+			break;
+		/* FALLTHROUGH */
 	case IB_QPT_UD:
 		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
 		break;
@@ -395,7 +379,7 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
 }
 
 /**
- * ipath_ib_rcv - process and incoming packet
+ * ipath_ib_rcv - process an incoming packet
  * @arg: the device pointer
  * @rhdr: the header of the packet
  * @data: the packet data
@@ -404,9 +388,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
  * This is called from ipath_kreceive() to process an incoming packet at
  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
  */
-static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
+void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
+		  u32 tlen)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_ib_header *hdr = rhdr;
 	struct ipath_other_headers *ohdr;
 	struct ipath_qp *qp;
@@ -427,7 +411,7 @@ static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
 	lid = be16_to_cpu(hdr->lrh[1]);
 	if (lid < IPATH_MULTICAST_LID_BASE) {
 		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
-		if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
+		if (unlikely(lid != dev->dd->ipath_lid)) {
 			dev->rcv_errors++;
 			goto bail;
 		}
@@ -495,9 +479,8 @@ bail:;
  * This is called from ipath_do_rcv_timer() at interrupt level to check for
  * QPs which need retransmits and to collect performance numbers.
  */
-static void ipath_ib_timer(void *arg)
+void ipath_ib_timer(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_qp *resend = NULL;
 	struct list_head *last;
 	struct ipath_qp *qp;
@@ -539,19 +522,19 @@ static void ipath_ib_timer(void *arg)
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
 	    --dev->pma_sample_start == 0) {
 		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-		ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
-					      &dev->ipath_rword,
-					      &dev->ipath_spkts,
-					      &dev->ipath_rpkts,
-					      &dev->ipath_xmit_wait);
+		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
+					&dev->ipath_rword,
+					&dev->ipath_spkts,
+					&dev->ipath_rpkts,
+					&dev->ipath_xmit_wait);
 	}
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
 		if (dev->pma_sample_interval == 0) {
 			u64 ta, tb, tc, td, te;
 
 			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-			ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
-						      &tc, &td, &te);
+			ipath_snapshot_counters(dev->dd, &ta, &tb,
+						&tc, &td, &te);
 
 			dev->ipath_sword = ta - dev->ipath_sword;
 			dev->ipath_rword = tb - dev->ipath_rword;
@@ -581,6 +564,362 @@ static void ipath_ib_timer(void *arg)
 	}
 }
 
+static void update_sge(struct ipath_sge_state *ss, u32 length)
+{
+	struct ipath_sge *sge = &ss->sge;
+
+	sge->vaddr += length;
+	sge->length -= length;
+	sge->sge_length -= length;
+	if (sge->sge_length == 0) {
+		if (--ss->num_sge)
+			*sge = *ss->sg_list++;
+	} else if (sge->length == 0 && sge->mr != NULL) {
+		if (++sge->n >= IPATH_SEGSZ) {
+			if (++sge->m >= sge->mr->mapsz)
+				return;
+			sge->n = 0;
+		}
+		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+	}
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#endif
+
+static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
+		    u32 length)
+{
+	u32 extra = 0;
+	u32 data = 0;
+	u32 last;
+
+	while (1) {
+		u32 len = ss->sge.length;
+		u32 off;
+
+		BUG_ON(len == 0);
+		if (len > length)
+			len = length;
+		if (len > ss->sge.sge_length)
+			len = ss->sge.sge_length;
+		/* If the source address is not aligned, try to align it. */
+		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+		if (off) {
+			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+					    ~(sizeof(u32) - 1));
+			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+			u32 y;
+
+			y = sizeof(u32) - off;
+			if (len > y)
+				len = y;
+			if (len + extra >= sizeof(u32)) {
+				data |= set_upper_bits(v, extra *
+						       BITS_PER_BYTE);
+				len = sizeof(u32) - extra;
+				if (len == length) {
+					last = data;
+					break;
+				}
+				__raw_writel(data, piobuf);
+				piobuf++;
+				extra = 0;
+				data = 0;
+			} else {
+				/* Clear unused upper bytes */
+				data |= clear_upper_bytes(v, len, extra);
+				if (len == length) {
+					last = data;
+					break;
+				}
+				extra += len;
+			}
+		} else if (extra) {
+			/* Source address is aligned. */
+			u32 *addr = (u32 *) ss->sge.vaddr;
+			int shift = extra * BITS_PER_BYTE;
+			int ushift = 32 - shift;
+			u32 l = len;
+
+			while (l >= sizeof(u32)) {
+				u32 v = *addr;
+
+				data |= set_upper_bits(v, shift);
+				__raw_writel(data, piobuf);
+				data = get_upper_bits(v, ushift);
+				piobuf++;
+				addr++;
+				l -= sizeof(u32);
+			}
+			/*
+			 * We still have 'extra' number of bytes leftover.
+			 */
+			if (l) {
+				u32 v = *addr;
+
+				if (l + extra >= sizeof(u32)) {
+					data |= set_upper_bits(v, shift);
+					len -= l + extra - sizeof(u32);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					__raw_writel(data, piobuf);
+					piobuf++;
+					extra = 0;
+					data = 0;
+				} else {
+					/* Clear unused upper bytes */
+					data |= clear_upper_bytes(v, l,
+								  extra);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					extra += l;
+				}
+			} else if (len == length) {
+				last = data;
+				break;
+			}
+		} else if (len == length) {
+			u32 w;
+
+			/*
+			 * Need to round up for the last dword in the
+			 * packet.
+			 */
+			w = (len + 3) >> 2;
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+			piobuf += w - 1;
+			last = ((u32 *) ss->sge.vaddr)[w - 1];
+			break;
+		} else {
+			u32 w = len >> 2;
+
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
+			piobuf += w;
+
+			extra = len & (sizeof(u32) - 1);
+			if (extra) {
+				u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+				/* Clear unused upper bytes */
+				data = clear_upper_bytes(v, extra, 0);
+			}
+		}
+		update_sge(ss, len);
+		length -= len;
+	}
+	/* Update address before sending packet. */
+	update_sge(ss, length);
+	/* must flush early everything before trigger word */
+	ipath_flush_wc();
+	__raw_writel(last, piobuf);
+	/* be sure trigger word is written */
+	ipath_flush_wc();
+}
+
+/**
+ * ipath_verbs_send - send a packet
+ * @dd: the infinipath device
+ * @hdrwords: the number of words in the header
+ * @hdr: the packet header
+ * @len: the length of the packet in bytes
+ * @ss: the SGE to send
+ */
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
+{
+	u32 __iomem *piobuf;
+	u32 plen;
+	int ret;
+
+	/* +1 is for the qword padding of pbc */
+	plen = hdrwords + ((len + 3) >> 2) + 1;
+	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
+		ipath_dbg("packet len 0x%x too long, failing\n", plen);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/* Get a PIO buffer to use. */
+	piobuf = ipath_getpiobuf(dd, NULL);
+	if (unlikely(piobuf == NULL)) {
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	/*
+	 * Write len to control qword, no flags.
+	 * We have to flush after the PBC for correctness on some cpus
+	 * or WC buffer can be written out of order.
+	 */
+	writeq(plen, piobuf);
+	ipath_flush_wc();
+	piobuf += 2;
+	if (len == 0) {
+		/*
+		 * If there is just the header portion, must flush before
+		 * writing last word of header for correctness, and after
+		 * the last header word (trigger word).
+		 */
+		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+
+	__iowrite32_copy(piobuf, hdr, hdrwords);
+	piobuf += hdrwords;
+
+	/* The common case is aligned and contained in one segment. */
+	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+		u32 w;
+		u32 *addr = (u32 *) ss->sge.vaddr;
+
+		/* Update address before sending packet. */
+		update_sge(ss, len);
+		/* Need to round up for the last dword in the packet. */
+		w = (len + 3) >> 2;
+		__iowrite32_copy(piobuf, addr, w - 1);
+		/* must flush early everything before trigger word */
+		ipath_flush_wc();
+		__raw_writel(addr[w - 1], piobuf + w - 1);
+		/* be sure trigger word is written */
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+	copy_io(piobuf, ss, len);
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_get_counters - get various chip counters
+ * @dd: the infinipath device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	cntrs->symbol_error_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+	cntrs->link_error_recovery_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+	/*
+	 * The link downed counter counts when the other side downs the
+	 * connection. We add in the number of times we downed the link
+	 * due to local link integrity errors to compensate.
+	 */
+	cntrs->link_downed_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
+	cntrs->port_rcv_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
+	cntrs->port_rcv_remphys_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
+	cntrs->port_xmit_discards =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
+	cntrs->port_xmit_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	cntrs->port_rcv_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	cntrs->port_xmit_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	cntrs->port_rcv_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
 /**
  * ipath_ib_piobufavail - callback when a PIO buffer is available
  * @arg: the device pointer
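
On little-endian builds, the clear_upper_bytes() helper added above isolates the low n bytes of a value and repositions them at byte offset off of the 32-bit word being assembled for the PIO buffer (the caller then ORs the result into its accumulator). A quick standalone userspace check of that identity, with the little-endian variant copied verbatim from the hunk above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* Little-endian variant from the hunk above: keep the low n bytes of
 * data and place them at byte offset off in the result. */
static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= ((sizeof(uint32_t) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(uint32_t) - n - off) * BITS_PER_BYTE);
	return data;
}

int main(void)
{
	/* keep the 2 low bytes of 0xAABBCCDD, shifted up by 1 byte */
	assert(clear_upper_bytes(0xAABBCCDD, 2, 1) == 0x00CCDD00);
	/* keep 1 low byte at offset 0 */
	assert(clear_upper_bytes(0xAABBCCDD, 1, 0) == 0x000000DD);
	printf("ok\n");
	return 0;
}
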
@@ -591,9 +930,8 @@ static void ipath_ib_timer(void *arg)
  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
  * return zero).
  */
-static int ipath_ib_piobufavail(void *arg)
+int ipath_ib_piobufavail(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_qp *qp;
 	unsigned long flags;
 
@@ -624,14 +962,14 @@ static int ipath_query_device(struct ib_device *ibdev,
 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
 		IB_DEVICE_SYS_IMAGE_GUID;
 	props->page_size_cap = PAGE_SIZE;
-	props->vendor_id = ipath_layer_get_vendorid(dev->dd);
-	props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
-	props->hw_ver = ipath_layer_get_pcirev(dev->dd);
+	props->vendor_id = dev->dd->ipath_vendorid;
+	props->vendor_part_id = dev->dd->ipath_deviceid;
+	props->hw_ver = dev->dd->ipath_pcirev;
 
 	props->sys_image_guid = dev->sys_image_guid;
 
 	props->max_mr_size = ~0ull;
-	props->max_qp = dev->qp_table.max;
+	props->max_qp = ib_ipath_max_qps;
 	props->max_qp_wr = ib_ipath_max_qp_wrs;
 	props->max_sge = ib_ipath_max_sges;
 	props->max_cq = ib_ipath_max_cqs;
@@ -647,7 +985,7 @@ static int ipath_query_device(struct ib_device *ibdev,
 	props->max_srq_sge = ib_ipath_max_srq_sges;
 	/* props->local_ca_ack_delay */
 	props->atomic_cap = IB_ATOMIC_HCA;
-	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
+	props->max_pkeys = ipath_get_npkeys(dev->dd);
 	props->max_mcast_grp = ib_ipath_max_mcast_grps;
 	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -672,12 +1010,17 @@ const u8 ipath_cvt_physportstate[16] = {
 	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
 };
 
+u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
+{
+	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+}
+
 static int ipath_query_port(struct ib_device *ibdev,
 			    u8 port, struct ib_port_attr *props)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	enum ib_mtu mtu;
-	u16 lid = ipath_layer_get_lid(dev->dd);
+	u16 lid = dev->dd->ipath_lid;
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
@@ -685,16 +1028,16 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->lmc = dev->mkeyprot_resv_lmc & 7;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
-	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+	ibcstat = dev->dd->ipath_lastibcstat;
 	props->state = ((ibcstat >> 4) & 0x3) + 1;
 	/* See phys_state_show() */
 	props->phys_state = ipath_cvt_physportstate[
-		ipath_layer_get_lastibcstat(dev->dd) & 0xf];
+		dev->dd->ipath_lastibcstat & 0xf];
 	props->port_cap_flags = dev->port_cap_flags;
 	props->gid_tbl_len = 1;
 	props->max_msg_sz = 0x80000000;
-	props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
-	props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
+	props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
+	props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
 		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
 	props->active_width = IB_WIDTH_4X;
@@ -704,7 +1047,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->init_type_reply = 0;
 
 	props->max_mtu = IB_MTU_4096;
-	switch (ipath_layer_get_ibmtu(dev->dd)) {
+	switch (dev->dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -763,7 +1106,7 @@ static int ipath_modify_port(struct ib_device *ibdev,
 	dev->port_cap_flags |= props->set_port_cap_mask;
 	dev->port_cap_flags &= ~props->clr_port_cap_mask;
 	if (port_modify_mask & IB_PORT_SHUTDOWN)
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
 		dev->qkey_violations = 0;
 	return 0;
@@ -780,7 +1123,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port,
 		goto bail;
 	}
 	gid->global.subnet_prefix = dev->gid_prefix;
-	gid->global.interface_id = ipath_layer_get_guid(dev->dd);
+	gid->global.interface_id = dev->dd->ipath_guid;
 
 	ret = 0;
 
@@ -803,18 +1146,22 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
 	 * we allow allocations of more than we report for this value.
 	 */
 
-	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+	pd = kmalloc(sizeof *pd, GFP_KERNEL);
+	if (!pd) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
-	pd = kmalloc(sizeof *pd, GFP_KERNEL);
-	if (!pd) {
+	spin_lock(&dev->n_pds_lock);
+	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+		spin_unlock(&dev->n_pds_lock);
+		kfree(pd);
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
 	dev->n_pds_allocated++;
+	spin_unlock(&dev->n_pds_lock);
 
 	/* ib_alloc_pd() will initialize pd->ibpd. */
 	pd->user = udata != NULL;
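
The reordering above is the usual pattern for a sleeping allocation guarded by a spinlock-protected quota: kmalloc(GFP_KERNEL) may sleep and therefore cannot run under spin_lock(), so the object is allocated first, the quota is re-checked under the lock, and the allocation is freed on the failure path. Condensed into a generic sketch (kernel context assumed, via <linux/slab.h>, <linux/spinlock.h>, and <linux/err.h>; struct quota and struct obj are hypothetical):

/* Sketch of a quota-guarded allocator mirroring the alloc_pd flow:
 * allocate first (may sleep), then take the spinlock, check the limit,
 * and undo the allocation if the quota is already exhausted. */
struct obj *quota_alloc(struct quota *q)
{
	struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);	/* may sleep */

	if (!o)
		return ERR_PTR(-ENOMEM);

	spin_lock(&q->lock);
	if (q->allocated == q->max) {
		spin_unlock(&q->lock);
		kfree(o);			/* back out, no leak */
		return ERR_PTR(-ENOMEM);
	}
	q->allocated++;
	spin_unlock(&q->lock);

	return o;
}
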
@@ -830,7 +1177,9 @@ static int ipath_dealloc_pd(struct ib_pd *ibpd)
 	struct ipath_pd *pd = to_ipd(ibpd);
 	struct ipath_ibdev *dev = to_idev(ibpd->device);
 
+	spin_lock(&dev->n_pds_lock);
 	dev->n_pds_allocated--;
+	spin_unlock(&dev->n_pds_lock);
 
 	kfree(pd);
 
@@ -851,11 +1200,6 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 	struct ib_ah *ret;
 	struct ipath_ibdev *dev = to_idev(pd->device);
 
-	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	/* A multicast address requires a GRH (see ch. 8.4.1). */
 	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
@@ -881,7 +1225,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 		goto bail;
 	}
 
+	spin_lock(&dev->n_ahs_lock);
+	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
+		spin_unlock(&dev->n_ahs_lock);
+		kfree(ah);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	dev->n_ahs_allocated++;
+	spin_unlock(&dev->n_ahs_lock);
 
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
@@ -903,7 +1256,9 @@ static int ipath_destroy_ah(struct ib_ah *ibah)
 	struct ipath_ibdev *dev = to_idev(ibah->device);
 	struct ipath_ah *ah = to_iah(ibah);
 
+	spin_lock(&dev->n_ahs_lock);
 	dev->n_ahs_allocated--;
+	spin_unlock(&dev->n_ahs_lock);
 
 	kfree(ah);
 
@@ -919,25 +1274,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 	return 0;
 }
 
+/**
+ * ipath_get_npkeys - return the size of the PKEY table for port 0
+ * @dd: the infinipath device
+ */
+unsigned ipath_get_npkeys(struct ipath_devdata *dd)
+{
+	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
+}
+
+/**
+ * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * @dd: the infinipath device
+ * @index: the PKEY index
+ */
+unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
+{
+	unsigned ret;
+
+	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
+		ret = 0;
+	else
+		ret = dd->ipath_pd[0]->port_pkeys[index];
+
+	return ret;
+}
+
 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			    u16 *pkey)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	int ret;
 
-	if (index >= ipath_layer_get_npkeys(dev->dd)) {
+	if (index >= ipath_get_npkeys(dev->dd)) {
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	*pkey = ipath_layer_get_pkey(dev->dd, index);
+	*pkey = ipath_get_pkey(dev->dd, index);
 	ret = 0;
 
 bail:
 	return ret;
 }
 
-
 /**
  * ipath_alloc_ucontext - allocate a ucontext
  * @ibdev: the infiniband device
@@ -970,26 +1350,91 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context)
 
 static int ipath_verbs_register_sysfs(struct ib_device *dev);
 
+static void __verbs_timer(unsigned long arg)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+	/*
+	 * If port 0 receive packet interrupts are not available, or
+	 * can be missed, poll the receive queue
+	 */
+	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+		ipath_kreceive(dd);
+
+	/* Handle verbs layer timeouts. */
+	ipath_ib_timer(dd->verbs_dev);
+
+	mod_timer(&dd->verbs_timer, jiffies + 1);
+}
+
+static int enable_timer(struct ipath_devdata *dd)
+{
+	/*
+	 * Early chips had a design flaw where the chip and kernel idea
+	 * of the tail register don't always agree, and therefore we won't
+	 * get an interrupt on the next packet received.
+	 * If the board supports per packet receive interrupts, use it.
+	 * Otherwise, the timer function periodically checks for packets
+	 * to cover this case.
+	 * Either way, the timer is needed for verbs layer related
+	 * processing.
+	 */
+	if (dd->ipath_flags & IPATH_GPIO_INTR) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+				 0x2074076542310ULL);
+		/* Enable GPIO bit 2 interrupt */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 (u64) (1 << 2));
+	}
+
+	init_timer(&dd->verbs_timer);
+	dd->verbs_timer.function = __verbs_timer;
+	dd->verbs_timer.data = (unsigned long)dd;
+	dd->verbs_timer.expires = jiffies + 1;
+	add_timer(&dd->verbs_timer);
+
+	return 0;
+}
+
+static int disable_timer(struct ipath_devdata *dd)
+{
+	/* Disable GPIO bit 2 interrupt */
+	if (dd->ipath_flags & IPATH_GPIO_INTR)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+	del_timer_sync(&dd->verbs_timer);
+
+	return 0;
+}
+
 /**
  * ipath_register_ib_device - register our device with the infiniband core
- * @unit: the device number to register
  * @dd: the device data structure
  * Return the allocated ipath_ibdev pointer or NULL on error.
  */
-static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
+int ipath_register_ib_device(struct ipath_devdata *dd)
 {
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
 	int ret;
 
 	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
-	if (idev == NULL)
+	if (idev == NULL) {
+		ret = -ENOMEM;
 		goto bail;
+	}
 
 	dev = &idev->ibdev;
 
 	/* Only need to initialize non-zero fields. */
+	spin_lock_init(&idev->n_pds_lock);
+	spin_lock_init(&idev->n_ahs_lock);
+	spin_lock_init(&idev->n_cqs_lock);
+	spin_lock_init(&idev->n_qps_lock);
+	spin_lock_init(&idev->n_srqs_lock);
+	spin_lock_init(&idev->n_mcast_grps_lock);
+
 	spin_lock_init(&idev->qp_table.lock);
 	spin_lock_init(&idev->lk_table.lock);
 	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
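
The polling timer added above uses the classic pre-timer_setup() kernel timer API, where the callback receives an opaque unsigned long that is cast back to the device pointer and the timer re-arms itself from its own handler. The pattern in miniature (same API as the hunk above; my_dev and my_poll are illustrative names, not driver symbols):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Illustrative device with an embedded timer, mirroring dd->verbs_timer. */
struct my_dev {
	struct timer_list poll_timer;
};

static void my_poll(unsigned long arg)
{
	struct my_dev *dev = (struct my_dev *)arg;

	/* ... poll hardware, run periodic work ... */

	/* re-arm: fires again on the next jiffy, as __verbs_timer does */
	mod_timer(&dev->poll_timer, jiffies + 1);
}

static void my_poll_start(struct my_dev *dev)
{
	init_timer(&dev->poll_timer);
	dev->poll_timer.function = my_poll;
	dev->poll_timer.data = (unsigned long)dev;
	dev->poll_timer.expires = jiffies + 1;
	add_timer(&dev->poll_timer);
}
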
@@ -1030,7 +1475,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
-	ipath_layer_get_counters(dd, &cntrs);
+	ipath_get_counters(dd, &cntrs);
 	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
 	idev->z_link_error_recovery_counter =
 		cntrs.link_error_recovery_counter;
@@ -1054,14 +1499,14 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	 * device types in the system, we can't be sure this is unique.
 	 */
 	if (!sys_image_guid)
-		sys_image_guid = ipath_layer_get_guid(dd);
+		sys_image_guid = dd->ipath_guid;
 	idev->sys_image_guid = sys_image_guid;
-	idev->ib_unit = unit;
+	idev->ib_unit = dd->ipath_unit;
 	idev->dd = dd;
 
 	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
 	dev->owner = THIS_MODULE;
-	dev->node_guid = ipath_layer_get_guid(dd);
+	dev->node_guid = dd->ipath_guid;
 	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1093,9 +1538,9 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-	dev->node_type = IB_NODE_CA;
+	dev->node_type = RDMA_NODE_IB_CA;
 	dev->phys_port_cnt = 1;
-	dev->dma_device = ipath_layer_get_device(dd);
+	dev->dma_device = &dd->pcidev->dev;
 	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
@@ -1137,9 +1582,10 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	dev->attach_mcast = ipath_multicast_attach;
 	dev->detach_mcast = ipath_multicast_detach;
 	dev->process_mad = ipath_process_mad;
+	dev->mmap = ipath_mmap;
 
 	snprintf(dev->node_desc, sizeof(dev->node_desc),
-		 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
+		 IPATH_IDSTR " %s", system_utsname.nodename);
 
 	ret = ib_register_device(dev);
 	if (ret)
@@ -1148,7 +1594,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	if (ipath_verbs_register_sysfs(dev))
 		goto err_class;
 
-	ipath_layer_enable_timer(dd);
+	enable_timer(dd);
 
 	goto bail;
 
@@ -1160,37 +1606,32 @@ err_lk:
 	kfree(idev->qp_table.table);
 err_qp:
 	ib_dealloc_device(dev);
-	_VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
-		     unit, -ret);
+	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
 	idev = NULL;
 
 bail:
-	return idev;
+	dd->verbs_dev = idev;
+	return ret;
 }
 
-static void ipath_unregister_ib_device(void *arg)
+void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ib_device *ibdev = &dev->ibdev;
 
-	ipath_layer_disable_timer(dev->dd);
+	disable_timer(dev->dd);
 
 	ib_unregister_device(ibdev);
 
 	if (!list_empty(&dev->pending[0]) ||
 	    !list_empty(&dev->pending[1]) ||
 	    !list_empty(&dev->pending[2]))
-		_VERBS_ERROR("ipath%d pending list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "pending list not empty!\n");
 	if (!list_empty(&dev->piowait))
-		_VERBS_ERROR("ipath%d piowait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "piowait list not empty!\n");
 	if (!list_empty(&dev->rnrwait))
-		_VERBS_ERROR("ipath%d rnrwait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
 	if (!ipath_mcast_tree_empty())
-		_VERBS_ERROR("ipath%d multicast table memory leak!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
 	/*
 	 * Note that ipath_unregister_ib_device() can be called before all
 	 * the QPs are destroyed!
@@ -1201,25 +1642,12 @@ static void ipath_unregister_ib_device(void *arg)
 	ib_dealloc_device(ibdev);
 }
 
-static int __init ipath_verbs_init(void)
-{
-	return ipath_verbs_register(ipath_register_ib_device,
-				    ipath_unregister_ib_device,
-				    ipath_ib_piobufavail, ipath_ib_rcv,
-				    ipath_ib_timer);
-}
-
-static void __exit ipath_verbs_cleanup(void)
-{
-	ipath_verbs_unregister();
-}
-
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 
-	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1228,7 +1656,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
-	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");
@@ -1305,6 +1733,3 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
 bail:
 	return ret;
 }
-
-module_init(ipath_verbs_init);
-module_exit(ipath_verbs_cleanup);