Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c     |    1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c   |  275
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c |    1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c     |    7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h   |   15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c    |  978
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.h    |  104
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c      |  339
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c       |   12
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c       |   34
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c       |    9
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c      |   22
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c    |    6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c       |    5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c       |   13
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c    |  525
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h    |  109
17 files changed, 1126 insertions(+), 1329 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 147dd89e21c9..5d77a74aa57b 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -45,7 +45,6 @@
 #include <asm/uaccess.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_common.h"
 
 int ipath_diag_inuse;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 9af7406d6a62..958cc9b33c8f 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,7 +39,6 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
@@ -508,7 +507,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	ipathfs_add_device(dd);
 	ipath_user_add(dd);
 	ipath_diag_add(dd);
-	ipath_layer_add(dd);
 	ipath_register_ib_device(dd);
 
 	goto bail;
@@ -539,7 +537,6 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
 
 	dd = pci_get_drvdata(pdev);
 	ipath_unregister_ib_device(dd->verbs_dev);
-	ipath_layer_remove(dd);
 	ipath_diag_remove(dd);
 	ipath_user_remove(dd);
 	ipathfs_remove_device(dd);
@@ -614,11 +611,12 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
  *
  * wait up to msecs milliseconds for IB link state change to occur for
  * now, take the easy polling route. Currently used only by
- * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise
+ * ipath_set_linkstate. Returns 0 if state reached, otherwise
  * -ETIMEDOUT state can have multiple states set, for any of several
  * transitions.
  */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
+static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
+				int msecs)
 {
 	dd->ipath_sma_state_wanted = state;
 	wait_event_interruptible_timeout(ipath_sma_state_wait,
@@ -814,58 +812,6 @@ bail:
 	return skb;
 }
 
-/**
- * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
- * @dd: the infinipath device
- * @etail: the sk_buff number
- * @tlen: the total packet length
- * @hdr: the ethernet header
- *
- * Separate routine for better overall optimization
- */
-static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
-			    u32 tlen, struct ether_header *hdr)
-{
-	u32 elen;
-	u8 pad, *bthbytes;
-	struct sk_buff *skb, *nskb;
-
-	if (dd->ipath_port0_skbs &&
-	    hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
-		/*
-		 * Allocate a new sk_buff to replace the one we give
-		 * to the network stack.
-		 */
-		nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
-		if (!nskb) {
-			/* count OK packets that we drop */
-			ipath_stats.sps_krdrops++;
-			return;
-		}
-
-		bthbytes = (u8 *) hdr->bth;
-		pad = (bthbytes[1] >> 4) & 3;
-		/* +CRC32 */
-		elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
-
-		skb = dd->ipath_port0_skbs[etail];
-		dd->ipath_port0_skbs[etail] = nskb;
-		skb_put(skb, elen);
-
-		dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
-				    ((char __iomem *) dd->ipath_kregbase
-				     + dd->ipath_rcvegrbase), 0,
-				    virt_to_phys(nskb->data));
-
-		__ipath_layer_rcv(dd, hdr, skb);
-
-		/* another ether packet received */
-		ipath_stats.sps_ether_rpkts++;
-	}
-	else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
-		__ipath_layer_rcv_lid(dd, hdr);
-}
-
 static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
 			     u32 eflags,
 			     u32 l,
@@ -979,22 +925,17 @@ reloop:
 		if (unlikely(eflags))
 			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf,
-				     tlen);
-			if (dd->ipath_lli_counter)
-				dd->ipath_lli_counter--;
-
-		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
-			if (qp == IPATH_KD_QP &&
-			    bthbytes[0] == ipath_layer_rcv_opcode &&
-			    ebuf)
-				ipath_rcv_layer(dd, etail, tlen,
-						(struct ether_header *)hdr);
-			else
-				ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-					   "qp=%x), len %x; ignored\n",
-					   etype, bthbytes[0], qp, tlen);
+			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
+			if (dd->ipath_lli_counter)
+				dd->ipath_lli_counter--;
+			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+				   "qp=%x), len %x; ignored\n",
+				   etype, bthbytes[0], qp, tlen);
 		}
+		else if (etype == RCVHQ_RCV_TYPE_EAGER)
+			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+				   "qp=%x), len %x; ignored\n",
+				   etype, bthbytes[0], qp, tlen);
 		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
 			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
 				  be32_to_cpu(hdr->bth[0]) & 0xff);
@@ -1320,13 +1261,6 @@ rescan:
 		goto bail;
 	}
 
-	if (updated)
-		/*
-		 * ran out of bufs, now some (at least this one we just
-		 * got) are now available, so tell the layered driver.
-		 */
-		__ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
-
 	/*
 	 * set next starting place. Since it's just an optimization,
 	 * it doesn't matter who wins on this, so no locking
@@ -1503,7 +1437,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
 	return ret;
 }
 
-void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
+static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 {
 	static const char *what[4] = {
 		[0] = "DOWN",
@@ -1537,6 +1471,180 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 			 dd->ipath_ibcctrl | which);
 }
 
+int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+{
+	u32 lstate;
+	int ret;
+
+	switch (newstate) {
+	case IPATH_IB_LINKDOWN:
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKDOWN_SLEEP:
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKDOWN_DISABLE:
+		ipath_set_ib_lstate(dd,
+				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKINIT:
+		if (dd->ipath_flags & IPATH_LINKINIT) {
+			ret = 0;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		lstate = IPATH_LINKINIT;
+		break;
+
+	case IPATH_IB_LINKARM:
+		if (dd->ipath_flags & IPATH_LINKARMED) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(dd->ipath_flags &
+		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		/*
+		 * Since the port can transition to ACTIVE by receiving
+		 * a non VL 15 packet, wait for either state.
+		 */
+		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
+		break;
+
+	case IPATH_IB_LINKACTIVE:
+		if (dd->ipath_flags & IPATH_LINKACTIVE) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		lstate = IPATH_LINKACTIVE;
+		break;
+
+	default:
+		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
+		ret = -EINVAL;
+		goto bail;
+	}
+	ret = ipath_wait_linkstate(dd, lstate, 2000);
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_set_mtu - set the MTU
+ * @dd: the infinipath device
+ * @arg: the new MTU
+ *
+ * we can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link initialize (IPATH_IBSTATE_INIT) state...
+ */
+int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
+{
+	u32 piosize;
+	int changed = 0;
+	int ret;
+
+	/*
+	 * mtu is IB data payload max. It's the largest power of 2 less
+	 * than piosize (or even larger, since it only really controls the
+	 * largest we can receive; we can send the max of the mtu and
+	 * piosize). We check that it's one of the valid IB sizes.
+	 */
+	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+	    arg != 4096) {
+		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (dd->ipath_ibmtu == arg) {
+		ret = 0;	/* same as current */
+		goto bail;
+	}
+
+	piosize = dd->ipath_ibmaxlen;
+	dd->ipath_ibmtu = arg;
+
+	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
+		/* Only if it's not the initial value (or reset to it) */
+		if (piosize != dd->ipath_init_ibmaxlen) {
+			dd->ipath_ibmaxlen = piosize;
+			changed = 1;
+		}
+	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
+		piosize = arg + IPATH_PIO_MAXIBHDR;
+		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
+			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
+			   arg);
+		dd->ipath_ibmaxlen = piosize;
+		changed = 1;
+	}
+
+	if (changed) {
+		/*
+		 * set the IBC maxpktlength to the size of our pio
+		 * buffers in words
+		 */
+		u64 ibc = dd->ipath_ibcctrl;
+		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
+			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
+
+		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
+		dd->ipath_ibmaxlen = piosize;
+		piosize /= sizeof(u32);	/* in words */
+		/*
+		 * for ICRC, which we only send in diag test pkt mode, and
+		 * we don't need to worry about that for mtu
+		 */
+		piosize += 1;
+
+		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+		dd->ipath_ibcctrl = ibc;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+				 dd->ipath_ibcctrl);
+		dd->ipath_f_tidtemplate(dd);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+{
+	dd->ipath_lid = arg;
+	dd->ipath_lmc = lmc;
+
+	return 0;
+}
+
 /**
  * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
  * @dd: the infinipath device
@@ -1640,13 +1748,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
 			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
 
-	/*
-	 * we are shutting down, so tell the layered driver. We don't do
-	 * this on just a link state change, much like ethernet, a cable
-	 * unplug, etc. doesn't change driver state
-	 */
-	ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
-
 	/* disable IBC */
 	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
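The bulk of the hunks above move ipath_set_linkstate(), ipath_set_mtu() and ipath_set_lid() into ipath_driver.c. The MTU path accepts only the IB-defined payload sizes and resizes ipath_ibmaxlen around a fixed header allowance. A standalone sketch of that validation logic, with stand-in values for IPATH_PIO_MAXIBHDR and the initial limit (the driver derives the real ones from chip setup):

#include <stdio.h>

/* Stand-in constants; the driver derives these from chip setup. */
#define IPATH_PIO_MAXIBHDR 128
#define INIT_IBMAXLEN      (4096 + IPATH_PIO_MAXIBHDR)

/* Mirror of the check in ipath_set_mtu(): only the IB-defined
 * payload sizes are accepted. */
static int valid_ib_mtu(unsigned mtu)
{
	return mtu == 256 || mtu == 512 || mtu == 1024 ||
	       mtu == 2048 || mtu == 4096;
}

int main(void)
{
	unsigned mtu = 1024;
	unsigned ibmaxlen = INIT_IBMAXLEN;

	if (!valid_ib_mtu(mtu))
		return 1;
	/* A small MTU shrinks the max packet to payload + header room,
	 * as the driver does before reprogramming the IBC maxpktlen. */
	if (mtu + IPATH_PIO_MAXIBHDR < ibmaxlen)
		ibmaxlen = mtu + IPATH_PIO_MAXIBHDR;
	printf("ibmaxlen = %u\n", ibmaxlen);
	return 0;
}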
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index e999a46bef9b..f865ce89b73f 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -39,7 +39,6 @@
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_common.h"
 
 static int ipath_open(struct inode *, struct file *);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index ed54f8f2945e..250e2a9f01bb 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -34,7 +34,6 @@
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
@@ -290,8 +289,6 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 		*dd->ipath_statusp |=
 			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
 		dd->ipath_f_setextled(dd, lstate, ltstate);
-
-		__ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
 	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
 		/*
 		 * set INIT and DOWN. Down is checked by most of the other
@@ -709,10 +706,6 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
 {
 	int ret;
 
-	ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
-	if (ret > 0)
-		goto set;
-
 	ret = ipath_ib_piobufavail(dd->verbs_dev);
 	if (ret > 0)
 		goto set;
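After this hunk, handle_layer_pioavail() polls only the verbs consumer: a positive return means the consumer still has sends queued, and the handler re-arms the buffer-available interrupt (the goto set path). A minimal sketch of that poll-and-rearm shape, with all names stubbed:

#include <stdio.h>

struct dev { int verbs_wants_more; };

/* Stub: stands in for ipath_ib_piobufavail(); > 0 means the verbs
 * layer could not drain its send queue and wants another
 * buffer-available interrupt. */
static int verbs_piobufavail(struct dev *dd)
{
	return dd->verbs_wants_more;
}

/* Stub: stands in for setting IPATH_S_PIOINTBUFAVAIL in sendctrl. */
static void rearm_pioavail_intr(struct dev *dd)
{
	(void)dd;
	printf("re-arming PIO buffer-available interrupt\n");
}

/* Shape of handle_layer_pioavail() once the layered-driver hook is
 * gone: poll the one remaining consumer, re-arm only if needed. */
static void handle_pioavail(struct dev *dd)
{
	if (verbs_piobufavail(dd) > 0)
		rearm_pioavail_intr(dd);
}

int main(void)
{
	struct dev d = { .verbs_wants_more = 1 };
	handle_pioavail(&d);
	return 0;
}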
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index f1931105adb3..999249b7f27f 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -518,16 +518,6 @@ extern struct list_head ipath_dev_list;
 extern spinlock_t ipath_devs_lock;
 extern struct ipath_devdata *ipath_lookup(int unit);
 
-extern u16 ipath_layer_rcv_opcode;
-extern int __ipath_layer_intr(struct ipath_devdata *, u32);
-extern int ipath_layer_intr(struct ipath_devdata *, u32);
-extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
-			     struct sk_buff *);
-extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
-
-void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_remove(struct ipath_devdata *);
-
 int ipath_init_chip(struct ipath_devdata *, int);
 int ipath_enable_wc(struct ipath_devdata *dd);
 void ipath_disable_wc(struct ipath_devdata *dd);
@@ -575,12 +565,13 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
 
 int ipath_parse_ushort(const char *str, unsigned short *valp);
 
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-void ipath_set_ib_lstate(struct ipath_devdata *, int);
 void ipath_kreceive(struct ipath_devdata *);
 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
 int ipath_reset_device(int);
 void ipath_get_faststats(unsigned long);
+int ipath_set_linkstate(struct ipath_devdata *, u8);
+int ipath_set_mtu(struct ipath_devdata *, u16);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
 
 /* for use in system calls, where we want to know device type, etc. */
 #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index acc32200cc0e..10f578e2aed6 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -101,242 +101,14 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
 	return ret;
 }
 
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+void ipath_layer_lid_changed(struct ipath_devdata *dd)
 {
-	u32 lstate;
-	int ret;
-
-	switch (newstate) {
-	case IPATH_IB_LINKDOWN:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_SLEEP:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_DISABLE:
-		ipath_set_ib_lstate(dd,
-				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKINIT:
-		if (dd->ipath_flags & IPATH_LINKINIT) {
-			ret = 0;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKINIT;
-		break;
-
-	case IPATH_IB_LINKARM:
-		if (dd->ipath_flags & IPATH_LINKARMED) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags &
-		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		/*
-		 * Since the port can transition to ACTIVE by receiving
-		 * a non VL 15 packet, wait for either state.
-		 */
-		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-		break;
-
-	case IPATH_IB_LINKACTIVE:
-		if (dd->ipath_flags & IPATH_LINKACTIVE) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKACTIVE;
-		break;
-
-	default:
-		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-		ret = -EINVAL;
-		goto bail;
-	}
-	ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_layer_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size, the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
- */
-int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-	u32 piosize;
-	int changed = 0;
-	int ret;
-
-	/*
-	 * mtu is IB data payload max. It's the largest power of 2 less
-	 * than piosize (or even larger, since it only really controls the
-	 * largest we can receive; we can send the max of the mtu and
-	 * piosize). We check that it's one of the valid IB sizes.
-	 */
-	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-	    arg != 4096) {
-		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-		ret = -EINVAL;
-		goto bail;
-	}
-	if (dd->ipath_ibmtu == arg) {
-		ret = 0;	/* same as current */
-		goto bail;
-	}
-
-	piosize = dd->ipath_ibmaxlen;
-	dd->ipath_ibmtu = arg;
-
-	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-		/* Only if it's not the initial value (or reset to it) */
-		if (piosize != dd->ipath_init_ibmaxlen) {
-			dd->ipath_ibmaxlen = piosize;
-			changed = 1;
-		}
-	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-		piosize = arg + IPATH_PIO_MAXIBHDR;
-		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-			   arg);
-		dd->ipath_ibmaxlen = piosize;
-		changed = 1;
-	}
-
-	if (changed) {
-		/*
-		 * set the IBC maxpktlength to the size of our pio
-		 * buffers in words
-		 */
-		u64 ibc = dd->ipath_ibcctrl;
-		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
-
-		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
-		dd->ipath_ibmaxlen = piosize;
-		piosize /= sizeof(u32);	/* in words */
-		/*
-		 * for ICRC, which we only send in diag test pkt mode, and
-		 * we don't need to worry about that for mtu
-		 */
-		piosize += 1;
-
-		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-		dd->ipath_ibcctrl = ibc;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-		dd->ipath_f_tidtemplate(dd);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
-{
-	dd->ipath_lid = arg;
-	dd->ipath_lmc = lmc;
-
 	mutex_lock(&ipath_layer_mutex);
 
 	if (dd->ipath_layer.l_arg && layer_intr)
 		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
 
 	mutex_unlock(&ipath_layer_mutex);
-
-	return 0;
-}
-
-int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
-{
-	/* XXX - need to inform anyone who cares this just happened. */
-	dd->ipath_guid = guid;
-	return 0;
-}
-
-__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
-{
-	return dd->ipath_guid;
-}
-
-u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_majrev;
-}
-
-u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_minrev;
-}
-
-u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
-{
-	return dd->ipath_pcirev;
-}
-
-u32 ipath_layer_get_flags(struct ipath_devdata *dd)
-{
-	return dd->ipath_flags;
-}
-
-struct device *ipath_layer_get_device(struct ipath_devdata *dd)
-{
-	return &dd->pcidev->dev;
-}
-
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
-{
-	return dd->ipath_deviceid;
-}
-
-u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
-{
-	return dd->ipath_vendorid;
-}
-
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
-{
-	return dd->ipath_lastibcstat;
-}
-
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
-{
-	return dd->ipath_ibmtu;
 }
 
 void ipath_layer_add(struct ipath_devdata *dd)
@@ -436,22 +208,6 @@ void ipath_layer_unregister(void)
 
 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
 
-static void __ipath_verbs_timer(unsigned long arg)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-	/*
-	 * If port 0 receive packet interrupts are not available, or
-	 * can be missed, poll the receive queue
-	 */
-	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
-		ipath_kreceive(dd);
-
-	/* Handle verbs layer timeouts. */
-	ipath_ib_timer(dd->verbs_dev);
-	mod_timer(&dd->verbs_timer, jiffies + 1);
-}
-
 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
 {
 	int ret;
@@ -540,380 +296,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
 
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
-{
-	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	sge->vaddr += length;
-	sge->length -= length;
-	sge->sge_length -= length;
-	if (sge->sge_length == 0) {
-		if (--ss->num_sge)
-			*sge = *ss->sg_list++;
-	} else if (sge->length == 0 && sge->mr != NULL) {
-		if (++sge->n >= IPATH_SEGSZ) {
-			if (++sge->m >= sge->mr->mapsz)
-				return;
-			sge->n = 0;
-		}
-		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-	}
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#endif
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-		    u32 length)
-{
-	u32 extra = 0;
-	u32 data = 0;
-	u32 last;
-
-	while (1) {
-		u32 len = ss->sge.length;
-		u32 off;
-
-		BUG_ON(len == 0);
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		/* If the source address is not aligned, try to align it. */
-		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
-		if (off) {
-			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-					    ~(sizeof(u32) - 1));
-			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-			u32 y;
-
-			y = sizeof(u32) - off;
-			if (len > y)
-				len = y;
-			if (len + extra >= sizeof(u32)) {
-				data |= set_upper_bits(v, extra *
-						       BITS_PER_BYTE);
-				len = sizeof(u32) - extra;
-				if (len == length) {
-					last = data;
-					break;
-				}
-				__raw_writel(data, piobuf);
-				piobuf++;
-				extra = 0;
-				data = 0;
-			} else {
-				/* Clear unused upper bytes */
-				data |= clear_upper_bytes(v, len, extra);
-				if (len == length) {
-					last = data;
-					break;
-				}
-				extra += len;
-			}
-		} else if (extra) {
-			/* Source address is aligned. */
-			u32 *addr = (u32 *) ss->sge.vaddr;
-			int shift = extra * BITS_PER_BYTE;
-			int ushift = 32 - shift;
-			u32 l = len;
-
-			while (l >= sizeof(u32)) {
-				u32 v = *addr;
-
-				data |= set_upper_bits(v, shift);
-				__raw_writel(data, piobuf);
-				data = get_upper_bits(v, ushift);
-				piobuf++;
-				addr++;
-				l -= sizeof(u32);
-			}
-			/*
-			 * We still have 'extra' number of bytes leftover.
-			 */
-			if (l) {
-				u32 v = *addr;
-
-				if (l + extra >= sizeof(u32)) {
-					data |= set_upper_bits(v, shift);
-					len -= l + extra - sizeof(u32);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					__raw_writel(data, piobuf);
-					piobuf++;
-					extra = 0;
-					data = 0;
-				} else {
-					/* Clear unused upper bytes */
-					data |= clear_upper_bytes(v, l,
-								  extra);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					extra += l;
-				}
-			} else if (len == length) {
-				last = data;
-				break;
-			}
-		} else if (len == length) {
-			u32 w;
-
-			/*
-			 * Need to round up for the last dword in the
-			 * packet.
-			 */
-			w = (len + 3) >> 2;
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-			piobuf += w - 1;
-			last = ((u32 *) ss->sge.vaddr)[w - 1];
-			break;
-		} else {
-			u32 w = len >> 2;
-
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
-			piobuf += w;
-
-			extra = len & (sizeof(u32) - 1);
-			if (extra) {
-				u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-				/* Clear unused upper bytes */
-				data = clear_upper_bytes(v, extra, 0);
-			}
-		}
-		update_sge(ss, len);
-		length -= len;
-	}
-	/* Update address before sending packet. */
-	update_sge(ss, length);
-	/* must flush early everything before trigger word */
-	ipath_flush_wc();
-	__raw_writel(last, piobuf);
-	/* be sure trigger word is written */
-	ipath_flush_wc();
-}
-
-/**
- * ipath_verbs_send - send a packet from the verbs layer
- * @dd: the infinipath device
- * @hdrwords: the number of words in the header
- * @hdr: the packet header
- * @len: the length of the packet in bytes
- * @ss: the SGE to send
- *
- * This is like ipath_sma_send_pkt() in that we need to be able to send
- * packets after the chip is initialized (MADs) but also like
- * ipath_layer_send_hdr() since its used by the verbs layer.
- */
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
-{
-	u32 __iomem *piobuf;
-	u32 plen;
-	int ret;
-
-	/* +1 is for the qword padding of pbc */
-	plen = hdrwords + ((len + 3) >> 2) + 1;
-	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
-		ipath_dbg("packet len 0x%x too long, failing\n", plen);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* Get a PIO buffer to use. */
-	piobuf = ipath_getpiobuf(dd, NULL);
-	if (unlikely(piobuf == NULL)) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	/*
-	 * Write len to control qword, no flags.
-	 * We have to flush after the PBC for correctness on some cpus
-	 * or WC buffer can be written out of order.
-	 */
-	writeq(plen, piobuf);
-	ipath_flush_wc();
-	piobuf += 2;
-	if (len == 0) {
-		/*
-		 * If there is just the header portion, must flush before
-		 * writing last word of header for correctness, and after
-		 * the last header word (trigger word).
-		 */
-		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
-		ipath_flush_wc();
-		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-
-	__iowrite32_copy(piobuf, hdr, hdrwords);
-	piobuf += hdrwords;
-
-	/* The common case is aligned and contained in one segment. */
-	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-		u32 w;
-		u32 *addr = (u32 *) ss->sge.vaddr;
-
-		/* Update address before sending packet. */
-		update_sge(ss, len);
-		/* Need to round up for the last dword in the packet. */
-		w = (len + 3) >> 2;
-		__iowrite32_copy(piobuf, addr, w - 1);
-		/* must flush early everything before trigger word */
-		ipath_flush_wc();
-		__raw_writel(addr[w - 1], piobuf + w - 1);
-		/* be sure trigger word is written */
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-	copy_io(piobuf, ss, len);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-				  u64 *rwords, u64 *spkts, u64 *rpkts,
-				  u64 *xmit_wait)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_layer_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_layer_get_counters(struct ipath_devdata *dd,
-			     struct ipath_layer_counters *cntrs)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	cntrs->symbol_error_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
-	cntrs->link_error_recovery_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-	/*
-	 * The link downed counter counts when the other side downs the
-	 * connection. We add in the number of times we downed the link
-	 * due to local link integrity errors to compensate.
-	 */
-	cntrs->link_downed_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
-	cntrs->port_rcv_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
-	cntrs->port_rcv_remphys_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
-	cntrs->port_xmit_discards =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
-	cntrs->port_xmit_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	cntrs->port_rcv_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	cntrs->port_xmit_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	cntrs->port_rcv_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
-	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_layer_want_buffer(struct ipath_devdata *dd)
-{
-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-
-	return 0;
-}
-
 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
 {
 	int ret = 0;
@@ -985,361 +367,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
 }
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
-
-int ipath_layer_enable_timer(struct ipath_devdata *dd)
-{
-	/*
-	 * HT-400 has a design flaw where the chip and kernel idea
-	 * of the tail register don't always agree, and therefore we won't
-	 * get an interrupt on the next packet received.
-	 * If the board supports per packet receive interrupts, use it.
-	 * Otherwise, the timer function periodically checks for packets
-	 * to cover this case.
-	 * Either way, the timer is needed for verbs layer related
-	 * processing.
-	 */
-	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-				 0x2074076542310ULL);
-		/* Enable GPIO bit 2 interrupt */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 (u64) (1 << 2));
-	}
-
-	init_timer(&dd->verbs_timer);
-	dd->verbs_timer.function = __ipath_verbs_timer;
-	dd->verbs_timer.data = (unsigned long)dd;
-	dd->verbs_timer.expires = jiffies + 1;
-	add_timer(&dd->verbs_timer);
-
-	return 0;
-}
-
-int ipath_layer_disable_timer(struct ipath_devdata *dd)
-{
-	/* Disable GPIO bit 2 interrupt */
-	if (dd->ipath_flags & IPATH_GPIO_INTR)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
-
-	del_timer_sync(&dd->verbs_timer);
-
-	return 0;
-}
-
-/**
- * ipath_layer_set_verbs_flags - set the verbs layer flags
- * @dd: the infinipath device
- * @flags: the flags to set
- */
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
-{
-	struct ipath_devdata *ss;
-	unsigned long lflags;
-
-	spin_lock_irqsave(&ipath_devs_lock, lflags);
-
-	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
-		if (!(ss->ipath_flags & IPATH_INITTED))
-			continue;
-		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
-		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
-			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-		else
-			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, lflags);
-
-	return 0;
-}
-
-/**
- * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
-{
-	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-/**
- * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-	unsigned ret;
-
-	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-		ret = 0;
-	else
-		ret = dd->ipath_pd[0]->port_pkeys[index];
-
-	return ret;
-}
-
-/**
- * ipath_layer_get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd = dd->ipath_pd[0];
-
-	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-	return 0;
-}
-
-/**
- * rm_pkey - decrecment the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	int ret;
-
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (dd->ipath_pkeys[i] != key)
-			continue;
-		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-			dd->ipath_pkeys[i] = 0;
-			ret = 1;
-			goto bail;
-		}
-		break;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	u16 lkey = key & 0x7FFF;
-	int any = 0;
-	int ret;
-
-	if (lkey == 0x7FFF) {
-		ret = 0;
-		goto bail;
-	}
-
-	/* Look for an empty slot or a matching PKEY. */
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i]) {
-			any++;
-			continue;
-		}
-		/* If it matches exactly, try to increment the ref count */
-		if (dd->ipath_pkeys[i] == key) {
-			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-				ret = 0;
-				goto bail;
-			}
-			/* Lost the race. Look for an empty slot below. */
-			atomic_dec(&dd->ipath_pkeyrefs[i]);
-			any++;
-		}
-		/*
-		 * It makes no sense to have both the limited and unlimited
-		 * PKEY set at the same time since the unlimited one will
-		 * disable the limited one.
-		 */
-		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (!any) {
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i] &&
-		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-			/* for ipathstats, etc. */
-			ipath_stats.sps_pkeys[i] = lkey;
-			dd->ipath_pkeys[i] = key;
-			ret = 1;
-			goto bail;
-		}
-	}
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_layer_set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- */
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd;
-	int i;
-	int changed = 0;
-
-	pd = dd->ipath_pd[0];
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		u16 key = pkeys[i];
-		u16 okey = pd->port_pkeys[i];
-
-		if (key == okey)
-			continue;
-		/*
-		 * The value of this PKEY table entry is changing.
-		 * Remove the old entry in the hardware's array of PKEYs.
-		 */
-		if (okey & 0x7FFF)
-			changed |= rm_pkey(dd, okey);
-		if (key & 0x7FFF) {
-			int ret = add_pkey(dd, key);
-
-			if (ret < 0)
-				key = 0;
-			else
-				changed |= ret;
-		}
-		pd->port_pkeys[i] = key;
-	}
-	if (changed) {
-		u64 pkey;
-
-		pkey = (u64) dd->ipath_pkeys[0] |
-			((u64) dd->ipath_pkeys[1] << 16) |
-			((u64) dd->ipath_pkeys[2] << 32) |
-			((u64) dd->ipath_pkeys[3] << 48);
-		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-			   (unsigned long long) pkey);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-				 pkey);
-	}
-	return 0;
-}
-
-/**
- * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-/**
- * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
-					 int sleep)
-{
-	if (sleep)
-		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	else
-		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl);
-	return 0;
-}
-
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-/**
- * ipath_layer_set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-/**
- * ipath_layer_set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	return dd->ipath_f_get_boardname(dd, name, namelen);
-}
-
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
-{
-	return dd->ipath_rcvhdrentsize;
-}
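Among the code deleted from ipath_layer.c above is the scatter/gather bookkeeping (update_sge()) that the PIO copy loop relies on. A compilable sketch of the same advance-and-step technique, assuming a simplified SGE that omits the memory-region map (mr/m/n) the real structure tracks:

#include <stddef.h>

/* Simplified stand-ins for ipath_sge / ipath_sge_state. */
struct sge {
	char  *vaddr;
	size_t length;
};

struct sge_state {
	struct sge *sg_list;	/* remaining SGEs */
	struct sge  sge;	/* the SGE in progress */
	unsigned    num_sge;
};

/* Same shape as the deleted update_sge(): consume 'len' bytes from
 * the current SGE and step to the next one when it is exhausted. */
static void update_sge(struct sge_state *ss, size_t len)
{
	ss->sge.vaddr += len;
	ss->sge.length -= len;
	if (ss->sge.length == 0 && --ss->num_sge)
		ss->sge = *ss->sg_list++;
}

int main(void)
{
	char a[4], b[8];
	struct sge rest[1] = { { b, sizeof(b) } };
	struct sge_state ss = { rest, { a, sizeof(a) }, 2 };

	update_sge(&ss, sizeof(a));	/* exhausts a, steps to b */
	return ss.sge.vaddr == b ? 0 : 1;
}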
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
index 57c990a5715f..4a27ede49941 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -40,73 +40,9 @@
  */
 
 struct sk_buff;
-struct ipath_sge_state;
 struct ipath_devdata;
 struct ether_header;
 
-struct ipath_layer_counters {
-	u64 symbol_error_counter;
-	u64 link_error_recovery_counter;
-	u64 link_downed_counter;
-	u64 port_rcv_errors;
-	u64 port_rcv_remphys_errors;
-	u64 port_xmit_discards;
-	u64 port_xmit_data;
-	u64 port_rcv_data;
-	u64 port_xmit_packets;
-	u64 port_rcv_packets;
-	u32 local_link_integrity_errors;
-	u32 excessive_buffer_overrun_errors;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
-	void *vaddr;
-	size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
-	struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
-	u64 user_base;		/* User's address for this region */
-	u64 iova;		/* IB start address of this region */
-	size_t length;
-	u32 lkey;
-	u32 offset;		/* offset (bytes) to start of region */
-	int access_flags;
-	u32 max_segs;		/* number of ipath_segs in all the arrays */
-	u32 mapsz;		/* size of the map array */
-	struct ipath_segarray *map[0];	/* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
-	struct ipath_mregion *mr;
-	void *vaddr;		/* current pointer into the segment */
-	u32 sge_length;		/* length of the SGE */
-	u32 length;		/* remaining length of the segment */
-	u16 m;			/* current index: mr->map[m] */
-	u16 n;			/* current index: mr->map[m]->segs[n] */
-};
-
-struct ipath_sge_state {
-	struct ipath_sge *sg_list;	/* next SGE to be used if any */
-	struct ipath_sge sge;	/* progress state for the current SGE */
-	u8 num_sge;
-};
-
 int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
 			 void (*l_remove)(void *),
 			 int (*l_intr)(void *, u32),
@@ -119,49 +55,9 @@ int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
 u16 ipath_layer_get_lid(struct ipath_devdata *dd);
 int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
 u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
-int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
 int ipath_layer_send_hdr(struct ipath_devdata *dd,
 			 struct ether_header *hdr);
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 * hdr, u32 len, struct ipath_sge_state *ss);
 int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen);
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-				  u64 *rwords, u64 *spkts, u64 *rpkts,
-				  u64 *xmit_wait);
-int ipath_layer_get_counters(struct ipath_devdata *dd,
-			     struct ipath_layer_counters *cntrs);
-int ipath_layer_want_buffer(struct ipath_devdata *dd);
-int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
-__be64 ipath_layer_get_guid(struct ipath_devdata *);
-u32 ipath_layer_get_majrev(struct ipath_devdata *);
-u32 ipath_layer_get_minrev(struct ipath_devdata *);
-u32 ipath_layer_get_pcirev(struct ipath_devdata *);
-u32 ipath_layer_get_flags(struct ipath_devdata *dd);
-struct device *ipath_layer_get_device(struct ipath_devdata *dd);
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
-u32 ipath_layer_get_vendorid(struct ipath_devdata *);
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
-int ipath_layer_enable_timer(struct ipath_devdata *dd);
-int ipath_layer_disable_timer(struct ipath_devdata *dd);
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
-					 int sleep);
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
 
 /* ipath_ether interrupt values */
 #define IPATH_LAYER_INT_IF_UP 0x2
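The get/set threshold helpers dropped from this header reappear below as private helpers in ipath_mad.c; each is a read-modify-write of a bitfield in ipath_ibcctrl that skips the register write when the value is unchanged. A generic sketch of that pattern, with hypothetical SHIFT/MASK values:

#include <stdint.h>

/* Hypothetical field layout; the real SHIFT/MASK constants come
 * from the chip documentation. */
#define FIELD_SHIFT 16
#define FIELD_MASK  0xFULL

static unsigned get_field(uint64_t ctrl)
{
	return (ctrl >> FIELD_SHIFT) & FIELD_MASK;
}

static uint64_t set_field(uint64_t ctrl, unsigned n)
{
	/* Only touch the register when the value actually changes,
	 * as set_overrunthreshold()/set_phyerrthreshold() do. */
	if (get_field(ctrl) != n) {
		ctrl &= ~(FIELD_MASK << FIELD_SHIFT);
		ctrl |= (uint64_t)n << FIELD_SHIFT;
	}
	return ctrl;
}

int main(void)
{
	uint64_t ctrl = 0;

	ctrl = set_field(ctrl, 10);
	return get_field(ctrl) == 10 ? 0 : 1;
}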
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index d3402341b7d0..72d1db89db8f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
 	nip->num_ports = ibdev->phys_port_cnt;
 	/* This is already in network order */
 	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
-	nip->node_guid = ipath_layer_get_guid(dd);
+	nip->node_guid = dd->ipath_guid;
 	nip->port_guid = nip->sys_guid;
-	nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
-	nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
-	majrev = ipath_layer_get_majrev(dd);
-	minrev = ipath_layer_get_minrev(dd);
+	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
+	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
+	majrev = dd->ipath_majrev;
+	minrev = dd->ipath_minrev;
 	nip->revision = cpu_to_be32((majrev << 16) | minrev);
 	nip->local_port_num = port;
-	vendor = ipath_layer_get_vendorid(dd);
+	vendor = dd->ipath_vendorid;
 	nip->vendor_id[0] = 0;
 	nip->vendor_id[1] = vendor >> 8;
 	nip->vendor_id[2] = vendor;
@@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
133 */ 133 */
134 if (startgx == 0) 134 if (startgx == 0)
135 /* The first is a copy of the read-only HW GUID. */ 135 /* The first is a copy of the read-only HW GUID. */
136 *p = ipath_layer_get_guid(to_idev(ibdev)->dd); 136 *p = to_idev(ibdev)->dd->ipath_guid;
137 else 137 else
138 smp->status |= IB_SMP_INVALID_FIELD; 138 smp->status |= IB_SMP_INVALID_FIELD;
139 139
140 return reply(smp); 140 return reply(smp);
141} 141}
142 142
143
144static int get_overrunthreshold(struct ipath_devdata *dd)
145{
146 return (dd->ipath_ibcctrl >>
147 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
148 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
149}
150
151/**
152 * set_overrunthreshold - set the overrun threshold
153 * @dd: the infinipath device
154 * @n: the new threshold
155 *
156 * Note that this will only take effect when the link state changes.
157 */
158static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
159{
160 unsigned v;
161
162 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
163 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
164 if (v != n) {
165 dd->ipath_ibcctrl &=
166 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
167 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
168 dd->ipath_ibcctrl |=
169 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
170 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
171 dd->ipath_ibcctrl);
172 }
173 return 0;
174}
175
176static int get_phyerrthreshold(struct ipath_devdata *dd)
177{
178 return (dd->ipath_ibcctrl >>
179 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
180 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
181}
182
183/**
184 * set_phyerrthreshold - set the physical error threshold
185 * @dd: the infinipath device
186 * @n: the new threshold
187 *
188 * Note that this will only take effect when the link state changes.
189 */
190static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
191{
192 unsigned v;
193
194 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
195 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
196 if (v != n) {
197 dd->ipath_ibcctrl &=
198 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
199 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
200 dd->ipath_ibcctrl |=
201 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
202 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
203 dd->ipath_ibcctrl);
204 }
205 return 0;
206}
207
208/**
209 * get_linkdowndefaultstate - get the default linkdown state
210 * @dd: the infinipath device
211 *
212 * Returns zero if the default is POLL, 1 if the default is SLEEP.
213 */
214static int get_linkdowndefaultstate(struct ipath_devdata *dd)
215{
216 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
217}
218
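
Both setters above use the same read-modify-write discipline on the software copy of ibcctrl: extract the current field, and only if it differs, clear the field, OR in the new value, and write the whole register back through ipath_write_kreg. A minimal userspace sketch of that pattern; the FIELD_MASK/FIELD_SHIFT values are made up for illustration and are not the real INFINIPATH_IBCC_* constants:

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0xFULL   /* illustrative, not the real IBCC layout */
#define FIELD_SHIFT 32

static uint64_t set_field(uint64_t reg, unsigned n)
{
	unsigned v = (reg >> FIELD_SHIFT) & FIELD_MASK;

	if (v != n) {
		reg &= ~(FIELD_MASK << FIELD_SHIFT);  /* clear old value */
		reg |= (uint64_t) n << FIELD_SHIFT;   /* insert new value */
		/* the driver would write reg to kr_ibcctrl here */
	}
	return reg;
}

int main(void)
{
	uint64_t ibcctrl = 0x5ULL << FIELD_SHIFT;

	ibcctrl = set_field(ibcctrl, 9);
	printf("ibcctrl = %#llx\n", (unsigned long long) ibcctrl);
	return 0;
}

The register write is skipped when the value is unchanged; the cached copy in dd->ipath_ibcctrl is what later updates modify.
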
143static int recv_subn_get_portinfo(struct ib_smp *smp, 219static int recv_subn_get_portinfo(struct ib_smp *smp,
144 struct ib_device *ibdev, u8 port) 220 struct ib_device *ibdev, u8 port)
145{ 221{
@@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
166 (dev->mkeyprot_resv_lmc >> 6) == 0) 242 (dev->mkeyprot_resv_lmc >> 6) == 0)
167 pip->mkey = dev->mkey; 243 pip->mkey = dev->mkey;
168 pip->gid_prefix = dev->gid_prefix; 244 pip->gid_prefix = dev->gid_prefix;
169 lid = ipath_layer_get_lid(dev->dd); 245 lid = dev->dd->ipath_lid;
170 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; 246 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
171 pip->sm_lid = cpu_to_be16(dev->sm_lid); 247 pip->sm_lid = cpu_to_be16(dev->sm_lid);
172 pip->cap_mask = cpu_to_be32(dev->port_cap_flags); 248 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
@@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
177 pip->link_width_supported = 3; /* 1x or 4x */ 253 pip->link_width_supported = 3; /* 1x or 4x */
178 pip->link_width_active = 2; /* 4x */ 254 pip->link_width_active = 2; /* 4x */
179 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ 255 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
180 ibcstat = ipath_layer_get_lastibcstat(dev->dd); 256 ibcstat = dev->dd->ipath_lastibcstat;
181 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; 257 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
182 pip->portphysstate_linkdown = 258 pip->portphysstate_linkdown =
183 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | 259 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
184 (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2); 260 (get_linkdowndefaultstate(dev->dd) ? 1 : 2);
185 pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc; 261 pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
186 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ 262 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
187 switch (ipath_layer_get_ibmtu(dev->dd)) { 263 switch (dev->dd->ipath_ibmtu) {
188 case 4096: 264 case 4096:
189 mtu = IB_MTU_4096; 265 mtu = IB_MTU_4096;
190 break; 266 break;
@@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
217 pip->mkey_violations = cpu_to_be16(dev->mkey_violations); 293 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
218 /* P_KeyViolations are counted by hardware. */ 294 /* P_KeyViolations are counted by hardware. */
219 pip->pkey_violations = 295 pip->pkey_violations =
220 cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) - 296 cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
221 dev->z_pkey_violations) & 0xFFFF); 297 dev->z_pkey_violations) & 0xFFFF);
222 pip->qkey_violations = cpu_to_be16(dev->qkey_violations); 298 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
223 /* Only the hardware GUID is supported for now */ 299 /* Only the hardware GUID is supported for now */
@@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
226 /* 32.768 usec. response time (guessing) */ 302 /* 32.768 usec. response time (guessing) */
227 pip->resv_resptimevalue = 3; 303 pip->resv_resptimevalue = 3;
228 pip->localphyerrors_overrunerrors = 304 pip->localphyerrors_overrunerrors =
229 (ipath_layer_get_phyerrthreshold(dev->dd) << 4) | 305 (get_phyerrthreshold(dev->dd) << 4) |
230 ipath_layer_get_overrunthreshold(dev->dd); 306 get_overrunthreshold(dev->dd);
231 /* pip->max_credit_hint; */ 307 /* pip->max_credit_hint; */
232 /* pip->link_roundtrip_latency[3]; */ 308 /* pip->link_roundtrip_latency[3]; */
233 309
@@ -237,6 +313,20 @@ bail:
237 return ret; 313 return ret;
238} 314}
239 315
316/**
317 * get_pkeys - return the PKEY table for port 0
318 * @dd: the infinipath device
319 * @pkeys: the pkey table is placed here
320 */
321static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
322{
323 struct ipath_portdata *pd = dd->ipath_pd[0];
324
325 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
326
327 return 0;
328}
329
240static int recv_subn_get_pkeytable(struct ib_smp *smp, 330static int recv_subn_get_pkeytable(struct ib_smp *smp,
241 struct ib_device *ibdev) 331 struct ib_device *ibdev)
242{ 332{
@@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struct ib_smp *smp,
249 memset(smp->data, 0, sizeof(smp->data)); 339 memset(smp->data, 0, sizeof(smp->data));
250 if (startpx == 0) { 340 if (startpx == 0) {
251 struct ipath_ibdev *dev = to_idev(ibdev); 341 struct ipath_ibdev *dev = to_idev(ibdev);
252 unsigned i, n = ipath_layer_get_npkeys(dev->dd); 342 unsigned i, n = ipath_get_npkeys(dev->dd);
253 343
254 ipath_layer_get_pkeys(dev->dd, p); 344 get_pkeys(dev->dd, p);
255 345
256 for (i = 0; i < n; i++) 346 for (i = 0; i < n; i++)
257 q[i] = cpu_to_be16(p[i]); 347 q[i] = cpu_to_be16(p[i]);
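
SMP data travels in network byte order, so the reply converts the host-order table with cpu_to_be16 before it goes on the wire; in the driver, p and q alias the same smp->data buffer, so the conversion is done in place. The userspace equivalent of that loop, using htons as a stand-in and a hypothetical four-entry table:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pkeys[4] = { 0xFFFF, 0x8001, 0, 0 };   /* host order */
	uint16_t wire[4];
	unsigned i;

	for (i = 0; i < 4; i++)
		wire[i] = htons(pkeys[i]);              /* cpu_to_be16() */

	for (i = 0; i < 4; i++)
		printf("pkey[%u] wire image: %#06x\n", i, wire[i]);
	return 0;
}
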
@@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp,
269} 359}
270 360
271/** 361/**
362 * set_linkdowndefaultstate - set the default linkdown state
363 * @dd: the infinipath device
364 * @sleep: the new state
365 *
366 * Note that this will only take effect when the link state changes.
367 */
368static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
369{
370 if (sleep)
371 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
372 else
373 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
374 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
375 dd->ipath_ibcctrl);
376 return 0;
377}
378
379/**
272 * recv_subn_set_portinfo - set port information 380 * recv_subn_set_portinfo - set port information
273 * @smp: the incoming SM packet 381 * @smp: the incoming SM packet
274 * @ibdev: the infiniband device 382 * @ibdev: the infiniband device
@@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
290 u8 state; 398 u8 state;
291 u16 lstate; 399 u16 lstate;
292 u32 mtu; 400 u32 mtu;
293 int ret; 401 int ret, ore;
294 402
295 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) 403 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
296 goto err; 404 goto err;
@@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
304 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); 412 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
305 413
306 lid = be16_to_cpu(pip->lid); 414 lid = be16_to_cpu(pip->lid);
307 if (lid != ipath_layer_get_lid(dev->dd)) { 415 if (lid != dev->dd->ipath_lid) {
308 /* Must be a valid unicast LID address. */ 416 /* Must be a valid unicast LID address. */
309 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) 417 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
310 goto err; 418 goto err;
@@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
342 case 0: /* NOP */ 450 case 0: /* NOP */
343 break; 451 break;
344 case 1: /* SLEEP */ 452 case 1: /* SLEEP */
345 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1)) 453 if (set_linkdowndefaultstate(dev->dd, 1))
346 goto err; 454 goto err;
347 break; 455 break;
348 case 2: /* POLL */ 456 case 2: /* POLL */
349 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0)) 457 if (set_linkdowndefaultstate(dev->dd, 0))
350 goto err; 458 goto err;
351 break; 459 break;
352 default: 460 default:
@@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
376 /* XXX We have already partially updated our state! */ 484 /* XXX We have already partially updated our state! */
377 goto err; 485 goto err;
378 } 486 }
379 ipath_layer_set_mtu(dev->dd, mtu); 487 ipath_set_mtu(dev->dd, mtu);
380 488
381 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; 489 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
382 490
@@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
392 * later. 500 * later.
393 */ 501 */
394 if (pip->pkey_violations == 0) 502 if (pip->pkey_violations == 0)
395 dev->z_pkey_violations = 503 dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd);
396 ipath_layer_get_cr_errpkey(dev->dd);
397 504
398 if (pip->qkey_violations == 0) 505 if (pip->qkey_violations == 0)
399 dev->qkey_violations = 0; 506 dev->qkey_violations = 0;
400 507
401 if (ipath_layer_set_phyerrthreshold( 508 ore = pip->localphyerrors_overrunerrors;
402 dev->dd, 509 if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF))
403 (pip->localphyerrors_overrunerrors >> 4) & 0xF))
404 goto err; 510 goto err;
405 511
406 if (ipath_layer_set_overrunthreshold( 512 if (set_overrunthreshold(dev->dd, (ore & 0xF)))
407 dev->dd,
408 (pip->localphyerrors_overrunerrors & 0xF)))
409 goto err; 513 goto err;
410 514
411 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 515 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
423 * is down or is being set to down. 527 * is down or is being set to down.
424 */ 528 */
425 state = pip->linkspeed_portstate & 0xF; 529 state = pip->linkspeed_portstate & 0xF;
426 flags = ipath_layer_get_flags(dev->dd); 530 flags = dev->dd->ipath_flags;
427 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 531 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
428 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 532 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
429 goto err; 533 goto err;
@@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
439 /* FALLTHROUGH */ 543 /* FALLTHROUGH */
440 case IB_PORT_DOWN: 544 case IB_PORT_DOWN:
441 if (lstate == 0) 545 if (lstate == 0)
442 if (ipath_layer_get_linkdowndefaultstate(dev->dd)) 546 if (get_linkdowndefaultstate(dev->dd))
443 lstate = IPATH_IB_LINKDOWN_SLEEP; 547 lstate = IPATH_IB_LINKDOWN_SLEEP;
444 else 548 else
445 lstate = IPATH_IB_LINKDOWN; 549 lstate = IPATH_IB_LINKDOWN;
@@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
451 lstate = IPATH_IB_LINKDOWN_DISABLE; 555 lstate = IPATH_IB_LINKDOWN_DISABLE;
452 else 556 else
453 goto err; 557 goto err;
454 ipath_layer_set_linkstate(dev->dd, lstate); 558 ipath_set_linkstate(dev->dd, lstate);
455 if (flags & IPATH_LINKACTIVE) { 559 if (flags & IPATH_LINKACTIVE) {
456 event.event = IB_EVENT_PORT_ERR; 560 event.event = IB_EVENT_PORT_ERR;
457 ib_dispatch_event(&event); 561 ib_dispatch_event(&event);
@@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
460 case IB_PORT_ARMED: 564 case IB_PORT_ARMED:
461 if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE))) 565 if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
462 break; 566 break;
463 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM); 567 ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM);
464 if (flags & IPATH_LINKACTIVE) { 568 if (flags & IPATH_LINKACTIVE) {
465 event.event = IB_EVENT_PORT_ERR; 569 event.event = IB_EVENT_PORT_ERR;
466 ib_dispatch_event(&event); 570 ib_dispatch_event(&event);
@@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
469 case IB_PORT_ACTIVE: 573 case IB_PORT_ACTIVE:
470 if (!(flags & IPATH_LINKARMED)) 574 if (!(flags & IPATH_LINKARMED))
471 break; 575 break;
472 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); 576 ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
473 event.event = IB_EVENT_PORT_ACTIVE; 577 event.event = IB_EVENT_PORT_ACTIVE;
474 ib_dispatch_event(&event); 578 ib_dispatch_event(&event);
475 break; 579 break;
@@ -493,6 +597,152 @@ done:
493 return ret; 597 return ret;
494} 598}
495 599
600/**
601 * rm_pkey - decrement the reference count for the given PKEY
602 * @dd: the infinipath device
603 * @key: the PKEY value to remove
604 *
605 * Return true if this was the last reference and the hardware table entry
606 * needs to be changed.
607 */
608static int rm_pkey(struct ipath_devdata *dd, u16 key)
609{
610 int i;
611 int ret;
612
613 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
614 if (dd->ipath_pkeys[i] != key)
615 continue;
616 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
617 dd->ipath_pkeys[i] = 0;
618 ret = 1;
619 goto bail;
620 }
621 break;
622 }
623
624 ret = 0;
625
626bail:
627 return ret;
628}
629
630/**
631 * add_pkey - add the given PKEY to the hardware table
632 * @dd: the infinipath device
633 * @key: the PKEY
634 *
635 * Return an error code if unable to add the entry, zero if no change,
636 * or 1 if the hardware PKEY register needs to be updated.
637 */
638static int add_pkey(struct ipath_devdata *dd, u16 key)
639{
640 int i;
641 u16 lkey = key & 0x7FFF;
642 int any = 0;
643 int ret;
644
645 if (lkey == 0x7FFF) {
646 ret = 0;
647 goto bail;
648 }
649
650 /* Look for an empty slot or a matching PKEY. */
651 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
652 if (!dd->ipath_pkeys[i]) {
653 any++;
654 continue;
655 }
656 /* If it matches exactly, try to increment the ref count */
657 if (dd->ipath_pkeys[i] == key) {
658 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
659 ret = 0;
660 goto bail;
661 }
662 /* Lost the race. Look for an empty slot below. */
663 atomic_dec(&dd->ipath_pkeyrefs[i]);
664 any++;
665 }
666 /*
667 * It makes no sense to have both the limited and unlimited
668 * PKEY set at the same time since the unlimited one will
669 * disable the limited one.
670 */
671 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
672 ret = -EEXIST;
673 goto bail;
674 }
675 }
676 if (!any) {
677 ret = -EBUSY;
678 goto bail;
679 }
680 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
681 if (!dd->ipath_pkeys[i] &&
682 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
683 /* for ipathstats, etc. */
684 ipath_stats.sps_pkeys[i] = lkey;
685 dd->ipath_pkeys[i] = key;
686 ret = 1;
687 goto bail;
688 }
689 }
690 ret = -EBUSY;
691
692bail:
693 return ret;
694}
695
696/**
697 * set_pkeys - set the PKEY table for port 0
698 * @dd: the infinipath device
699 * @pkeys: the PKEY table
700 */
701static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
702{
703 struct ipath_portdata *pd;
704 int i;
705 int changed = 0;
706
707 pd = dd->ipath_pd[0];
708
709 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
710 u16 key = pkeys[i];
711 u16 okey = pd->port_pkeys[i];
712
713 if (key == okey)
714 continue;
715 /*
716 * The value of this PKEY table entry is changing.
717 * Remove the old entry in the hardware's array of PKEYs.
718 */
719 if (okey & 0x7FFF)
720 changed |= rm_pkey(dd, okey);
721 if (key & 0x7FFF) {
722 int ret = add_pkey(dd, key);
723
724 if (ret < 0)
725 key = 0;
726 else
727 changed |= ret;
728 }
729 pd->port_pkeys[i] = key;
730 }
731 if (changed) {
732 u64 pkey;
733
734 pkey = (u64) dd->ipath_pkeys[0] |
735 ((u64) dd->ipath_pkeys[1] << 16) |
736 ((u64) dd->ipath_pkeys[2] << 32) |
737 ((u64) dd->ipath_pkeys[3] << 48);
738 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
739 (unsigned long long) pkey);
740 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
741 pkey);
742 }
743 return 0;
744}
745
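
The repack at the end of set_pkeys is pure bit assembly: four 16-bit PKEYs laid out low-to-high in the 64-bit kr_partitionkey value. That piece is easy to verify in isolation; a sketch assuming the same four-entry table:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_pkeys(const uint16_t k[4])
{
	return (uint64_t) k[0] |
	       ((uint64_t) k[1] << 16) |
	       ((uint64_t) k[2] << 32) |
	       ((uint64_t) k[3] << 48);
}

int main(void)
{
	uint16_t pkeys[4] = { 0xFFFF, 0x8001, 0, 0 };

	/* entry 0 lands in the low 16 bits: 0x8001ffff */
	printf("p0 new pkey reg %llx\n",
	       (unsigned long long) pack_pkeys(pkeys));
	return 0;
}
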
496static int recv_subn_set_pkeytable(struct ib_smp *smp, 746static int recv_subn_set_pkeytable(struct ib_smp *smp,
497 struct ib_device *ibdev) 747 struct ib_device *ibdev)
498{ 748{
@@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
500 __be16 *p = (__be16 *) smp->data; 750 __be16 *p = (__be16 *) smp->data;
501 u16 *q = (u16 *) smp->data; 751 u16 *q = (u16 *) smp->data;
502 struct ipath_ibdev *dev = to_idev(ibdev); 752 struct ipath_ibdev *dev = to_idev(ibdev);
503 unsigned i, n = ipath_layer_get_npkeys(dev->dd); 753 unsigned i, n = ipath_get_npkeys(dev->dd);
504 754
505 for (i = 0; i < n; i++) 755 for (i = 0; i < n; i++)
506 q[i] = be16_to_cpu(p[i]); 756 q[i] = be16_to_cpu(p[i]);
507 757
508 if (startpx != 0 || 758 if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
509 ipath_layer_set_pkeys(dev->dd, q) != 0)
510 smp->status |= IB_SMP_INVALID_FIELD; 759 smp->status |= IB_SMP_INVALID_FIELD;
511 760
512 return recv_subn_get_pkeytable(smp, ibdev); 761 return recv_subn_get_pkeytable(smp, ibdev);
@@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
844 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1093 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
845 pmp->data; 1094 pmp->data;
846 struct ipath_ibdev *dev = to_idev(ibdev); 1095 struct ipath_ibdev *dev = to_idev(ibdev);
847 struct ipath_layer_counters cntrs; 1096 struct ipath_verbs_counters cntrs;
848 u8 port_select = p->port_select; 1097 u8 port_select = p->port_select;
849 1098
850 ipath_layer_get_counters(dev->dd, &cntrs); 1099 ipath_get_counters(dev->dd, &cntrs);
851 1100
852 /* Adjust counters for any resets done. */ 1101 /* Adjust counters for any resets done. */
853 cntrs.symbol_error_counter -= dev->z_symbol_error_counter; 1102 cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
@@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
944 u64 swords, rwords, spkts, rpkts, xwait; 1193 u64 swords, rwords, spkts, rpkts, xwait;
945 u8 port_select = p->port_select; 1194 u8 port_select = p->port_select;
946 1195
947 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, 1196 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
948 &rpkts, &xwait); 1197 &rpkts, &xwait);
949 1198
950 /* Adjust counters for any resets done. */ 1199 /* Adjust counters for any resets done. */
951 swords -= dev->z_port_xmit_data; 1200 swords -= dev->z_port_xmit_data;
@@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
978 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1227 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
979 pmp->data; 1228 pmp->data;
980 struct ipath_ibdev *dev = to_idev(ibdev); 1229 struct ipath_ibdev *dev = to_idev(ibdev);
981 struct ipath_layer_counters cntrs; 1230 struct ipath_verbs_counters cntrs;
982 1231
983 /* 1232 /*
984 * Since the HW doesn't support clearing counters, we save the 1233 * Since the HW doesn't support clearing counters, we save the
985 * current count and subtract it from future responses. 1234 * current count and subtract it from future responses.
986 */ 1235 */
987 ipath_layer_get_counters(dev->dd, &cntrs); 1236 ipath_get_counters(dev->dd, &cntrs);
988 1237
989 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) 1238 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
990 dev->z_symbol_error_counter = cntrs.symbol_error_counter; 1239 dev->z_symbol_error_counter = cntrs.symbol_error_counter;
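
Because the chip cannot clear its counters, a Set that requests a clear only snapshots the current hardware value into a z_* baseline; every later Get reports the difference, as in the adjustment block of recv_pma_get_portcounters above. The pattern reduced to its core, with a plain variable standing in for ipath_snap_cntr and the chip register:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;             /* stand-in for a chip counter */

static uint64_t snap_cntr(void)
{
	return hw_counter;
}

int main(void)
{
	uint64_t z_baseline;

	hw_counter = 100;
	z_baseline = snap_cntr();       /* "clear": remember the baseline */

	hw_counter = 117;               /* 17 events since the clear */
	printf("reported = %llu\n",
	       (unsigned long long) (snap_cntr() - z_baseline));
	return 0;
}
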
@@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
1041 struct ipath_ibdev *dev = to_idev(ibdev); 1290 struct ipath_ibdev *dev = to_idev(ibdev);
1042 u64 swords, rwords, spkts, rpkts, xwait; 1291 u64 swords, rwords, spkts, rpkts, xwait;
1043 1292
1044 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, 1293 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
1045 &rpkts, &xwait); 1294 &rpkts, &xwait);
1046 1295
1047 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) 1296 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1048 dev->z_port_xmit_data = swords; 1297 dev->z_port_xmit_data = swords;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 4ac31a5da330..b36f6fb3e37a 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -36,6 +36,18 @@
36 36
37#include "ipath_verbs.h" 37#include "ipath_verbs.h"
38 38
39/* Fast memory region */
40struct ipath_fmr {
41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct ipath_mregion mr; /* must be last */
44};
45
46static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
47{
48 return container_of(ibfmr, struct ipath_fmr, ibfmr);
49}
50
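
to_ifmr is the standard container_of idiom: from a pointer to the embedded ib_fmr member, recover the enclosing ipath_fmr by subtracting the member's offset. A userspace sketch with a local container_of built on offsetof and hypothetical inner/outer types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { int tag; struct inner in; };

int main(void)
{
	struct outer o = { .tag = 7 };
	struct inner *ip = &o.in;

	/* recover &o from ip, just as to_ifmr() recovers the ipath_fmr */
	printf("tag = %d\n", container_of(ip, struct outer, in)->tag);
	return 0;
}

The "must be last" note on the mr member is presumably because ipath_mregion ends in a variable-length map array, so nothing can follow it in the containing struct.
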
39/** 51/**
40 * ipath_get_dma_mr - get a DMA memory region 52 * ipath_get_dma_mr - get a DMA memory region
41 * @pd: protection domain for this memory region 53 * @pd: protection domain for this memory region
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 1ccfc909db1e..9228d59b59d4 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -461,7 +461,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
461 goto inval; 461 goto inval;
462 462
463 if (attr_mask & IB_QP_PKEY_INDEX) 463 if (attr_mask & IB_QP_PKEY_INDEX)
464 if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 464 if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
465 goto inval; 465 goto inval;
466 466
467 if (attr_mask & IB_QP_MIN_RNR_TIMER) 467 if (attr_mask & IB_QP_MIN_RNR_TIMER)
@@ -645,6 +645,33 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
645} 645}
646 646
647/** 647/**
648 * set_verbs_flags - set the verbs layer flags
649 * @dd: the infinipath device
650 * @flags: the flags to set
651 */
652static int set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
653{
654 struct ipath_devdata *ss;
655 unsigned long lflags;
656
657 spin_lock_irqsave(&ipath_devs_lock, lflags);
658
659 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
660 if (!(ss->ipath_flags & IPATH_INITTED))
661 continue;
662 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
663 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
664 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
665 else
666 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
667 }
668
669 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
670
671 return 0;
672}
673
674/**
648 * ipath_create_qp - create a queue pair for a device 675 * ipath_create_qp - create a queue pair for a device
649 * @ibpd: the protection domain whose device we create the queue pair for 676 * @ibpd: the protection domain whose device we create the queue pair for
650 * @init_attr: the attributes of the queue pair 677 * @init_attr: the attributes of the queue pair
@@ -760,8 +787,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
760 787
761 /* Tell the core driver that the kernel SMA is present. */ 788 /* Tell the core driver that the kernel SMA is present. */
762 if (init_attr->qp_type == IB_QPT_SMI) 789 if (init_attr->qp_type == IB_QPT_SMI)
763 ipath_layer_set_verbs_flags(dev->dd, 790 set_verbs_flags(dev->dd, IPATH_VERBS_KERNEL_SMA);
764 IPATH_VERBS_KERNEL_SMA);
765 break; 791 break;
766 792
767 default: 793 default:
@@ -838,7 +864,7 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
838 864
839 /* Tell the core driver that the kernel SMA is gone. */ 865 /* Tell the core driver that the kernel SMA is gone. */
840 if (qp->ibqp.qp_type == IB_QPT_SMI) 866 if (qp->ibqp.qp_type == IB_QPT_SMI)
841 ipath_layer_set_verbs_flags(dev->dd, 0); 867 set_verbs_flags(dev->dd, 0);
842 868
843 spin_lock_irqsave(&qp->s_lock, flags); 869 spin_lock_irqsave(&qp->s_lock, flags);
844 qp->state = IB_QPS_ERR; 870 qp->state = IB_QPS_ERR;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 774d1615ce2f..a08654042c03 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#include "ipath_verbs.h" 34#include "ipath_verbs.h"
35#include "ipath_common.h" 35#include "ipath_kernel.h"
36 36
37/* cut down ridiculously long IB macro names */ 37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_RC_##x 38#define OP(x) IB_OPCODE_RC_##x
@@ -540,7 +540,7 @@ static void send_rc_ack(struct ipath_qp *qp)
540 lrh0 = IPATH_LRH_GRH; 540 lrh0 = IPATH_LRH_GRH;
541 } 541 }
542 /* read pkey_index w/o lock (its atomic) */ 542 /* read pkey_index w/o lock (its atomic) */
543 bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 543 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
544 if (qp->r_nak_state) 544 if (qp->r_nak_state)
545 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 545 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
546 (qp->r_nak_state << 546 (qp->r_nak_state <<
@@ -557,7 +557,7 @@ static void send_rc_ack(struct ipath_qp *qp)
557 hdr.lrh[0] = cpu_to_be16(lrh0); 557 hdr.lrh[0] = cpu_to_be16(lrh0);
558 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 558 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
559 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); 559 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
560 hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); 560 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
561 ohdr->bth[0] = cpu_to_be32(bth0); 561 ohdr->bth[0] = cpu_to_be32(bth0);
562 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 562 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
563 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); 563 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
@@ -1323,8 +1323,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1323 * the eager header buffer size to 56 bytes so the last 4 1323 * the eager header buffer size to 56 bytes so the last 4
1324 * bytes of the BTH header (PSN) is in the data buffer. 1324 * bytes of the BTH header (PSN) is in the data buffer.
1325 */ 1325 */
1326 header_in_data = 1326 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
1327 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
1328 if (header_in_data) { 1327 if (header_in_data) {
1329 psn = be32_to_cpu(((__be32 *) data)[0]); 1328 psn = be32_to_cpu(((__be32 *) data)[0]);
1330 data += sizeof(__be32); 1329 data += sizeof(__be32);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index dd09420d677d..5c1da2d25e03 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -470,6 +470,15 @@ done:
470 wake_up(&qp->wait); 470 wake_up(&qp->wait);
471} 471}
472 472
473static int want_buffer(struct ipath_devdata *dd)
474{
475 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
476 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
477 dd->ipath_sendctrl);
478
479 return 0;
480}
481
473/** 482/**
474 * ipath_no_bufs_available - tell the layer driver we need buffers 483 * ipath_no_bufs_available - tell the layer driver we need buffers
475 * @qp: the QP that caused the problem 484 * @qp: the QP that caused the problem
@@ -486,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
486 list_add_tail(&qp->piowait, &dev->piowait); 495 list_add_tail(&qp->piowait, &dev->piowait);
487 spin_unlock_irqrestore(&dev->pending_lock, flags); 496 spin_unlock_irqrestore(&dev->pending_lock, flags);
488 /* 497 /*
489 * Note that as soon as ipath_layer_want_buffer() is called and 498 * Note that as soon as want_buffer() is called and
490 * possibly before it returns, ipath_ib_piobufavail() 499 * possibly before it returns, ipath_ib_piobufavail()
491 * could be called. If we are still in the tasklet function, 500 * could be called. If we are still in the tasklet function,
492 * tasklet_hi_schedule() will not call us until the next time 501 * tasklet_hi_schedule() will not call us until the next time
@@ -496,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
496 */ 505 */
497 clear_bit(IPATH_S_BUSY, &qp->s_flags); 506 clear_bit(IPATH_S_BUSY, &qp->s_flags);
498 tasklet_unlock(&qp->s_task); 507 tasklet_unlock(&qp->s_task);
499 ipath_layer_want_buffer(dev->dd); 508 want_buffer(dev->dd);
500 dev->n_piowait++; 509 dev->n_piowait++;
501} 510}
502 511
@@ -611,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
611 hdr->hop_limit = grh->hop_limit; 620 hdr->hop_limit = grh->hop_limit;
612 /* The SGID is 32-bit aligned. */ 621 /* The SGID is 32-bit aligned. */
613 hdr->sgid.global.subnet_prefix = dev->gid_prefix; 622 hdr->sgid.global.subnet_prefix = dev->gid_prefix;
614 hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd); 623 hdr->sgid.global.interface_id = dev->dd->ipath_guid;
615 hdr->dgid = grh->dgid; 624 hdr->dgid = grh->dgid;
616 625
617 /* GRH header size in 32-bit words. */ 626 /* GRH header size in 32-bit words. */
@@ -643,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data)
643 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) 652 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
644 goto bail; 653 goto bail;
645 654
646 if (unlikely(qp->remote_ah_attr.dlid == 655 if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
647 ipath_layer_get_lid(dev->dd))) {
648 ipath_ruc_loopback(qp); 656 ipath_ruc_loopback(qp);
649 goto clear; 657 goto clear;
650 } 658 }
@@ -711,8 +719,8 @@ again:
711 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 719 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
712 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + 720 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
713 SIZE_OF_CRC); 721 SIZE_OF_CRC);
714 qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); 722 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
715 bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 723 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
716 bth0 |= extra_bytes << 20; 724 bth0 |= extra_bytes << 20;
717 ohdr->bth[0] = cpu_to_be32(bth0); 725 ohdr->bth[0] = cpu_to_be32(bth0);
718 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 726 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b98821d7801d..7396a63840db 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -35,7 +35,6 @@
35#include <linux/pci.h> 35#include <linux/pci.h>
36 36
37#include "ipath_kernel.h" 37#include "ipath_kernel.h"
38#include "ipath_layer.h"
39#include "ipath_common.h" 38#include "ipath_common.h"
40 39
41/** 40/**
@@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device *dev,
227 unit = dd->ipath_unit; 226 unit = dd->ipath_unit;
228 227
229 dd->ipath_mlid = mlid; 228 dd->ipath_mlid = mlid;
230 ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
231 229
232 goto bail; 230 goto bail;
233invalid: 231invalid:
@@ -467,7 +465,7 @@ static ssize_t store_link_state(struct device *dev,
467 if (ret < 0) 465 if (ret < 0)
468 goto invalid; 466 goto invalid;
469 467
470 r = ipath_layer_set_linkstate(dd, state); 468 r = ipath_set_linkstate(dd, state);
471 if (r < 0) { 469 if (r < 0) {
472 ret = r; 470 ret = r;
473 goto bail; 471 goto bail;
@@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *dev,
502 if (ret < 0) 500 if (ret < 0)
503 goto invalid; 501 goto invalid;
504 502
505 r = ipath_layer_set_mtu(dd, mtu); 503 r = ipath_set_mtu(dd, mtu);
506 if (r < 0) 504 if (r < 0)
507 ret = r; 505 ret = r;
508 506
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index c33abea2d5a7..0fd3cded16ba 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#include "ipath_verbs.h" 34#include "ipath_verbs.h"
35#include "ipath_common.h" 35#include "ipath_kernel.h"
36 36
37/* cut down ridiculously long IB macro names */ 37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_UC_##x 38#define OP(x) IB_OPCODE_UC_##x
@@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
261 * size to 56 bytes so the last 4 bytes of 261 * size to 56 bytes so the last 4 bytes of
262 * the BTH header (PSN) is in the data buffer. 262 * the BTH header (PSN) is in the data buffer.
263 */ 263 */
264 header_in_data = 264 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
265 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
266 if (header_in_data) { 265 if (header_in_data) {
267 psn = be32_to_cpu(((__be32 *) data)[0]); 266 psn = be32_to_cpu(((__be32 *) data)[0]);
268 data += sizeof(__be32); 267 data += sizeof(__be32);
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 82439fcfc2f8..6991d1d74e3c 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -353,7 +353,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
353 ss.num_sge++; 353 ss.num_sge++;
354 } 354 }
355 /* Check for invalid packet size. */ 355 /* Check for invalid packet size. */
356 if (len > ipath_layer_get_ibmtu(dev->dd)) { 356 if (len > dev->dd->ipath_ibmtu) {
357 ret = -EINVAL; 357 ret = -EINVAL;
358 goto bail; 358 goto bail;
359 } 359 }
@@ -375,7 +375,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
375 dev->n_unicast_xmit++; 375 dev->n_unicast_xmit++;
376 lid = ah_attr->dlid & 376 lid = ah_attr->dlid &
377 ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 377 ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
378 if (unlikely(lid == ipath_layer_get_lid(dev->dd))) { 378 if (unlikely(lid == dev->dd->ipath_lid)) {
379 /* 379 /*
380 * Pass in an uninitialized ib_wc to save stack 380 * Pass in an uninitialized ib_wc to save stack
381 * space. 381 * space.
@@ -404,7 +404,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
404 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = 404 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
405 dev->gid_prefix; 405 dev->gid_prefix;
406 qp->s_hdr.u.l.grh.sgid.global.interface_id = 406 qp->s_hdr.u.l.grh.sgid.global.interface_id =
407 ipath_layer_get_guid(dev->dd); 407 dev->dd->ipath_guid;
408 qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid; 408 qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
409 /* 409 /*
410 * Don't worry about sending to locally attached multicast 410 * Don't worry about sending to locally attached multicast
@@ -434,7 +434,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
434 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); 434 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
435 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ 435 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
436 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC); 436 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
437 lid = ipath_layer_get_lid(dev->dd); 437 lid = dev->dd->ipath_lid;
438 if (lid) { 438 if (lid) {
439 lid |= ah_attr->src_path_bits & 439 lid |= ah_attr->src_path_bits &
440 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 440 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
@@ -445,7 +445,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
445 bth0 |= 1 << 23; 445 bth0 |= 1 << 23;
446 bth0 |= extra_bytes << 20; 446 bth0 |= extra_bytes << 20;
447 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : 447 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
448 ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 448 ipath_get_pkey(dev->dd, qp->s_pkey_index);
449 ohdr->bth[0] = cpu_to_be32(bth0); 449 ohdr->bth[0] = cpu_to_be32(bth0);
450 /* 450 /*
451 * Use the multicast QP if the destination LID is a multicast LID. 451 * Use the multicast QP if the destination LID is a multicast LID.
@@ -531,8 +531,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
531 * the eager header buffer size to 56 bytes so the last 12 531 * the eager header buffer size to 56 bytes so the last 12
532 * bytes of the IB header is in the data buffer. 532 * bytes of the IB header is in the data buffer.
533 */ 533 */
534 header_in_data = 534 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
535 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
536 if (header_in_data) { 535 if (header_in_data) {
537 qkey = be32_to_cpu(((__be32 *) data)[1]); 536 qkey = be32_to_cpu(((__be32 *) data)[1]);
538 src_qp = be32_to_cpu(((__be32 *) data)[2]); 537 src_qp = be32_to_cpu(((__be32 *) data)[2]);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 15edec9227e4..3c47620e9887 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -33,15 +33,13 @@
33 33
34#include <rdma/ib_mad.h> 34#include <rdma/ib_mad.h>
35#include <rdma/ib_user_verbs.h> 35#include <rdma/ib_user_verbs.h>
36#include <linux/io.h>
36#include <linux/utsname.h> 37#include <linux/utsname.h>
37 38
38#include "ipath_kernel.h" 39#include "ipath_kernel.h"
39#include "ipath_verbs.h" 40#include "ipath_verbs.h"
40#include "ipath_common.h" 41#include "ipath_common.h"
41 42
42/* Not static, because we don't want the compiler removing it */
43const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
44
45static unsigned int ib_ipath_qp_table_size = 251; 43static unsigned int ib_ipath_qp_table_size = 251;
46module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); 44module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
47MODULE_PARM_DESC(qp_table_size, "QP table size"); 45MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -109,10 +107,6 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
109 uint, S_IWUSR | S_IRUGO); 107 uint, S_IWUSR | S_IRUGO);
110MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs supported"); 108MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs supported");
111 109
112MODULE_LICENSE("GPL");
113MODULE_AUTHOR("QLogic <support@pathscale.com>");
114MODULE_DESCRIPTION("QLogic InfiniPath driver");
115
116const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { 110const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
117 [IB_QPS_RESET] = 0, 111 [IB_QPS_RESET] = 0,
118 [IB_QPS_INIT] = IPATH_POST_RECV_OK, 112 [IB_QPS_INIT] = IPATH_POST_RECV_OK,
@@ -125,6 +119,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
125 [IB_QPS_ERR] = 0, 119 [IB_QPS_ERR] = 0,
126}; 120};
127 121
122struct ipath_ucontext {
123 struct ib_ucontext ibucontext;
124};
125
126static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
127 *ibucontext)
128{
129 return container_of(ibucontext, struct ipath_ucontext, ibucontext);
130}
131
128/* 132/*
129 * Translate ib_wr_opcode into ib_wc_opcode. 133 * Translate ib_wr_opcode into ib_wc_opcode.
130 */ 134 */
@@ -400,7 +404,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
400 lid = be16_to_cpu(hdr->lrh[1]); 404 lid = be16_to_cpu(hdr->lrh[1]);
401 if (lid < IPATH_MULTICAST_LID_BASE) { 405 if (lid < IPATH_MULTICAST_LID_BASE) {
402 lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 406 lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
403 if (unlikely(lid != ipath_layer_get_lid(dev->dd))) { 407 if (unlikely(lid != dev->dd->ipath_lid)) {
404 dev->rcv_errors++; 408 dev->rcv_errors++;
405 goto bail; 409 goto bail;
406 } 410 }
@@ -511,19 +515,19 @@ void ipath_ib_timer(struct ipath_ibdev *dev)
511 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && 515 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
512 --dev->pma_sample_start == 0) { 516 --dev->pma_sample_start == 0) {
513 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; 517 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
514 ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword, 518 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
515 &dev->ipath_rword, 519 &dev->ipath_rword,
516 &dev->ipath_spkts, 520 &dev->ipath_spkts,
517 &dev->ipath_rpkts, 521 &dev->ipath_rpkts,
518 &dev->ipath_xmit_wait); 522 &dev->ipath_xmit_wait);
519 } 523 }
520 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { 524 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
521 if (dev->pma_sample_interval == 0) { 525 if (dev->pma_sample_interval == 0) {
522 u64 ta, tb, tc, td, te; 526 u64 ta, tb, tc, td, te;
523 527
524 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; 528 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
525 ipath_layer_snapshot_counters(dev->dd, &ta, &tb, 529 ipath_snapshot_counters(dev->dd, &ta, &tb,
526 &tc, &td, &te); 530 &tc, &td, &te);
527 531
528 dev->ipath_sword = ta - dev->ipath_sword; 532 dev->ipath_sword = ta - dev->ipath_sword;
529 dev->ipath_rword = tb - dev->ipath_rword; 533 dev->ipath_rword = tb - dev->ipath_rword;
@@ -553,6 +557,362 @@ void ipath_ib_timer(struct ipath_ibdev *dev)
553 } 557 }
554} 558}
555 559
560static void update_sge(struct ipath_sge_state *ss, u32 length)
561{
562 struct ipath_sge *sge = &ss->sge;
563
564 sge->vaddr += length;
565 sge->length -= length;
566 sge->sge_length -= length;
567 if (sge->sge_length == 0) {
568 if (--ss->num_sge)
569 *sge = *ss->sg_list++;
570 } else if (sge->length == 0 && sge->mr != NULL) {
571 if (++sge->n >= IPATH_SEGSZ) {
572 if (++sge->m >= sge->mr->mapsz)
573 return;
574 sge->n = 0;
575 }
576 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
577 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
578 }
579}
580
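
update_sge advances a scatter-gather cursor: consume length bytes from the current SGE, switch to the next list entry when the SGE is used up, or step to the next mapped segment of a multi-segment MR. A stripped-down sketch of just the list-advance half (no MR segment walking, and with sge_length and length collapsed into one field):

#include <stdint.h>
#include <stdio.h>

struct sge { char *vaddr; uint32_t length; };

struct sge_state {
	struct sge sge;         /* current entry, consumed in place */
	struct sge *sg_list;    /* remaining entries */
	int num_sge;
};

static void update(struct sge_state *ss, uint32_t len)
{
	ss->sge.vaddr += len;
	ss->sge.length -= len;
	if (ss->sge.length == 0 && --ss->num_sge)
		ss->sge = *ss->sg_list++;       /* advance to next SGE */
}

int main(void)
{
	char a[8], b[8];
	struct sge rest[] = { { b, sizeof(b) } };
	struct sge_state ss = { { a, sizeof(a) }, rest, 2 };

	update(&ss, sizeof(a));                 /* exhausts a, loads b */
	printf("now in %s, %u bytes left\n",
	       ss.sge.vaddr == b ? "b" : "a", (unsigned) ss.sge.length);
	return 0;
}
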
581#ifdef __LITTLE_ENDIAN
582static inline u32 get_upper_bits(u32 data, u32 shift)
583{
584 return data >> shift;
585}
586
587static inline u32 set_upper_bits(u32 data, u32 shift)
588{
589 return data << shift;
590}
591
592static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
593{
594 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
595 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
596 return data;
597}
598#else
599static inline u32 get_upper_bits(u32 data, u32 shift)
600{
601 return data << shift;
602}
603
604static inline u32 set_upper_bits(u32 data, u32 shift)
605{
606 return data >> shift;
607}
608
609static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
610{
611 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
612 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
613 return data;
614}
615#endif
616
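
These helpers let copy_io assemble arbitrarily aligned source bytes into whole 32-bit words for the PIO buffer without caring about host endianness: the "upper" bytes are the high-order ones on little-endian and the low-order ones on big-endian, so the same callers work on both. A quick check of the little-endian clear_upper_bytes, which keeps n source bytes and positions them at byte offset off within the word:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* little-endian variant, exactly as above */
static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= (sizeof(uint32_t) - n) * BITS_PER_BYTE;
	data >>= (sizeof(uint32_t) - n - off) * BITS_PER_BYTE;
	return data;
}

int main(void)
{
	/* keep the 2 low bytes of 0xaabbccdd, shifted up one byte:
	 * expect 0x00ccdd00 */
	printf("%#010x\n", clear_upper_bytes(0xaabbccdd, 2, 1));
	return 0;
}
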
617static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
618 u32 length)
619{
620 u32 extra = 0;
621 u32 data = 0;
622 u32 last;
623
624 while (1) {
625 u32 len = ss->sge.length;
626 u32 off;
627
628 BUG_ON(len == 0);
629 if (len > length)
630 len = length;
631 if (len > ss->sge.sge_length)
632 len = ss->sge.sge_length;
633 /* If the source address is not aligned, try to align it. */
634 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
635 if (off) {
636 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
637 ~(sizeof(u32) - 1));
638 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
639 u32 y;
640
641 y = sizeof(u32) - off;
642 if (len > y)
643 len = y;
644 if (len + extra >= sizeof(u32)) {
645 data |= set_upper_bits(v, extra *
646 BITS_PER_BYTE);
647 len = sizeof(u32) - extra;
648 if (len == length) {
649 last = data;
650 break;
651 }
652 __raw_writel(data, piobuf);
653 piobuf++;
654 extra = 0;
655 data = 0;
656 } else {
657 /* Clear unused upper bytes */
658 data |= clear_upper_bytes(v, len, extra);
659 if (len == length) {
660 last = data;
661 break;
662 }
663 extra += len;
664 }
665 } else if (extra) {
666 /* Source address is aligned. */
667 u32 *addr = (u32 *) ss->sge.vaddr;
668 int shift = extra * BITS_PER_BYTE;
669 int ushift = 32 - shift;
670 u32 l = len;
671
672 while (l >= sizeof(u32)) {
673 u32 v = *addr;
674
675 data |= set_upper_bits(v, shift);
676 __raw_writel(data, piobuf);
677 data = get_upper_bits(v, ushift);
678 piobuf++;
679 addr++;
680 l -= sizeof(u32);
681 }
682 /*
683 * We still have 'extra' number of bytes leftover.
684 */
685 if (l) {
686 u32 v = *addr;
687
688 if (l + extra >= sizeof(u32)) {
689 data |= set_upper_bits(v, shift);
690 len -= l + extra - sizeof(u32);
691 if (len == length) {
692 last = data;
693 break;
694 }
695 __raw_writel(data, piobuf);
696 piobuf++;
697 extra = 0;
698 data = 0;
699 } else {
700 /* Clear unused upper bytes */
701 data |= clear_upper_bytes(v, l,
702 extra);
703 if (len == length) {
704 last = data;
705 break;
706 }
707 extra += l;
708 }
709 } else if (len == length) {
710 last = data;
711 break;
712 }
713 } else if (len == length) {
714 u32 w;
715
716 /*
717 * Need to round up for the last dword in the
718 * packet.
719 */
720 w = (len + 3) >> 2;
721 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
722 piobuf += w - 1;
723 last = ((u32 *) ss->sge.vaddr)[w - 1];
724 break;
725 } else {
726 u32 w = len >> 2;
727
728 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
729 piobuf += w;
730
731 extra = len & (sizeof(u32) - 1);
732 if (extra) {
733 u32 v = ((u32 *) ss->sge.vaddr)[w];
734
735 /* Clear unused upper bytes */
736 data = clear_upper_bytes(v, extra, 0);
737 }
738 }
739 update_sge(ss, len);
740 length -= len;
741 }
742 /* Update address before sending packet. */
743 update_sge(ss, length);
744 /* flush everything early, before the trigger word */
745 ipath_flush_wc();
746 __raw_writel(last, piobuf);
747 /* be sure trigger word is written */
748 ipath_flush_wc();
749}
750
751/**
752 * ipath_verbs_send - send a packet
753 * @dd: the infinipath device
754 * @hdrwords: the number of words in the header
755 * @hdr: the packet header
756 * @len: the length of the packet in bytes
757 * @ss: the SGE to send
758 */
759int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
760 u32 *hdr, u32 len, struct ipath_sge_state *ss)
761{
762 u32 __iomem *piobuf;
763 u32 plen;
764 int ret;
765
766 /* +1 is for the qword padding of pbc */
767 plen = hdrwords + ((len + 3) >> 2) + 1;
768 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
769 ipath_dbg("packet len 0x%x too long, failing\n", plen);
770 ret = -EINVAL;
771 goto bail;
772 }
773
774 /* Get a PIO buffer to use. */
775 piobuf = ipath_getpiobuf(dd, NULL);
776 if (unlikely(piobuf == NULL)) {
777 ret = -EBUSY;
778 goto bail;
779 }
780
781 /*
782 * Write len to control qword, no flags.
783 * We have to flush after the PBC for correctness on some cpus
784 * or WC buffer can be written out of order.
785 */
786 writeq(plen, piobuf);
787 ipath_flush_wc();
788 piobuf += 2;
789 if (len == 0) {
790 /*
791 * If there is just the header portion, must flush before
792 * writing last word of header for correctness, and after
793 * the last header word (trigger word).
794 */
795 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
796 ipath_flush_wc();
797 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
798 ipath_flush_wc();
799 ret = 0;
800 goto bail;
801 }
802
803 __iowrite32_copy(piobuf, hdr, hdrwords);
804 piobuf += hdrwords;
805
806 /* The common case is aligned and contained in one segment. */
807 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
808 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
809 u32 w;
810 u32 *addr = (u32 *) ss->sge.vaddr;
811
812 /* Update address before sending packet. */
813 update_sge(ss, len);
814 /* Need to round up for the last dword in the packet. */
815 w = (len + 3) >> 2;
816 __iowrite32_copy(piobuf, addr, w - 1);
817 /* flush everything early, before the trigger word */
818 ipath_flush_wc();
819 __raw_writel(addr[w - 1], piobuf + w - 1);
820 /* be sure trigger word is written */
821 ipath_flush_wc();
822 ret = 0;
823 goto bail;
824 }
825 copy_io(piobuf, ss, len);
826 ret = 0;
827
828bail:
829 return ret;
830}
831
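
The length check at the top of ipath_verbs_send works in 32-bit words: header words, plus the payload rounded up to whole words, plus one word for the qword padding of the PBC. A worked example with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdrwords = 13;         /* example header length in words */
	uint32_t len = 9;               /* example payload length in bytes */

	/* +1 is for the qword padding of the pbc, as above */
	uint32_t plen = hdrwords + ((len + 3) >> 2) + 1;

	/* 9 bytes round up to 3 words: 13 + 3 + 1 = 17 words, 68 bytes */
	printf("plen = %u words (%u bytes)\n",
	       (unsigned) plen, (unsigned) (plen << 2));
	return 0;
}

The plen << 2 byte count is what gets compared against ipath_ibmaxlen before a PIO buffer is even requested.
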
832int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
833 u64 *rwords, u64 *spkts, u64 *rpkts,
834 u64 *xmit_wait)
835{
836 int ret;
837
838 if (!(dd->ipath_flags & IPATH_INITTED)) {
839 /* no hardware, freeze, etc. */
840 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
841 ret = -EINVAL;
842 goto bail;
843 }
844 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
845 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
846 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
847 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
848 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
849
850 ret = 0;
851
852bail:
853 return ret;
854}
855
856/**
857 * ipath_get_counters - get various chip counters
858 * @dd: the infinipath device
859 * @cntrs: counters are placed here
860 *
861 * Return the counters needed by recv_pma_get_portcounters().
862 */
863int ipath_get_counters(struct ipath_devdata *dd,
864 struct ipath_verbs_counters *cntrs)
865{
866 int ret;
867
868 if (!(dd->ipath_flags & IPATH_INITTED)) {
869 /* no hardware, freeze, etc. */
870 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
871 ret = -EINVAL;
872 goto bail;
873 }
874 cntrs->symbol_error_counter =
875 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
876 cntrs->link_error_recovery_counter =
877 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
878 /*
879 * The link downed counter counts when the other side downs the
880 * connection. We add in the number of times we downed the link
881 * due to local link integrity errors to compensate.
882 */
883 cntrs->link_downed_counter =
884 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
885 cntrs->port_rcv_errors =
886 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
887 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
888 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
889 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
890 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
891 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
892 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
893 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
894 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
895 cntrs->port_rcv_remphys_errors =
896 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
897 cntrs->port_xmit_discards =
898 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
899 cntrs->port_xmit_data =
900 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
901 cntrs->port_rcv_data =
902 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
903 cntrs->port_xmit_packets =
904 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
905 cntrs->port_rcv_packets =
906 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
907 cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
908 cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
909
910 ret = 0;
911
912bail:
913 return ret;
914}
915
556/** 916/**
557 * ipath_ib_piobufavail - callback when a PIO buffer is available 917 * ipath_ib_piobufavail - callback when a PIO buffer is available
558 * @arg: the device pointer 918 * @arg: the device pointer
@@ -595,9 +955,9 @@ static int ipath_query_device(struct ib_device *ibdev,
595 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 955 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
596 IB_DEVICE_SYS_IMAGE_GUID; 956 IB_DEVICE_SYS_IMAGE_GUID;
597 props->page_size_cap = PAGE_SIZE; 957 props->page_size_cap = PAGE_SIZE;
598 props->vendor_id = ipath_layer_get_vendorid(dev->dd); 958 props->vendor_id = dev->dd->ipath_vendorid;
599 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); 959 props->vendor_part_id = dev->dd->ipath_deviceid;
600 props->hw_ver = ipath_layer_get_pcirev(dev->dd); 960 props->hw_ver = dev->dd->ipath_pcirev;
601 961
602 props->sys_image_guid = dev->sys_image_guid; 962 props->sys_image_guid = dev->sys_image_guid;
603 963
@@ -618,7 +978,7 @@ static int ipath_query_device(struct ib_device *ibdev,
618 props->max_srq_sge = ib_ipath_max_srq_sges; 978 props->max_srq_sge = ib_ipath_max_srq_sges;
619 /* props->local_ca_ack_delay */ 979 /* props->local_ca_ack_delay */
620 props->atomic_cap = IB_ATOMIC_HCA; 980 props->atomic_cap = IB_ATOMIC_HCA;
621 props->max_pkeys = ipath_layer_get_npkeys(dev->dd); 981 props->max_pkeys = ipath_get_npkeys(dev->dd);
622 props->max_mcast_grp = ib_ipath_max_mcast_grps; 982 props->max_mcast_grp = ib_ipath_max_mcast_grps;
623 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; 983 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
624 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 984 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -643,12 +1003,17 @@ const u8 ipath_cvt_physportstate[16] = {
643 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, 1003 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
644}; 1004};
645 1005
1006u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1007{
1008 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1009}
1010
646static int ipath_query_port(struct ib_device *ibdev, 1011static int ipath_query_port(struct ib_device *ibdev,
647 u8 port, struct ib_port_attr *props) 1012 u8 port, struct ib_port_attr *props)
648{ 1013{
649 struct ipath_ibdev *dev = to_idev(ibdev); 1014 struct ipath_ibdev *dev = to_idev(ibdev);
650 enum ib_mtu mtu; 1015 enum ib_mtu mtu;
651 u16 lid = ipath_layer_get_lid(dev->dd); 1016 u16 lid = dev->dd->ipath_lid;
652 u64 ibcstat; 1017 u64 ibcstat;
653 1018
654 memset(props, 0, sizeof(*props)); 1019 memset(props, 0, sizeof(*props));
@@ -656,16 +1021,16 @@ static int ipath_query_port(struct ib_device *ibdev,
656 props->lmc = dev->mkeyprot_resv_lmc & 7; 1021 props->lmc = dev->mkeyprot_resv_lmc & 7;
657 props->sm_lid = dev->sm_lid; 1022 props->sm_lid = dev->sm_lid;
658 props->sm_sl = dev->sm_sl; 1023 props->sm_sl = dev->sm_sl;
659 ibcstat = ipath_layer_get_lastibcstat(dev->dd); 1024 ibcstat = dev->dd->ipath_lastibcstat;
660 props->state = ((ibcstat >> 4) & 0x3) + 1; 1025 props->state = ((ibcstat >> 4) & 0x3) + 1;
661 /* See phys_state_show() */ 1026 /* See phys_state_show() */
662 props->phys_state = ipath_cvt_physportstate[ 1027 props->phys_state = ipath_cvt_physportstate[
663 ipath_layer_get_lastibcstat(dev->dd) & 0xf]; 1028 dev->dd->ipath_lastibcstat & 0xf];
664 props->port_cap_flags = dev->port_cap_flags; 1029 props->port_cap_flags = dev->port_cap_flags;
665 props->gid_tbl_len = 1; 1030 props->gid_tbl_len = 1;
666 props->max_msg_sz = 0x80000000; 1031 props->max_msg_sz = 0x80000000;
667 props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd); 1032 props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
668 props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) - 1033 props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
669 dev->z_pkey_violations; 1034 dev->z_pkey_violations;
670 props->qkey_viol_cntr = dev->qkey_violations; 1035 props->qkey_viol_cntr = dev->qkey_violations;
671 props->active_width = IB_WIDTH_4X; 1036 props->active_width = IB_WIDTH_4X;
@@ -675,7 +1040,7 @@ static int ipath_query_port(struct ib_device *ibdev,
675 props->init_type_reply = 0; 1040 props->init_type_reply = 0;
676 1041
677 props->max_mtu = IB_MTU_4096; 1042 props->max_mtu = IB_MTU_4096;
678 switch (ipath_layer_get_ibmtu(dev->dd)) { 1043 switch (dev->dd->ipath_ibmtu) {
679 case 4096: 1044 case 4096:
680 mtu = IB_MTU_4096; 1045 mtu = IB_MTU_4096;
681 break; 1046 break;
@@ -734,7 +1099,7 @@ static int ipath_modify_port(struct ib_device *ibdev,
734 dev->port_cap_flags |= props->set_port_cap_mask; 1099 dev->port_cap_flags |= props->set_port_cap_mask;
735 dev->port_cap_flags &= ~props->clr_port_cap_mask; 1100 dev->port_cap_flags &= ~props->clr_port_cap_mask;
736 if (port_modify_mask & IB_PORT_SHUTDOWN) 1101 if (port_modify_mask & IB_PORT_SHUTDOWN)
737 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); 1102 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
738 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) 1103 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
739 dev->qkey_violations = 0; 1104 dev->qkey_violations = 0;
740 return 0; 1105 return 0;
@@ -751,7 +1116,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port,
751 goto bail; 1116 goto bail;
752 } 1117 }
753 gid->global.subnet_prefix = dev->gid_prefix; 1118 gid->global.subnet_prefix = dev->gid_prefix;
754 gid->global.interface_id = ipath_layer_get_guid(dev->dd); 1119 gid->global.interface_id = dev->dd->ipath_guid;
755 1120
756 ret = 0; 1121 ret = 0;
757 1122
@@ -902,25 +1267,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
902 return 0; 1267 return 0;
903} 1268}
904 1269
1270/**
1271 * ipath_get_npkeys - return the size of the PKEY table for port 0
1272 * @dd: the infinipath device
1273 */
1274unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1275{
1276 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1277}
1278
1279/**
1280 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
1281 * @dd: the infinipath device
1282 * @index: the PKEY index
1283 */
1284unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1285{
1286 unsigned ret;
1287
1288 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1289 ret = 0;
1290 else
1291 ret = dd->ipath_pd[0]->port_pkeys[index];
1292
1293 return ret;
1294}
1295
905static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1296static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
906 u16 *pkey) 1297 u16 *pkey)
907{ 1298{
908 struct ipath_ibdev *dev = to_idev(ibdev); 1299 struct ipath_ibdev *dev = to_idev(ibdev);
909 int ret; 1300 int ret;
910 1301
911 if (index >= ipath_layer_get_npkeys(dev->dd)) { 1302 if (index >= ipath_get_npkeys(dev->dd)) {
912 ret = -EINVAL; 1303 ret = -EINVAL;
913 goto bail; 1304 goto bail;
914 } 1305 }
915 1306
916 *pkey = ipath_layer_get_pkey(dev->dd, index); 1307 *pkey = ipath_get_pkey(dev->dd, index);
917 ret = 0; 1308 ret = 0;
918 1309
919bail: 1310bail:
920 return ret; 1311 return ret;
921} 1312}
922 1313
923
924/** 1314/**
925 * ipath_alloc_ucontext - allocate a ucontext 1315 * ipath_alloc_ucontext - allocate a ucontext
926 * @ibdev: the infiniband device 1316 * @ibdev: the infiniband device
@@ -953,6 +1343,63 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context)
 
 static int ipath_verbs_register_sysfs(struct ib_device *dev);
 
+static void __verbs_timer(unsigned long arg)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+	/*
+	 * If port 0 receive packet interrupts are not available, or
+	 * can be missed, poll the receive queue
+	 */
+	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+		ipath_kreceive(dd);
+
+	/* Handle verbs layer timeouts. */
+	ipath_ib_timer(dd->verbs_dev);
+
+	mod_timer(&dd->verbs_timer, jiffies + 1);
+}
+
+static int enable_timer(struct ipath_devdata *dd)
+{
+	/*
+	 * Early chips had a design flaw where the chip and kernel idea
+	 * of the tail register don't always agree, and therefore we won't
+	 * get an interrupt on the next packet received.
+	 * If the board supports per packet receive interrupts, use it.
+	 * Otherwise, the timer function periodically checks for packets
+	 * to cover this case.
+	 * Either way, the timer is needed for verbs layer related
+	 * processing.
+	 */
+	if (dd->ipath_flags & IPATH_GPIO_INTR) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+				 0x2074076542310ULL);
+		/* Enable GPIO bit 2 interrupt */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 (u64) (1 << 2));
+	}
+
+	init_timer(&dd->verbs_timer);
+	dd->verbs_timer.function = __verbs_timer;
+	dd->verbs_timer.data = (unsigned long)dd;
+	dd->verbs_timer.expires = jiffies + 1;
+	add_timer(&dd->verbs_timer);
+
+	return 0;
+}
+
+static int disable_timer(struct ipath_devdata *dd)
+{
+	/* Disable GPIO bit 2 interrupt */
+	if (dd->ipath_flags & IPATH_GPIO_INTR)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+	del_timer_sync(&dd->verbs_timer);
+
+	return 0;
+}
+
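A note on the teardown: __verbs_timer() re-arms itself with mod_timer(), so a plain del_timer() in disable_timer() could race with a handler already running on another CPU, which would simply re-queue the timer after deletion; del_timer_sync() waits for the running handler to finish and guarantees the timer is no longer pending. A stripped-down sketch of the pattern, with hypothetical names, assuming the timer API of this kernel era:

struct poll_dev {			/* hypothetical device */
	struct timer_list timer;
};

static void poll_fn(unsigned long arg)
{
	struct poll_dev *pd = (struct poll_dev *) arg;

	/* ... do the periodic work ... */
	mod_timer(&pd->timer, jiffies + 1);	/* re-arm, one tick out */
}

static void poll_start(struct poll_dev *pd)
{
	init_timer(&pd->timer);
	pd->timer.function = poll_fn;
	pd->timer.data = (unsigned long) pd;
	pd->timer.expires = jiffies + 1;
	add_timer(&pd->timer);
}

static void poll_stop(struct poll_dev *pd)
{
	/* waits for a running handler; it cannot re-arm after this */
	del_timer_sync(&pd->timer);
}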
 /**
  * ipath_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
@@ -960,7 +1407,7 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev);
  */
 int ipath_register_ib_device(struct ipath_devdata *dd)
 {
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
 	int ret;
@@ -1020,7 +1467,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
-	ipath_layer_get_counters(dd, &cntrs);
+	ipath_get_counters(dd, &cntrs);
 	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
 	idev->z_link_error_recovery_counter =
 		cntrs.link_error_recovery_counter;
@@ -1044,14 +1491,14 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	 * device types in the system, we can't be sure this is unique.
 	 */
 	if (!sys_image_guid)
-		sys_image_guid = ipath_layer_get_guid(dd);
+		sys_image_guid = dd->ipath_guid;
 	idev->sys_image_guid = sys_image_guid;
 	idev->ib_unit = dd->ipath_unit;
 	idev->dd = dd;
 
 	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
 	dev->owner = THIS_MODULE;
-	dev->node_guid = ipath_layer_get_guid(dd);
+	dev->node_guid = dd->ipath_guid;
 	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1085,7 +1532,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
 	dev->node_type = IB_NODE_CA;
 	dev->phys_port_cnt = 1;
-	dev->dma_device = ipath_layer_get_device(dd);
+	dev->dma_device = &dd->pcidev->dev;
 	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
@@ -1139,7 +1586,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	if (ipath_verbs_register_sysfs(dev))
 		goto err_class;
 
-	ipath_layer_enable_timer(dd);
+	enable_timer(dd);
 
 	goto bail;
 
@@ -1164,7 +1611,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
 	struct ib_device *ibdev = &dev->ibdev;
 
-	ipath_layer_disable_timer(dev->dd);
+	disable_timer(dev->dd);
 
 	ib_unregister_device(ibdev);
 
@@ -1197,7 +1644,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 
-	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1206,7 +1653,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
-	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index d6faa4ba6067..00f4cecc258e 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -153,19 +153,6 @@ struct ipath_mcast {
 	int n_attached;
 };
 
-/* Memory region */
-struct ipath_mr {
-	struct ib_mr ibmr;
-	struct ipath_mregion mr;	/* must be last */
-};
-
-/* Fast memory region */
-struct ipath_fmr {
-	struct ib_fmr ibfmr;
-	u8 page_shift;
-	struct ipath_mregion mr;	/* must be last */
-};
-
 /* Protection domain */
 struct ipath_pd {
 	struct ib_pd ibpd;
@@ -217,6 +204,54 @@ struct ipath_cq {
 };
 
 /*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+	void *vaddr;
+	size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
+
+struct ipath_segarray {
+	struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+	u64 user_base;		/* User's address for this region */
+	u64 iova;		/* IB start address of this region */
+	size_t length;
+	u32 lkey;
+	u32 offset;		/* offset (bytes) to start of region */
+	int access_flags;
+	u32 max_segs;		/* number of ipath_segs in all the arrays */
+	u32 mapsz;		/* size of the map array */
+	struct ipath_segarray *map[0];	/* the segments */
+};
+
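map[0] is a flexible array: segment i of a region lives at map[i / IPATH_SEGSZ]->segs[i % IPATH_SEGSZ]. A sketch, with a hypothetical helper name, of how code in the style of the driver's key-checking path resolves a byte offset into the region to the segment holding it (a real caller would also keep the residual offset):

static void seg_from_offset(struct ipath_mregion *mr, u32 off,
			    u16 *m, u16 *n)
{
	*m = 0;
	*n = 0;
	/* consume whole segments until the offset falls inside one */
	while (off >= mr->map[*m]->segs[*n].length) {
		off -= mr->map[*m]->segs[*n].length;
		if (++(*n) >= IPATH_SEGSZ) {
			(*m)++;
			*n = 0;
		}
	}
}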
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+	struct ipath_mregion *mr;
+	void *vaddr;		/* current pointer into the segment */
+	u32 sge_length;		/* length of the SGE */
+	u32 length;		/* remaining length of the segment */
+	u16 m;			/* current index: mr->map[m] */
+	u16 n;			/* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct ipath_mr {
+	struct ib_mr ibmr;
+	struct ipath_mregion mr;	/* must be last */
+};
+
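The /* must be last */ comment is load-bearing: because ipath_mregion ends in the map[0] flexible array, the MR and its first-level pointer table are allocated as one block. A sketch of the sizing arithmetic, assuming a count of segments to map (the function name is hypothetical, and the second-level segarrays would be allocated separately):

static struct ipath_mr *mr_alloc_sketch(int count)
{
	struct ipath_mr *mr;
	int m;

	/* one first-level pointer per IPATH_SEGSZ segments, rounded up */
	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (mr)
		mr->mr.mapsz = m;
	return mr;
}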
+/*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
  * in qp->s_max_sge.
@@ -270,6 +305,12 @@ struct ipath_srq {
 	u32 limit;
 };
 
+struct ipath_sge_state {
+	struct ipath_sge *sg_list;	/* next SGE to be used if any */
+	struct ipath_sge sge;	/* progress state for the current SGE */
+	u8 num_sge;
+};
+
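ipath_sge_state is the cursor the send and receive paths carry while copying: sge is the SGE currently being consumed, and sg_list/num_sge supply the rest. A sketch of the advance step after copying len bytes from the current position, following the pattern the verbs copy routines use (the helper name is hypothetical):

static void sge_advance(struct ipath_sge_state *ss, u32 len)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += len;
	sge->length -= len;
	sge->sge_length -= len;
	if (sge->sge_length == 0) {
		/* this SGE is exhausted; pull in the next one, if any */
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr) {
		/* crossed a segment boundary inside the region */
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}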
 /*
  * Variables prefixed with s_ are for the requester (sender).
  * Variables prefixed with r_ are for the responder (receiver).
@@ -500,8 +541,19 @@ struct ipath_ibdev {
 	struct ipath_opcode_stats opstats[128];
 };
 
-struct ipath_ucontext {
-	struct ib_ucontext ibucontext;
+struct ipath_verbs_counters {
+	u64 symbol_error_counter;
+	u64 link_error_recovery_counter;
+	u64 link_downed_counter;
+	u64 port_rcv_errors;
+	u64 port_rcv_remphys_errors;
+	u64 port_xmit_discards;
+	u64 port_xmit_data;
+	u64 port_rcv_data;
+	u64 port_xmit_packets;
+	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
 };
 
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -509,11 +561,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct ipath_mr, ibmr);
 }
 
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-	return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
 static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
 {
 	return container_of(ibpd, struct ipath_pd, ibpd);
@@ -551,12 +598,6 @@ int ipath_process_mad(struct ib_device *ibdev,
 		      struct ib_grh *in_grh,
 		      struct ib_mad *in_mad, struct ib_mad *out_mad);
 
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-						  *ibucontext)
-{
-	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
 /*
  * Compare the lower 24 bits of the two values.
  * Returns an integer <, ==, or > than zero.
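ipath_cmp24() orders 24-bit packet sequence numbers, which wrap. The usual idiom (a sketch; it relies on arithmetic right shift of signed values, which kernel code assumes of gcc) sign-extends the 24-bit difference so values on opposite sides of the wrap still compare correctly — e.g. a = 5 taken just after b = 0xfffffe compares as a - b = 7 > 0:

static inline int cmp24_sketch(u32 a, u32 b)
{
	/* keep the low 24 bits of (a - b), sign-extended to 32 bits */
	return (((int) a) - ((int) b)) << 8 >> 8;
}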
@@ -568,6 +609,13 @@ static inline int ipath_cmp24(u32 a, u32 b)
 
 struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
 
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait);
+
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs);
+
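ipath_get_counters() reports raw, since-power-on chip counts; the verbs layer makes them appear cleared by subtracting the snapshot taken in ipath_register_ib_device() (the z_-prefixed fields seen earlier). A sketch of that pattern for one counter, with a hypothetical helper name:

static u64 symbol_errors_since_register(struct ipath_ibdev *idev)
{
	struct ipath_verbs_counters c;

	if (ipath_get_counters(idev->dd, &c))
		return 0;	/* counters unavailable */
	/* report relative to the snapshot taken at registration */
	return c.symbol_error_counter - idev->z_symbol_error_counter;
}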
 int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 
 int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -598,6 +646,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss);
+
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -721,6 +772,12 @@ int ipath_ib_piobufavail(struct ipath_ibdev *);
 
 void ipath_ib_timer(struct ipath_ibdev *);
 
+unsigned ipath_get_npkeys(struct ipath_devdata *);
+
+u32 ipath_get_cr_errpkey(struct ipath_devdata *);
+
+unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
+
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
 extern const u8 ipath_cvt_physportstate[];