Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile           |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h     |  16
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c     | 150
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c   |  97
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c  |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c       | 239
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h     |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c         |  14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c        |  18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c       |  91
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c      |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c         |   1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c      | 391
13 files changed, 896 insertions(+), 136 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fe6738826865..75a6c91944c4 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -20,17 +20,20 @@ ib_ipath-y := \
 	ipath_qp.o \
 	ipath_rc.o \
 	ipath_ruc.o \
+	ipath_sdma.o \
 	ipath_srq.o \
 	ipath_stats.o \
 	ipath_sysfs.o \
 	ipath_uc.o \
 	ipath_ud.o \
 	ipath_user_pages.o \
+	ipath_user_sdma.o \
 	ipath_verbs_mcast.o \
 	ipath_verbs.o
 
 ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
 ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
+ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba7220.o ipath_sd7220.o ipath_sd7220_img.o
 
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 02fd310d1ef4..2cf7cd2cb662 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -447,8 +447,9 @@ struct ipath_user_info {
 #define IPATH_CMD_PIOAVAILUPD	27	/* force an update of PIOAvail reg */
 #define IPATH_CMD_POLL_TYPE	28	/* set the kind of polling we want */
 #define IPATH_CMD_ARMLAUNCH_CTRL	29 /* armlaunch detection control */
-
-#define IPATH_CMD_MAX		29
+/* 30 is unused */
+#define IPATH_CMD_SDMA_INFLIGHT	31	/* sdma inflight counter request */
+#define IPATH_CMD_SDMA_COMPLETE	32	/* sdma completion counter request */
 
 /*
  * Poll types
@@ -486,6 +487,17 @@ struct ipath_cmd {
 	union {
 		struct ipath_tid_info tid_info;
 		struct ipath_user_info user_info;
+
+		/*
+		 * address in userspace where we should put the sdma
+		 * inflight counter
+		 */
+		__u64 sdma_inflight;
+		/*
+		 * address in userspace where we should put the sdma
+		 * completion counter
+		 */
+		__u64 sdma_complete;
 		/* address in userspace of struct ipath_port_info to
 		   write result to */
 		__u64 port_info;
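
Together, the two new commands let a user SDMA client poll the driver for progress. As a rough illustration (not part of this commit, and assuming the usual ipath convention of issuing commands by write()ing a struct ipath_cmd to the device fd), a userspace caller might fetch the completion counter like this:

    #include <stdint.h>
    #include <unistd.h>
    #include "ipath_common.h"	/* struct ipath_cmd, IPATH_CMD_* */

    /* Ask the driver to report the SDMA completion counter; the kernel
     * put_user()s the value at the user address carried in the command. */
    static int query_sdma_complete(int fd, uint32_t *counter)
    {
        struct ipath_cmd cmd;

        cmd.type = IPATH_CMD_SDMA_COMPLETE;
        cmd.cmd.sdma_complete = (uintptr_t) counter;
        return write(fd, &cmd, sizeof(cmd)) < 0 ? -1 : 0;
    }
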
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index c94bc4730b2b..ed7bdc93b9e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -129,8 +129,10 @@ static int __devinit ipath_init_one(struct pci_dev *,
 
 /* Only needed for registration, nothing else needs this info */
 #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
 #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
+#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
 
 /* Number of seconds before our card status check... */
 #define STATUS_TIMEOUT 60
@@ -138,6 +140,7 @@ static int __devinit ipath_init_one(struct pci_dev *,
 static const struct pci_device_id ipath_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
 	{ 0, }
 };
 
@@ -532,6 +535,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
532 "CONFIG_PCI_MSI is not enabled\n", ent->device); 535 "CONFIG_PCI_MSI is not enabled\n", ent->device);
533 return -ENODEV; 536 return -ENODEV;
534#endif 537#endif
538 case PCI_DEVICE_ID_INFINIPATH_7220:
539#ifndef CONFIG_PCI_MSI
540 ipath_dbg("CONFIG_PCI_MSI is not enabled, "
541 "using IntX for unit %u\n", dd->ipath_unit);
542#endif
543 ipath_init_iba7220_funcs(dd);
544 break;
535 default: 545 default:
536 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 546 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
537 "failing\n", ent->device); 547 "failing\n", ent->device);
@@ -887,13 +897,47 @@ int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
 	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
 }
 
+static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
+	char *buf, size_t blen)
+{
+	static const struct {
+		ipath_err_t err;
+		const char *msg;
+	} errs[] = {
+		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
+		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
+		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
+		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
+		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
+		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
+		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
+		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
+		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
+		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
+		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
+		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
+	};
+	int i;
+	int expected;
+	size_t bidx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(errs); i++) {
+		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
+			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+		if ((err & errs[i].err) && !expected)
+			bidx += snprintf(buf + bidx, blen - bidx,
+					 "%s ", errs[i].msg);
+	}
+}
+
 /*
  * Decode the error status into strings, deciding whether to always
  * print * it or not depending on "normal packet errors" vs everything
  * else. Return 1 if "real" errors, otherwise 0 if only packet
  * errors, so caller can decide what to print with the string.
  */
-int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
+int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
+	ipath_err_t err)
 {
 	int iserr = 1;
 	*buf = '\0';
@@ -975,6 +1019,8 @@ int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
 		strlcat(buf, "hardware ", blen);
 	if (err & INFINIPATH_E_RESET)
 		strlcat(buf, "reset ", blen);
+	if (err & INFINIPATH_E_SDMAERRS)
+		decode_sdma_errs(dd, err, buf, blen);
 	if (err & INFINIPATH_E_INVALIDEEPCMD)
 		strlcat(buf, "invalideepromcmd ", blen);
 done:
@@ -1730,30 +1776,80 @@ bail:
  */
 void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 {
+	unsigned long flags;
+
 	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
 		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
 		goto bail;
 	}
+	/*
+	 * If we have SDMA, and it's not disabled, we have to kick off the
+	 * abort state machine, provided we aren't already aborting.
+	 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
+	 * we skip the rest of this routine. It is already "in progress"
+	 */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
+		int skip_cancel;
+		u64 *statp = &dd->ipath_sdma_status;
+
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		skip_cancel =
+			!test_bit(IPATH_SDMA_DISABLED, statp) &&
+			test_and_set_bit(IPATH_SDMA_ABORTING, statp);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (skip_cancel)
+			goto bail;
+	}
+
 	ipath_dbg("Cancelling all in-progress send buffers\n");
 
 	/* skip armlaunch errs for a while */
 	dd->ipath_lastcancel = jiffies + HZ / 2;
 
 	/*
-	 * the abort bit is auto-clearing. We read scratch to be sure
-	 * that cancels and the abort have taken effect in the chip.
+	 * The abort bit is auto-clearing. We also don't want pioavail
+	 * update happening during this, and we don't want any other
+	 * sends going out, so turn those off for the duration. We read
+	 * the scratch register to be sure that cancels and the abort
+	 * have taken effect in the chip. Otherwise two parts are same
+	 * as ipath_force_pio_avail_update()
 	 */
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
+		| INFINIPATH_S_PIOENABLE);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 INFINIPATH_S_ABORT);
+			 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+	/* disarm all send buffers */
 	ipath_disarm_piobufs(dd, 0,
-		(unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
-	if (restore_sendctrl) /* else done by caller later */
+		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
+
+	if (restore_sendctrl) {
+		/* else done by caller later if needed */
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
+			INFINIPATH_S_PIOENABLE;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 				 dd->ipath_sendctrl);
+		/* and again, be sure all have hit the chip */
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
 
-	/* and again, be sure all have hit the chip */
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
+	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
+	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		/* only wait so long for intr */
+		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
+		dd->ipath_sdma_reset_wait = 200;
+		__set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	}
 bail:;
 }
 
@@ -1952,7 +2048,7 @@ bail:
  * sanity checking on this, and we don't deal with what happens to
  * programs that are already running when the size changes.
  * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
+ * link INIT state...
  */
 int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
 {
@@ -2092,9 +2188,8 @@ static void ipath_run_led_override(unsigned long opaque)
 	 * but leave that to per-chip functions.
 	 */
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		dd->ibcs_lts_mask;
-	lstate = (val >> dd->ibcs_ls_shift) & INFINIPATH_IBCS_LINKSTATE_MASK;
+	ltstate = ipath_ib_linktrstate(dd, val);
+	lstate = ipath_ib_linkstate(dd, val);
 
 	dd->ipath_f_setextled(dd, lstate, ltstate);
 	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
@@ -2170,6 +2265,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		teardown_sdma(dd);
+
 	/*
 	 * gracefully stop all sends allowing any in progress to trickle out
 	 * first.
@@ -2187,9 +2285,16 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 	 */
 	udelay(5);
 
+	dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
+
 	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
 	ipath_cancel_sends(dd, 0);
 
+	/*
+	 * we are shutting down, so tell components that care.  We don't do
+	 * this on just a link state change, much like ethernet, a cable
+	 * unplug, etc. doesn't change driver state
+	 */
 	signal_ib_event(dd, IB_EVENT_PORT_ERR);
 
 	/* disable IBC */
@@ -2214,6 +2319,10 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 		del_timer_sync(&dd->ipath_intrchk_timer);
 		dd->ipath_intrchk_timer.data = 0;
 	}
+	if (atomic_read(&dd->ipath_led_override_timer_active)) {
+		del_timer_sync(&dd->ipath_led_override_timer);
+		atomic_set(&dd->ipath_led_override_timer_active, 0);
+	}
 
 	/*
 	 * clear all interrupts and errors, so that the next time the driver
@@ -2408,13 +2517,18 @@ int ipath_reset_device(int unit)
 		}
 	}
 
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		teardown_sdma(dd);
+
 	dd->ipath_flags &= ~IPATH_INITTED;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
 	ret = dd->ipath_f_reset(dd);
-	if (ret != 1)
-		ipath_dbg("reset was not successful\n");
-	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
-		  unit);
-	ret = ipath_init_chip(dd, 1);
+	if (ret == 1) {
+		ipath_dbg("Reinitializing unit %u after reset attempt\n",
+			  unit);
+		ret = ipath_init_chip(dd, 1);
+	} else
+		ret = -EAGAIN;
 	if (ret)
 		ipath_dev_err(dd, "Reinitialize unit %u after "
 			      "reset failed with %d\n", unit, ret);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index b87d3126e4dc..d38ba293323d 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -36,21 +36,28 @@
 #include <linux/cdev.h>
 #include <linux/swap.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
 #include "ipath_common.h"
+#include "ipath_user_sdma.h"
 
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
 			   loff_t *);
+static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
+			    unsigned long , loff_t);
 static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
 static int ipath_mmap(struct file *, struct vm_area_struct *);
 
 static const struct file_operations ipath_file_ops = {
 	.owner = THIS_MODULE,
 	.write = ipath_write,
+	.aio_write = ipath_writev,
 	.open = ipath_open,
 	.release = ipath_close,
 	.poll = ipath_poll,
@@ -1870,10 +1877,9 @@ static int ipath_assign_port(struct file *fp,
 	if (ipath_compatible_subports(swmajor, swminor) &&
 	    uinfo->spu_subport_cnt &&
 	    (ret = find_shared_port(fp, uinfo))) {
-		mutex_unlock(&ipath_mutex);
 		if (ret > 0)
 			ret = 0;
-		goto done;
+		goto done_chk_sdma;
 	}
 
 	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
@@ -1885,6 +1891,21 @@ static int ipath_assign_port(struct file *fp,
 	else
 		ret = find_best_unit(fp, uinfo);
 
+done_chk_sdma:
+	if (!ret) {
+		struct ipath_filedata *fd = fp->private_data;
+		const struct ipath_portdata *pd = fd->pd;
+		const struct ipath_devdata *dd = pd->port_dd;
+
+		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
+						      dd->ipath_unit,
+						      pd->port_port,
+						      fd->subport);
+
+		if (!fd->pq)
+			ret = -ENOMEM;
+	}
+
 	mutex_unlock(&ipath_mutex);
 
 done:
@@ -2042,6 +2063,13 @@ static int ipath_close(struct inode *in, struct file *fp)
 		mutex_unlock(&ipath_mutex);
 		goto bail;
 	}
+
+	dd = pd->port_dd;
+
+	/* drain user sdma queue */
+	ipath_user_sdma_queue_drain(dd, fd->pq);
+	ipath_user_sdma_queue_destroy(fd->pq);
+
 	if (--pd->port_cnt) {
 		/*
 		 * XXX If the master closes the port before the slave(s),
@@ -2054,7 +2082,6 @@ static int ipath_close(struct inode *in, struct file *fp)
 		goto bail;
 	}
 	port = pd->port_port;
-	dd = pd->port_dd;
 
 	if (pd->port_hdrqfull) {
 		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
@@ -2176,6 +2203,35 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
 	return ret;
 }
 
+static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
+				   u32 __user *inflightp)
+{
+	const u32 val = ipath_user_sdma_inflight_counter(pq);
+
+	if (put_user(val, inflightp))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ipath_sdma_get_complete(struct ipath_devdata *dd,
+				   struct ipath_user_sdma_queue *pq,
+				   u32 __user *completep)
+{
+	u32 val;
+	int err;
+
+	err = ipath_user_sdma_make_progress(dd, pq);
+	if (err < 0)
+		return err;
+
+	val = ipath_user_sdma_complete_counter(pq);
+	if (put_user(val, completep))
+		return -EFAULT;
+
+	return 0;
+}
+
 static ssize_t ipath_write(struct file *fp, const char __user *data,
 			   size_t count, loff_t *off)
 {
@@ -2250,6 +2306,16 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
 		dest = &cmd.cmd.armlaunch_ctrl;
 		src = &ucmd->cmd.armlaunch_ctrl;
 		break;
+	case IPATH_CMD_SDMA_INFLIGHT:
+		copy = sizeof(cmd.cmd.sdma_inflight);
+		dest = &cmd.cmd.sdma_inflight;
+		src = &ucmd->cmd.sdma_inflight;
+		break;
+	case IPATH_CMD_SDMA_COMPLETE:
+		copy = sizeof(cmd.cmd.sdma_complete);
+		dest = &cmd.cmd.sdma_complete;
+		src = &ucmd->cmd.sdma_complete;
+		break;
 	default:
 		ret = -EINVAL;
 		goto bail;
@@ -2331,6 +2397,17 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
 	else
 		ipath_disable_armlaunch(pd->port_dd);
 		break;
+	case IPATH_CMD_SDMA_INFLIGHT:
+		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
+					      (u32 __user *) (unsigned long)
+					      cmd.cmd.sdma_inflight);
+		break;
+	case IPATH_CMD_SDMA_COMPLETE:
+		ret = ipath_sdma_get_complete(pd->port_dd,
+					      user_sdma_queue_fp(fp),
+					      (u32 __user *) (unsigned long)
+					      cmd.cmd.sdma_complete);
+		break;
 	}
 
 	if (ret >= 0)
@@ -2340,6 +2417,20 @@ bail:
 	return ret;
 }
 
+static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
+			    unsigned long dim, loff_t off)
+{
+	struct file *filp = iocb->ki_filp;
+	struct ipath_filedata *fp = filp->private_data;
+	struct ipath_portdata *pd = port_fp(filp);
+	struct ipath_user_sdma_queue *pq = fp->pq;
+
+	if (!dim)
+		return -EINVAL;
+
+	return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
+}
+
 static struct class *ipath_class;
 
 static int init_cdev(int minor, char *name, const struct file_operations *fops,
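
With .aio_write wired up, vectored writes on the device fd reach ipath_user_sdma_writev(), so a user SDMA submission is just a writev(). A sketch of the system-call shape only (the actual iovec contents, a request header followed by payload, are defined by ipath_user_sdma.c and the userspace library, so hdr/payload here are placeholders):

    #include <stddef.h>
    #include <sys/uio.h>

    /* Submit one SDMA request: iov[0] carries the request header,
     * the remaining iovecs the payload.  Placeholder layout. */
    static ssize_t post_sdma(int fd, void *hdr, size_t hdrlen,
                             void *payload, size_t paylen)
    {
        struct iovec iov[2] = {
            { .iov_base = hdr,     .iov_len = hdrlen },
            { .iov_base = payload, .iov_len = paylen },
        };

        return writev(fd, iov, 2);
    }
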
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index c012e05649b3..b43c2a10029a 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -980,6 +980,10 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		dd->ipath_stats_timer_active = 1;
 	}
 
+	/* Set up SendDMA if chip supports it */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		ret = setup_sdma(dd);
+
 	/* Set up HoL state */
 	init_timer(&dd->ipath_hol_timer);
 	dd->ipath_hol_timer.function = ipath_hol_event;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 90b972f6a840..d0088d5b9a43 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -433,6 +433,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
 				| IPATH_LINKDOWN | IPATH_LINKARMED |
 				IPATH_NOCABLE);
+			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+				ipath_restart_sdma(dd);
 			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
 			/* LED active not handled in chip _f_updown */
 			dd->ipath_f_setextled(dd, lstate, ltstate);
@@ -480,7 +482,7 @@ done:
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char *msg, int msgsz)
+			     unsigned supp_msgs, char *msg, u32 msgsz)
 {
 	/*
 	 * Print the message unless it's ibc status change only, which
@@ -488,12 +490,19 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 	 */
 	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
 		int iserr;
-		iserr = ipath_decode_err(msg, msgsz,
+		ipath_err_t mask;
+		iserr = ipath_decode_err(dd, msg, msgsz,
 					 dd->ipath_lasterror &
 					 ~INFINIPATH_E_IBSTATUSCHANGED);
-		if (dd->ipath_lasterror &
-		    ~(INFINIPATH_E_RRCVEGRFULL |
-		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+
+		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		if (dd->ipath_lasterror & ~mask)
 			ipath_dev_err(dd, "Suppressed %u messages for "
 				      "fast-repeating errors (%s) (%llx)\n",
 				      supp_msgs, msg,
@@ -520,7 +529,7 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 				       ipath_err_t errs, char *msg,
-				       int msgsz, int *noprint)
+				       u32 msgsz, int *noprint)
 {
 	unsigned long nc;
 	static unsigned long nextmsg_time;
@@ -550,19 +559,125 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 	return supp_msgs;
 }
 
+static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+	unsigned long flags;
+	int expected;
+
+	if (ipath_debug & __IPATH_DBG) {
+		char msg[128];
+		ipath_decode_err(dd, msg, sizeof msg, errs &
+			INFINIPATH_E_SDMAERRS);
+		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
+	}
+	if (ipath_debug & __IPATH_VERBDBG) {
+		unsigned long tl, hd, status, lengen;
+		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+		status = ipath_read_kreg64(dd
+			, dd->ipath_kregs->kr_senddmastatus);
+		lengen = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmalengen);
+		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+			"lengen 0x%lx\n", tl, hd, status, lengen);
+	}
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!expected)
+		ipath_cancel_sends(dd, 1);
+}
+
+static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+{
+	unsigned long flags;
+	int expected;
+
+	if ((istat & INFINIPATH_I_SDMAINT) &&
+	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		ipath_sdma_intr(dd);
+
+	if (istat & INFINIPATH_I_SDMADISABLED) {
+		expected = test_bit(IPATH_SDMA_ABORTING,
+			&dd->ipath_sdma_status);
+		ipath_dbg("%s SDmaDisabled intr\n",
+			expected ? "expected" : "unexpected");
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (!expected)
+			ipath_cancel_sends(dd, 1);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+	}
+}
+
+static int handle_hdrq_full(struct ipath_devdata *dd)
+{
+	int chkerrpkts = 0;
+	u32 hd, tl;
+	u32 i;
+
+	ipath_stats.sps_hdrqfull++;
+	for (i = 0; i < dd->ipath_cfgports; i++) {
+		struct ipath_portdata *pd = dd->ipath_pd[i];
+
+		if (i == 0) {
+			/*
+			 * For kernel receive queues, we just want to know
+			 * if there are packets in the queue that we can
+			 * process.
+			 */
+			if (pd->port_head != ipath_get_hdrqtail(pd))
+				chkerrpkts |= 1 << i;
+			continue;
+		}
+
+		/* Skip if user context is not open */
+		if (!pd || !pd->port_cnt)
+			continue;
+
+		/* Don't report the same point multiple times. */
+		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
+			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
+		else
+			tl = ipath_get_rcvhdrtail(pd);
+		if (tl == pd->port_lastrcvhdrqtail)
+			continue;
+
+		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
+		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
+			pd->port_lastrcvhdrqtail = tl;
+			pd->port_hdrqfull++;
+			/* flush hdrqfull so that poll() sees it */
+			wmb();
+			wake_up_interruptible(&pd->port_wait);
+		}
+	}
+
+	return chkerrpkts;
+}
+
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[128];
 	u64 ignore_this_time = 0;
-	int i, iserr = 0;
+	u64 iserr = 0;
 	int chkerrpkts = 0, noprint = 0;
 	unsigned supp_msgs;
 	int log_idx;
 
-	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
+	/*
+	 * don't report errors that are masked, either at init
+	 * (not set in ipath_errormask), or temporarily (set in
+	 * ipath_maskederrs)
+	 */
+	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
 
-	/* don't report errors that are masked */
-	errs &= ~dd->ipath_maskederrs;
+	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
+					   &noprint);
 
 	/* do these first, they are most important */
 	if (errs & INFINIPATH_E_HARDWARE) {
@@ -577,6 +692,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		}
 	}
 
+	if (errs & INFINIPATH_E_SDMAERRS)
+		handle_sdma_errors(dd, errs);
+
 	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
 		ipath_dev_err(dd, "error interrupt with unknown errors "
 			      "%llx set\n", (unsigned long long)
@@ -611,7 +729,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		dd->ipath_errormask &= ~dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 				 dd->ipath_errormask);
-		s_iserr = ipath_decode_err(msg, sizeof msg,
+		s_iserr = ipath_decode_err(dd, msg, sizeof msg,
 					   dd->ipath_maskederrs);
 
 		if (dd->ipath_maskederrs &
@@ -661,26 +779,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 				  INFINIPATH_E_IBSTATUSCHANGED);
 	}
 
-	/* likely due to cancel, so suppress */
+	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
+		dd->ipath_spectriggerhit++;
+		ipath_dbg("%lu special trigger hits\n",
+			  dd->ipath_spectriggerhit);
+	}
+
+	/* likely due to cancel; so suppress message unless verbose */
 	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
 	    dd->ipath_lastcancel > jiffies) {
-		ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
+		/* armlaunch takes precedence; it often causes both. */
+		ipath_cdbg(VERBOSE,
+			   "Suppressed %s error (%llx) after sendbuf cancel\n",
+			   (errs & INFINIPATH_E_SPIOARMLAUNCH) ?
+			   "armlaunch" : "sendpktlen", (unsigned long long)errs);
 		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
 	}
 
 	if (!errs)
 		return 0;
 
-	if (!noprint)
+	if (!noprint) {
+		ipath_err_t mask;
 		/*
-		 * the ones we mask off are handled specially below or above
+		 * The ones we mask off are handled specially below
+		 * or above.  Also mask SDMADISABLED by default as it
+		 * is too chatty.
 		 */
-		ipath_decode_err(msg, sizeof msg,
-				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
-					  INFINIPATH_E_RRCVEGRFULL |
-					  INFINIPATH_E_RRCVHDRFULL |
-					  INFINIPATH_E_HARDWARE));
-	else
+		mask = INFINIPATH_E_IBSTATUSCHANGED |
+			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
+	} else
 		/* so we don't need if (!noprint) at strlcat's below */
 		*msg = 0;
 
@@ -705,39 +840,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * fast_stats, no more than every 5 seconds, user ports get printed
 	 * on close
 	 */
-	if (errs & INFINIPATH_E_RRCVHDRFULL) {
-		u32 hd, tl;
-		ipath_stats.sps_hdrqfull++;
-		for (i = 0; i < dd->ipath_cfgports; i++) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-			if (i == 0) {
-				hd = pd->port_head;
-				tl = ipath_get_hdrqtail(pd);
-			} else if (pd && pd->port_cnt &&
-				   pd->port_rcvhdrtail_kvaddr) {
-				/*
-				 * don't report same point multiple times,
-				 * except kernel
-				 */
-				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
-				if (tl == pd->port_lastrcvhdrqtail)
-					continue;
-				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
-						       i);
-			} else
-				continue;
-			if (hd == (tl + 1) ||
-			    (!hd && tl == dd->ipath_hdrqlast)) {
-				if (i == 0)
-					chkerrpkts = 1;
-				pd->port_lastrcvhdrqtail = tl;
-				pd->port_hdrqfull++;
-				/* flush hdrqfull so that poll() sees it */
-				wmb();
-				wake_up_interruptible(&pd->port_wait);
-			}
-		}
-	}
+	if (errs & INFINIPATH_E_RRCVHDRFULL)
+		chkerrpkts |= handle_hdrq_full(dd);
 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
 		struct ipath_portdata *pd = dd->ipath_pd[0];
 
@@ -749,7 +853,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		 */
 		ipath_stats.sps_etidfull++;
 		if (pd->port_head != ipath_get_hdrqtail(pd))
-			chkerrpkts = 1;
+			chkerrpkts |= 1;
 	}
 
 	/*
@@ -788,9 +892,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	if (!noprint && *msg) {
 		if (iserr)
 			ipath_dev_err(dd, "%s error\n", msg);
-		else
-			dev_info(&dd->pcidev->dev, "%s packet problems\n",
-				 msg);
 	}
 	if (dd->ipath_state_wanted & dd->ipath_flags) {
 		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
@@ -1017,7 +1118,7 @@ static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 irqreturn_t ipath_intr(int irq, void *data)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat, chk0rcv = 0;
+	u64 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
 	static unsigned unexpected = 0;
@@ -1070,17 +1171,17 @@ irqreturn_t ipath_intr(int irq, void *data)
 
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
-			      "interrupt with unknown interrupts %x set\n",
-			      istat & (u32) ~ dd->ipath_i_bitsextant);
-	else
-		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+			      "interrupt with unknown interrupts %Lx set\n",
+			      istat & ~dd->ipath_i_bitsextant);
+	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
 
-	if (unlikely(istat & INFINIPATH_I_ERROR)) {
+	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
 		estat = ipath_read_kreg64(dd,
 					  dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
-			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
 				 "but no error bits set!\n", istat);
 		else if (estat == -1LL)
 			/*
@@ -1198,6 +1299,9 @@ irqreturn_t ipath_intr(int irq, void *data)
 	     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
 		handle_urcv(dd, istat);
 
+	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
+		handle_sdma_intr(dd, istat);
+
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
 		unsigned long flags;
 
@@ -1208,7 +1312,10 @@ irqreturn_t ipath_intr(int irq, void *data)
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-		handle_layer_pioavail(dd);
+		if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+			handle_layer_pioavail(dd);
+		else
+			ipath_dbg("unexpected BUFAVAIL intr\n");
 	}
 
 	ret = IRQ_HANDLED;
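
One detail worth calling out: handle_hdrq_full() returns a per-port bitmask (bit i set when port i has packets worth checking) rather than the old 0/1 flag, which is why its callers changed from chkerrpkts = 1 to chkerrpkts |= .... A toy sketch of the accumulate-then-scan pattern, with made-up helpers:

    #include <stdio.h>

    #define NPORTS 8

    static int port_has_work(int i) { return i == 0 || i == 3; }	/* stub */

    int main(void)
    {
        unsigned chk = 0;
        int i;

        for (i = 0; i < NPORTS; i++)
            if (port_has_work(i))
                chk |= 1u << i;		/* one bit per port */

        for (i = 0; i < NPORTS; i++)
            if (chk & (1u << i))
                printf("check port %d\n", i);
        return 0;
    }
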
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 550d46c1aefb..a2f036c9c28c 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -871,7 +871,8 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
 extern int ipath_diag_inuse;
 
 irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
+int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
+		     ipath_err_t err);
 #if __IPATH_INFO || __IPATH_DBG
 extern const char *ipath_ibcstatus_str[];
 #endif
@@ -1026,6 +1027,7 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
 /* send dma routines */
 int setup_sdma(struct ipath_devdata *);
 void teardown_sdma(struct ipath_devdata *);
+void ipath_restart_sdma(struct ipath_devdata *);
 void ipath_sdma_intr(struct ipath_devdata *);
 int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
 			  u32, struct ipath_verbs_txreq *);
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 812b42c500e1..ded970bd13e0 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -340,6 +340,7 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
+	qp->s_pkt_delay = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -563,8 +564,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
 		qp->qp_access_flags = attr->qp_access_flags;
 
-	if (attr_mask & IB_QP_AV)
+	if (attr_mask & IB_QP_AV) {
 		qp->remote_ah_attr = attr->ah_attr;
+		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
+	}
 
 	if (attr_mask & IB_QP_PATH_MTU)
 		qp->path_mtu = attr->path_mtu;
@@ -850,6 +853,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		goto bail_qp;
 	}
 	qp->ip = NULL;
+	qp->s_tx = NULL;
 	ipath_reset_qp(qp, init_attr->qp_type);
 	break;
 
@@ -955,12 +959,20 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 	/* Stop the sending tasklet. */
 	tasklet_kill(&qp->s_task);
 
+	if (qp->s_tx) {
+		atomic_dec(&qp->refcount);
+		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+			kfree(qp->s_tx->txreq.map_addr);
+	}
+
 	/* Make sure the QP isn't on the timeout list. */
 	spin_lock_irqsave(&dev->pending_lock, flags);
 	if (!list_empty(&qp->timerwait))
 		list_del_init(&qp->timerwait);
 	if (!list_empty(&qp->piowait))
 		list_del_init(&qp->piowait);
+	if (qp->s_tx)
+		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/*
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index a59bdbd0ed87..bcaa2914e341 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -483,14 +483,16 @@ done:
 
 static void want_buffer(struct ipath_devdata *dd)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+		dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 dd->ipath_sendctrl);
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	}
 }
 
 /**
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 5918cafb880b..1974df7a9f78 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -230,7 +230,6 @@ static void dump_sdma_state(struct ipath_devdata *dd)
 static void sdma_abort_task(unsigned long opaque)
 {
 	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-	int kick = 0;
 	u64 status;
 	unsigned long flags;
 
@@ -308,30 +307,26 @@ static void sdma_abort_task(unsigned long opaque)
 		/* done with sdma state for a bit */
 		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 
-		/* restart sdma engine */
+		/*
+		 * Don't restart sdma here. Wait until link is up to ACTIVE.
+		 * VL15 MADs used to bring the link up use PIO, and multiple
+		 * link transitions otherwise cause the sdma engine to be
+		 * stopped and started multiple times.
+		 * The disable is done here, including the shadow, so the
+		 * state is kept consistent.
+		 * See ipath_restart_sdma() for the actual starting of sdma.
+		 */
 		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
 		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 				 dd->ipath_sendctrl);
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-				 dd->ipath_sendctrl);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-		kick = 1;
-		ipath_dbg("sdma restarted from abort\n");
-
-		/* now clear status bits */
-		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
 
 		/* make sure I see next message */
 		dd->ipath_sdma_abort_jiffies = 0;
 
-		goto unlock;
+		goto done;
 	}
 
 resched:
@@ -353,10 +348,8 @@ resched_noprint:
 
 unlock:
 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	/* kick upper layers */
-	if (kick)
-		ipath_ib_piobufavail(dd->verbs_dev);
+done:
+	return;
 }
 
 /*
@@ -481,10 +474,14 @@ int setup_sdma(struct ipath_devdata *dd)
 	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
 		     (unsigned long) dd);
 
-	/* Turn on SDMA */
+	/*
+	 * No use to turn on SDMA here, as link is probably not ACTIVE
+	 * Just mark it RUNNING and enable the interrupt, and let the
+	 * ipath_restart_sdma() on link transition to ACTIVE actually
+	 * enable it.
+	 */
 	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE |
-		INFINIPATH_S_SDMAINTENABLE;
+	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
@@ -572,6 +569,56 @@ void teardown_sdma(struct ipath_devdata *dd)
 			  sdma_descq, sdma_descq_phys);
 }
 
+/*
+ * [Re]start SDMA, if we use it, and it's not already OK.
+ * This is called on transition to link ACTIVE, either the first or
+ * subsequent times.
+ */
+void ipath_restart_sdma(struct ipath_devdata *dd)
+{
+	unsigned long flags;
+	int needed = 1;
+
+	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+		goto bail;
+
+	/*
+	 * First, make sure we should, which is to say,
+	 * check that we are "RUNNING" (not in teardown)
+	 * and not "SHUTDOWN"
+	 */
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
+	    || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		needed = 0;
+	else {
+		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	}
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!needed) {
+		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
+			dd->ipath_sdma_status);
+		goto bail;
+	}
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	/*
+	 * First clear, just to be safe. Enable is only done
+	 * in chip on 0->1 transition
+	 */
+	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+bail:
+	return;
+}
+
 static inline void make_sdma_desc(struct ipath_devdata *dd,
 	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
 {
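
Both setup_sdma() and ipath_restart_sdma() depend on the rule noted in the comment above: the chip only acts on a 0->1 transition of the shadowed SDmaEnable bit, and a read of the scratch register is what guarantees the preceding write has reached the chip. Reduced to a skeleton with stand-in register accessors (illustrative only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sendctrl_shadow, chip_sendctrl;

    /* stand-ins for ipath_write_kreg(kr_sendctrl) and
     * ipath_read_kreg64(kr_scratch) */
    static void write_sendctrl(uint64_t v) { chip_sendctrl = v; }
    static uint64_t read_scratch(void) { return 0; }

    static void pulse_enable(uint64_t bit)
    {
        sendctrl_shadow &= ~bit;	/* force a 0 first, to be safe... */
        write_sendctrl(sendctrl_shadow);
        (void) read_scratch();		/* flush the write to the chip */
        sendctrl_shadow |= bit;		/* ...then the 0->1 the chip latches */
        write_sendctrl(sendctrl_shadow);
        (void) read_scratch();
    }

    int main(void)
    {
        pulse_enable(1ULL << 9);	/* hypothetical SDmaEnable position */
        printf("sendctrl now %#llx\n", (unsigned long long) chip_sendctrl);
        return 0;
    }
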
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index adff2f10dc0e..1e36bac81498 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -292,8 +292,8 @@ void ipath_get_faststats(unsigned long opaque)
 	    && time_after(jiffies, dd->ipath_unmasktime)) {
 		char ebuf[256];
 		int iserr;
-		iserr = ipath_decode_err(ebuf, sizeof ebuf,
+		iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
 					 dd->ipath_maskederrs);
 		if (dd->ipath_maskederrs &
 		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
 		      INFINIPATH_E_PKTERRS))
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index de67eed08ed0..4d4d58d8e9d9 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -303,6 +303,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	qp->s_hdrwords = 7;
 	qp->s_cur_size = wqe->length;
 	qp->s_cur_sge = &qp->s_sge;
+	qp->s_dmult = ah_attr->static_rate;
 	qp->s_wqe = wqe;
 	qp->s_sge.sge = wqe->sg_list[0];
 	qp->s_sge.sg_list = wqe->sg_list + 1;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 2e6b6f6265b6..75429aa92ce5 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -242,6 +242,93 @@ static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
 	ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
 }
 
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the ipath_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
+{
+	struct ipath_sge *sg_list = ss->sg_list;
+	struct ipath_sge sge = ss->sge;
+	u8 num_sge = ss->num_sge;
+	u32 ndesc = 1;	/* count the header */
+
+	while (length) {
+		u32 len = sge.length;
+
+		if (len > length)
+			len = length;
+		if (len > sge.sge_length)
+			len = sge.sge_length;
+		BUG_ON(len == 0);
+		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+		    (len != length && (len & (sizeof(u32) - 1)))) {
+			ndesc = 0;
+			break;
+		}
+		ndesc++;
+		sge.vaddr += len;
+		sge.length -= len;
+		sge.sge_length -= len;
+		if (sge.sge_length == 0) {
+			if (--num_sge)
+				sge = *sg_list++;
+		} else if (sge.length == 0 && sge.mr != NULL) {
+			if (++sge.n >= IPATH_SEGSZ) {
+				if (++sge.m >= sge.mr->mapsz)
+					break;
+				sge.n = 0;
+			}
+			sge.vaddr =
+				sge.mr->map[sge.m]->segs[sge.n].vaddr;
+			sge.length =
+				sge.mr->map[sge.m]->segs[sge.n].length;
+		}
+		length -= len;
+	}
+	return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
+				u32 length)
+{
+	struct ipath_sge *sge = &ss->sge;
+
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		memcpy(data, sge->vaddr, len);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr != NULL) {
+			if (++sge->n >= IPATH_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		data += len;
+		length -= len;
+	}
+}
+
 /**
  * ipath_post_one_send - post one RC, UC, or UD send work request
  * @qp: the QP to post on
@@ -866,13 +953,231 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
866 __raw_writel(last, piobuf); 953 __raw_writel(last, piobuf);
867} 954}
868 955
869static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords, 956/*
957 * Convert IB rate to delay multiplier.
958 */
959unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
960{
961 switch (rate) {
962 case IB_RATE_2_5_GBPS: return 8;
963 case IB_RATE_5_GBPS: return 4;
964 case IB_RATE_10_GBPS: return 2;
965 case IB_RATE_20_GBPS: return 1;
966 default: return 0;
967 }
968}
969
970/*
 972 * Convert delay multiplier to IB rate.
972 */
973static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
974{
975 switch (mult) {
976 case 8: return IB_RATE_2_5_GBPS;
977 case 4: return IB_RATE_5_GBPS;
978 case 2: return IB_RATE_10_GBPS;
979 case 1: return IB_RATE_20_GBPS;
980 default: return IB_RATE_PORT_CURRENT;
981 }
982}
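These two tables are used as a pair: ipath_create_ah() (below) stores the multiplier in ah->attr.static_rate, and ipath_query_ah() converts it back, so the mapping must round-trip exactly for the four supported rates and collapse everything else to IB_RATE_PORT_CURRENT. A sanity check one could drop next to these functions (illustrative only; check_rate_roundtrip is not driver code):

	/* Round-trip check for the conversion tables above. */
	static void check_rate_roundtrip(void)
	{
		static const enum ib_rate rates[] = {
			IB_RATE_2_5_GBPS, IB_RATE_5_GBPS,
			IB_RATE_10_GBPS, IB_RATE_20_GBPS,
		};
		unsigned i;

		for (i = 0; i < ARRAY_SIZE(rates); i++)
			BUG_ON(ipath_mult_to_ib_rate(
				ipath_ib_rate_to_mult(rates[i])) != rates[i]);

		/* Unsupported rates convert to mult 0 and query back as
		 * IB_RATE_PORT_CURRENT. */
		BUG_ON(ipath_mult_to_ib_rate(
			ipath_ib_rate_to_mult(IB_RATE_30_GBPS)) !=
		       IB_RATE_PORT_CURRENT);
	}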
983
984static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
985{
986 struct ipath_verbs_txreq *tx = NULL;
987 unsigned long flags;
988
989 spin_lock_irqsave(&dev->pending_lock, flags);
990 if (!list_empty(&dev->txreq_free)) {
991 struct list_head *l = dev->txreq_free.next;
992
993 list_del(l);
994 tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
995 }
996 spin_unlock_irqrestore(&dev->pending_lock, flags);
997 return tx;
998}
999
1000static inline void put_txreq(struct ipath_ibdev *dev,
1001 struct ipath_verbs_txreq *tx)
1002{
1003 unsigned long flags;
1004
1005 spin_lock_irqsave(&dev->pending_lock, flags);
1006 list_add(&tx->txreq.list, &dev->txreq_free);
1007 spin_unlock_irqrestore(&dev->pending_lock, flags);
1008}
1009
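get_txreq()/put_txreq() are the hot-path half of a fixed-size object pool: ipath_register_ib_device() (later in this diff) allocates one array of ipath_sdma_descq_cnt requests and chains every element onto dev->txreq_free, so the send path never touches the allocator and an empty list simply surfaces as -EBUSY. The same pattern in isolation, with hypothetical names:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		struct list_head list;
		/* ... payload ... */
	};

	struct obj_pool {
		spinlock_t lock;
		struct list_head free;	/* LIFO of idle objects */
	};

	/* Carve the whole pool once at init; nothing allocates later.
	 * get/put then work exactly like get_txreq()/put_txreq() above. */
	static struct obj *obj_pool_init(struct obj_pool *p, unsigned n)
	{
		struct obj *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
		unsigned i;

		if (!arr)
			return NULL;
		spin_lock_init(&p->lock);
		INIT_LIST_HEAD(&p->free);
		for (i = 0; i < n; i++)
			list_add(&arr[i].list, &p->free);
		return arr;	/* caller kfree()s the array at teardown */
	}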
1010static void sdma_complete(void *cookie, int status)
1011{
1012 struct ipath_verbs_txreq *tx = cookie;
1013 struct ipath_qp *qp = tx->qp;
1014 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1015
1016 /* Generate a completion queue entry if needed */
1017 if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
1018 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
1019 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
1020
1021 ipath_send_complete(qp, tx->wqe, ibs);
1022 }
1023
1024 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
1025 kfree(tx->txreq.map_addr);
1026 put_txreq(dev, tx);
1027
1028 if (atomic_dec_and_test(&qp->refcount))
1029 wake_up(&qp->wait);
1030}
1031
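The atomic_dec_and_test() here pairs with the atomic_inc() in ipath_verbs_send_dma() below: every queued descriptor holds a QP reference until its completion runs, so teardown can simply sleep until the count drains. A minimal sketch of the handshake, assuming a destroy path that waits on qp->wait as ipath_destroy_qp() does:

	/* Submit: pin the QP for the lifetime of the descriptor. */
	atomic_inc(&qp->refcount);

	/* Complete (this callback): unpin; the last drop wakes waiters. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	/* Teardown: block until every outstanding descriptor has finished. */
	wait_event(qp->wait, !atomic_read(&qp->refcount));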
1032/*
1033 * Compute the number of clock cycles of delay before sending the next packet.
 1034 * The multipliers reflect the number of clocks for the fastest rate, so
1035 * one tick at 4xDDR is 8 ticks at 1xSDR.
1036 * If the destination port will take longer to receive a packet than
1037 * the outgoing link can send it, we need to delay sending the next packet
 1038 * by the difference between the time the receiver needs to receive this
 1039 * packet and the time the sender needs to send it.
1040 * Note that this delay is always correct for UC and RC but not always
1041 * optimal for UD. For UD, the destination HCA can be different for each
 1042 * packet, in which case we could send packets to a different destination
1043 * while "waiting" for the delay. The overhead for doing this without
1044 * HW support is more than just paying the cost of delaying some packets
1045 * unnecessarily.
1046 */
1047static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
1048{
1049 return (rcv_mult > snd_mult) ?
1050 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
1051}
1052
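Plugging the multiplier table into the formula: a roughly 4 KB packet is plen = 1034 dwords, so sending from a 4xDDR port (snd_mult 1) to a 1xSDR destination (rcv_mult 8) yields a hold-off of (1034 * (8 - 1) + 1) >> 1 = 3619 ticks, while any receiver at least as fast as the sender gets none:

	u32 plen = 1034;	/* ~4 KB packet, in dwords */

	ipath_pkt_delay(plen, 1, 8);	/* slow receiver: 3619 ticks */
	ipath_pkt_delay(plen, 8, 1);	/* fast receiver: 0 ticks */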
1053static int ipath_verbs_send_dma(struct ipath_qp *qp,
1054 struct ipath_ib_header *hdr, u32 hdrwords,
1055 struct ipath_sge_state *ss, u32 len,
1056 u32 plen, u32 dwords)
1057{
1058 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1059 struct ipath_devdata *dd = dev->dd;
1060 struct ipath_verbs_txreq *tx;
1061 u32 *piobuf;
1062 u32 control;
1063 u32 ndesc;
1064 int ret;
1065
1066 tx = qp->s_tx;
1067 if (tx) {
1068 qp->s_tx = NULL;
1069 /* resend previously constructed packet */
1070 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
1071 if (ret)
1072 qp->s_tx = tx;
1073 goto bail;
1074 }
1075
1076 tx = get_txreq(dev);
1077 if (!tx) {
1078 ret = -EBUSY;
1079 goto bail;
1080 }
1081
1082 /*
1083 * Get the saved delay count we computed for the previous packet
1084 * and save the delay count for this packet to be used next time
1085 * we get here.
1086 */
1087 control = qp->s_pkt_delay;
1088 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1089
1090 tx->qp = qp;
1091 atomic_inc(&qp->refcount);
1092 tx->wqe = qp->s_wqe;
1093 tx->txreq.callback = sdma_complete;
1094 tx->txreq.callback_cookie = tx;
1095 tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
1096 IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
1097 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1098 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
1099
1100 /* VL15 packets bypass credit check */
1101 if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
1102 control |= 1ULL << 31;
1103 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
1104 }
1105
1106 if (len) {
1107 /*
1108 * Don't try to DMA if it takes more descriptors than
1109 * the queue holds.
1110 */
1111 ndesc = ipath_count_sge(ss, len);
1112 if (ndesc >= dd->ipath_sdma_descq_cnt)
1113 ndesc = 0;
1114 } else
1115 ndesc = 1;
1116 if (ndesc) {
1117 tx->hdr.pbc[0] = cpu_to_le32(plen);
1118 tx->hdr.pbc[1] = cpu_to_le32(control);
1119 memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
1120 tx->txreq.sg_count = ndesc;
1121 tx->map_len = (hdrwords + 2) << 2;
1122 tx->txreq.map_addr = &tx->hdr;
1123 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
1124 if (ret) {
1125 /* save ss and length in dwords */
1126 tx->ss = ss;
1127 tx->len = dwords;
1128 qp->s_tx = tx;
1129 }
1130 goto bail;
1131 }
1132
1133 /* Allocate a buffer and copy the header and payload to it. */
1134 tx->map_len = (plen + 1) << 2;
1135 piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
1136 if (unlikely(piobuf == NULL)) {
1137 ret = -EBUSY;
1138 goto err_tx;
1139 }
1140 tx->txreq.map_addr = piobuf;
1141 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
1142 tx->txreq.sg_count = 1;
1143
1144 *piobuf++ = (__force u32) cpu_to_le32(plen);
1145 *piobuf++ = (__force u32) cpu_to_le32(control);
1146 memcpy(piobuf, hdr, hdrwords << 2);
1147 ipath_copy_from_sge(piobuf + hdrwords, ss, len);
1148
1149 ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
1150 /*
1151 * If we couldn't queue the DMA request, save the info
1152 * and try again later rather than destroying the
1153 * buffer and undoing the side effects of the copy.
1154 */
1155 if (ret) {
1156 tx->ss = NULL;
1157 tx->len = 0;
1158 qp->s_tx = tx;
1159 }
1160 dev->n_unaligned++;
1161 goto bail;
1162
1163err_tx:
1164 if (atomic_dec_and_test(&qp->refcount))
1165 wake_up(&qp->wait);
1166 put_txreq(dev, tx);
1167bail:
1168 return ret;
1169}
1170
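The function above picks one of three strategies: resend a previously constructed request, map the SGE list directly when every descriptor is usable, or fall back to one contiguous bounce buffer (counted in n_unaligned). A condensed, illustrative view of just that decision, with hypothetical enum and function names:

	enum dma_strategy { RESEND_SAVED, GATHER_DIRECT, BOUNCE_COPY };

	/*
	 * Mirror of the flow in ipath_verbs_send_dma(): a saved tx always
	 * wins; otherwise gather directly unless the SGE list is misaligned
	 * (ndesc == 0) or needs more descriptors than the ring holds.
	 */
	static enum dma_strategy pick_strategy(const struct ipath_qp *qp,
					       u32 ndesc, u32 descq_cnt)
	{
		if (qp->s_tx)
			return RESEND_SAVED;
		if (ndesc && ndesc < descq_cnt)
			return GATHER_DIRECT;
		return BOUNCE_COPY;
	}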
1171static int ipath_verbs_send_pio(struct ipath_qp *qp,
1172 struct ipath_ib_header *ibhdr, u32 hdrwords,
870 struct ipath_sge_state *ss, u32 len, 1173 struct ipath_sge_state *ss, u32 len,
871 u32 plen, u32 dwords) 1174 u32 plen, u32 dwords)
872{ 1175{
873 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; 1176 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
1177 u32 *hdr = (u32 *) ibhdr;
874 u32 __iomem *piobuf; 1178 u32 __iomem *piobuf;
875 unsigned flush_wc; 1179 unsigned flush_wc;
1180 u32 control;
876 int ret; 1181 int ret;
877 1182
878 piobuf = ipath_getpiobuf(dd, plen, NULL); 1183 piobuf = ipath_getpiobuf(dd, plen, NULL);
@@ -882,11 +1187,23 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
882 } 1187 }
883 1188
884 /* 1189 /*
885 * Write len to control qword, no flags. 1190 * Get the saved delay count we computed for the previous packet
1191 * and save the delay count for this packet to be used next time
1192 * we get here.
1193 */
1194 control = qp->s_pkt_delay;
1195 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1196
1197 /* VL15 packets bypass credit check */
1198 if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
1199 control |= 1ULL << 31;
1200
1201 /*
 1202 * Write the length and any needed flags to the control qword.
886 * We have to flush after the PBC for correctness on some cpus 1203 * We have to flush after the PBC for correctness on some cpus
887 * or WC buffer can be written out of order. 1204 * or WC buffer can be written out of order.
888 */ 1205 */
889 writeq(plen, piobuf); 1206 writeq(((u64) control << 32) | plen, piobuf);
890 piobuf += 2; 1207 piobuf += 2;
891 1208
892 flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC; 1209 flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
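Both send paths now write the same leading qword: the packet length in dwords in the low 32 bits and the per-packet control word (the delay count, plus the VL15 credit-bypass flag in bit 31) in the high 32 bits. As a sketch of just the packing the two call sites share (ipath_pbc_qword is a hypothetical name):

	/* Build the buffer-control qword written ahead of the headers. */
	static inline u64 ipath_pbc_qword(u32 control, u32 plen_dwords)
	{
		return ((u64)control << 32) | plen_dwords;
	}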
@@ -961,15 +1278,25 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
961 */ 1278 */
962 plen = hdrwords + dwords + 1; 1279 plen = hdrwords + dwords + 1;
963 1280
964 /* Drop non-VL15 packets if we are not in the active state */ 1281 /*
965 if (!(dd->ipath_flags & IPATH_LINKACTIVE) && 1282 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
966 qp->ibqp.qp_type != IB_QPT_SMI) { 1283 * can defer SDMA restart until link goes ACTIVE without
1284 * worrying about just how we got there.
1285 */
1286 if (qp->ibqp.qp_type == IB_QPT_SMI)
1287 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1288 plen, dwords);
1289 /* All non-VL15 packets are dropped if link is not ACTIVE */
1290 else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
967 if (qp->s_wqe) 1291 if (qp->s_wqe)
968 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1292 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
969 ret = 0; 1293 ret = 0;
970 } else 1294 } else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
971 ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords, 1295 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
972 ss, len, plen, dwords); 1296 plen, dwords);
1297 else
1298 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1299 plen, dwords);
973 1300
974 return ret; 1301 return ret;
975} 1302}
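The rewritten dispatch reduces to a small routing table:

	/*
	 * ipath_verbs_send() routing, illustrative summary:
	 *
	 *   QP type     link ACTIVE?  IPATH_HAS_SEND_DMA?  result
	 *   ----------  ------------  -------------------  ------------------
	 *   SMI (VL15)  any           any                  PIO
	 *   others      no            any                  drop, complete WQE
	 *   others      yes           yes                  SDMA
	 *   others      yes           no                   PIO
	 */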
@@ -1038,6 +1365,12 @@ int ipath_get_counters(struct ipath_devdata *dd,
1038 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) + 1365 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1039 ipath_snap_cntr(dd, crp->cr_badformatcnt) + 1366 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1040 dd->ipath_rxfc_unsupvl_errs; 1367 dd->ipath_rxfc_unsupvl_errs;
1368 if (crp->cr_rxotherlocalphyerrcnt)
1369 cntrs->port_rcv_errors +=
1370 ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
1371 if (crp->cr_rxvlerrcnt)
1372 cntrs->port_rcv_errors +=
1373 ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
1041 cntrs->port_rcv_remphys_errors = 1374 cntrs->port_rcv_remphys_errors =
1042 ipath_snap_cntr(dd, crp->cr_rcvebpcnt); 1375 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1043 cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt); 1376 cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
@@ -1046,9 +1379,16 @@ int ipath_get_counters(struct ipath_devdata *dd,
1046 cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt); 1379 cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1047 cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt); 1380 cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1048 cntrs->local_link_integrity_errors = 1381 cntrs->local_link_integrity_errors =
1049 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ? 1382 crp->cr_locallinkintegrityerrcnt ?
1050 dd->ipath_lli_errs : dd->ipath_lli_errors; 1383 ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
1051 cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs; 1384 ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1385 dd->ipath_lli_errs : dd->ipath_lli_errors);
1386 cntrs->excessive_buffer_overrun_errors =
1387 crp->cr_excessbufferovflcnt ?
1388 ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
1389 dd->ipath_overrun_thresh_errs;
1390 cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
1391 ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
1052 1392
1053 ret = 0; 1393 ret = 0;
1054 1394
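Each of the new counter reads follows the same shape: if the chip provides a dedicated register (the 7220 does; older chips leave the field zero), snapshot it, otherwise keep the older software-derived value. A helper expressing that choice (counter_or_fallback is a hypothetical name; ipath_snap_cntr() and ipath_creg are from the driver):

	/* Prefer a real chip counter; fall back to a software tally. */
	static u64 counter_or_fallback(struct ipath_devdata *dd,
				       ipath_creg reg, u64 sw_count)
	{
		return reg ? ipath_snap_cntr(dd, reg) : sw_count;
	}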
@@ -1396,6 +1736,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1396 1736
1397 /* ib_create_ah() will initialize ah->ibah. */ 1737 /* ib_create_ah() will initialize ah->ibah. */
1398 ah->attr = *ah_attr; 1738 ah->attr = *ah_attr;
1739 ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
1399 1740
1400 ret = &ah->ibah; 1741 ret = &ah->ibah;
1401 1742
@@ -1429,6 +1770,7 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1429 struct ipath_ah *ah = to_iah(ibah); 1770 struct ipath_ah *ah = to_iah(ibah);
1430 1771
1431 *ah_attr = ah->attr; 1772 *ah_attr = ah->attr;
1773 ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
1432 1774
1433 return 0; 1775 return 0;
1434} 1776}
@@ -1578,6 +1920,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1578 struct ipath_verbs_counters cntrs; 1920 struct ipath_verbs_counters cntrs;
1579 struct ipath_ibdev *idev; 1921 struct ipath_ibdev *idev;
1580 struct ib_device *dev; 1922 struct ib_device *dev;
1923 struct ipath_verbs_txreq *tx;
1924 unsigned i;
1581 int ret; 1925 int ret;
1582 1926
1583 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); 1927 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
@@ -1588,6 +1932,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1588 1932
1589 dev = &idev->ibdev; 1933 dev = &idev->ibdev;
1590 1934
1935 if (dd->ipath_sdma_descq_cnt) {
1936 tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
1937 GFP_KERNEL);
1938 if (tx == NULL) {
1939 ret = -ENOMEM;
1940 goto err_tx;
1941 }
1942 } else
1943 tx = NULL;
1944 idev->txreq_bufs = tx;
1945
1591 /* Only need to initialize non-zero fields. */ 1946 /* Only need to initialize non-zero fields. */
1592 spin_lock_init(&idev->n_pds_lock); 1947 spin_lock_init(&idev->n_pds_lock);
1593 spin_lock_init(&idev->n_ahs_lock); 1948 spin_lock_init(&idev->n_ahs_lock);
@@ -1628,6 +1983,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1628 INIT_LIST_HEAD(&idev->pending[2]); 1983 INIT_LIST_HEAD(&idev->pending[2]);
1629 INIT_LIST_HEAD(&idev->piowait); 1984 INIT_LIST_HEAD(&idev->piowait);
1630 INIT_LIST_HEAD(&idev->rnrwait); 1985 INIT_LIST_HEAD(&idev->rnrwait);
1986 INIT_LIST_HEAD(&idev->txreq_free);
1631 idev->pending_index = 0; 1987 idev->pending_index = 0;
1632 idev->port_cap_flags = 1988 idev->port_cap_flags =
1633 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP; 1989 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
@@ -1659,6 +2015,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1659 cntrs.excessive_buffer_overrun_errors; 2015 cntrs.excessive_buffer_overrun_errors;
1660 idev->z_vl15_dropped = cntrs.vl15_dropped; 2016 idev->z_vl15_dropped = cntrs.vl15_dropped;
1661 2017
2018 for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
2019 list_add(&tx->txreq.list, &idev->txreq_free);
2020
1662 /* 2021 /*
1663 * The system image GUID is supposed to be the same for all 2022 * The system image GUID is supposed to be the same for all
1664 * IB HCAs in a single system but since there can be other 2023 * IB HCAs in a single system but since there can be other
@@ -1708,6 +2067,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1708 dev->phys_port_cnt = 1; 2067 dev->phys_port_cnt = 1;
1709 dev->num_comp_vectors = 1; 2068 dev->num_comp_vectors = 1;
1710 dev->dma_device = &dd->pcidev->dev; 2069 dev->dma_device = &dd->pcidev->dev;
2070 dev->class_dev.dev = dev->dma_device;
1711 dev->query_device = ipath_query_device; 2071 dev->query_device = ipath_query_device;
1712 dev->modify_device = ipath_modify_device; 2072 dev->modify_device = ipath_modify_device;
1713 dev->query_port = ipath_query_port; 2073 dev->query_port = ipath_query_port;
@@ -1772,6 +2132,8 @@ err_reg:
1772err_lk: 2132err_lk:
1773 kfree(idev->qp_table.table); 2133 kfree(idev->qp_table.table);
1774err_qp: 2134err_qp:
2135 kfree(idev->txreq_bufs);
2136err_tx:
1775 ib_dealloc_device(dev); 2137 ib_dealloc_device(dev);
1776 ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); 2138 ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1777 idev = NULL; 2139 idev = NULL;
@@ -1806,6 +2168,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
1806 ipath_free_all_qps(&dev->qp_table); 2168 ipath_free_all_qps(&dev->qp_table);
1807 kfree(dev->qp_table.table); 2169 kfree(dev->qp_table.table);
1808 kfree(dev->lk_table.table); 2170 kfree(dev->lk_table.table);
2171 kfree(dev->txreq_bufs);
1809 ib_dealloc_device(ibdev); 2172 ib_dealloc_device(ibdev);
1810} 2173}
1811 2174
@@ -1853,13 +2216,15 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
1853 "RC stalls %d\n" 2216 "RC stalls %d\n"
1854 "piobuf wait %d\n" 2217 "piobuf wait %d\n"
1855 "no piobuf %d\n" 2218 "no piobuf %d\n"
2219 "unaligned %d\n"
1856 "PKT drops %d\n" 2220 "PKT drops %d\n"
1857 "WQE errs %d\n", 2221 "WQE errs %d\n",
1858 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, 2222 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1859 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, 2223 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1860 dev->n_other_naks, dev->n_timeouts, 2224 dev->n_other_naks, dev->n_timeouts,
1861 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait, 2225 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1862 dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); 2226 dev->n_no_piobuf, dev->n_unaligned,
2227 dev->n_pkt_drops, dev->n_wqe_errs);
1863 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { 2228 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1864 const struct ipath_opcode_stats *si = &dev->opstats[i]; 2229 const struct ipath_opcode_stats *si = &dev->opstats[i];
1865 2230