Diffstat (limited to 'drivers/infiniband/hw')
30 files changed, 475 insertions, 402 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 593e28969c69..46762387f5f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,11 +60,11 @@ | |||
60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ | 60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ |
61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ | 61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ |
62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ | 62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ |
63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */ | 63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ |
64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */ | 64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ |
65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */ | 65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ |
66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */ | 66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */ |
67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */ | 67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */ |
68 | 68 | ||
69 | #else /* _IPATH_DEBUGGING */ | 69 | #else /* _IPATH_DEBUGGING */ |
70 | 70 | ||
@@ -79,11 +79,12 @@ | |||
79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ | 79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ |
80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ | 80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ |
81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ | 81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ |
82 | #define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */ | 82 | #define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */ |
83 | /* print mmap/nopage stuff, not using VDBG any more */ | 83 | /* print mmap/nopage stuff, not using VDBG any more */ |
84 | #define __IPATH_MMDBG 0x0 | 84 | #define __IPATH_MMDBG 0x0 |
85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ | 85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ |
86 | #define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | 86 | #define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */ |
87 | #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | ||
87 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ | 88 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ |
88 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ | 89 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ |
89 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ | 90 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ |
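Note: the bits above only do anything when tested against a runtime debug mask. A minimal sketch of how such a bit typically gates output; the ipath_debug mask and the wrapper macro below are illustrative assumptions, not the driver's exact definitions:

	extern unsigned ipath_debug;		/* runtime-settable debug mask */

	#define example_cdbg(which, fmt, ...) \
		do { \
			if (__IPATH_##which & ipath_debug) \
				printk(KERN_DEBUG "ipath: " fmt, ##__VA_ARGS__); \
		} while (0)

	/* e.g. example_cdbg(SMADBG, "sma pkt len %u\n", len); in the
	 * !_IPATH_DEBUGGING build the bit is 0, so the test folds away. */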
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index cd533cf951c2..28ddceb260e8 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp) | |||
277 | 277 | ||
278 | bail: | 278 | bail: |
279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
280 | mutex_unlock(&ipath_mutex); | ||
281 | 280 | ||
282 | /* Only expose a way to reset the device if we | 281 | /* Only expose a way to reset the device if we |
283 | make it into diag mode. */ | 282 | make it into diag mode. */ |
284 | if (ret == 0) | 283 | if (ret == 0) |
285 | ipath_expose_reset(&dd->pcidev->dev); | 284 | ipath_expose_reset(&dd->pcidev->dev); |
286 | 285 | ||
286 | mutex_unlock(&ipath_mutex); | ||
287 | |||
287 | return ret; | 288 | return ret; |
288 | } | 289 | } |
289 | 290 | ||
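Note: the fix above widens the mutex's coverage; ipath_expose_reset() now runs before ipath_mutex is dropped, presumably so a concurrent opener cannot race with diag mode being finished setting up. Condensed shape of the corrected exit path (error handling trimmed):

	bail:
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		if (ret == 0)
			ipath_expose_reset(&dd->pcidev->dev); /* still under mutex */
		mutex_unlock(&ipath_mutex);	/* released only when done */
		return ret;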
@@ -365,15 +366,3 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data, | |||
365 | bail: | 366 | bail: |
366 | return ret; | 367 | return ret; |
367 | } | 368 | } |
368 | |||
369 | void ipath_diag_bringup_link(struct ipath_devdata *dd) | ||
370 | { | ||
371 | if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE)) | ||
372 | return; | ||
373 | |||
374 | diag_set_link = 1; | ||
375 | ipath_cdbg(VERBOSE, "Trying to set to set link active for " | ||
376 | "diag pkt\n"); | ||
377 | ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM); | ||
378 | ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE); | ||
379 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 58a94efb0070..dddcdae736ac 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *, | |||
116 | #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 | 116 | #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 |
117 | 117 | ||
118 | static const struct pci_device_id ipath_pci_tbl[] = { | 118 | static const struct pci_device_id ipath_pci_tbl[] = { |
119 | {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, | 119 | { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, |
120 | PCI_DEVICE_ID_INFINIPATH_HT)}, | 120 | { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, |
121 | {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, | 121 | { 0, } |
122 | PCI_DEVICE_ID_INFINIPATH_PE800)}, | ||
123 | }; | 122 | }; |
124 | 123 | ||
125 | MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); | 124 | MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); |
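Note: besides compacting the entries, the new table gains a { 0, } terminator. The PCI core scans ID tables until it hits an all-zero entry, so the old, unterminated table could be scanned past its end. Generic shape (vendor/device IDs here are placeholders):

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(0x1234, 0x0001) },	/* one supported device */
		{ 0, }				/* all-zero sentinel stops the scan */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);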
@@ -418,9 +417,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
418 | 417 | ||
419 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | 418 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); |
420 | if (ret) { | 419 | if (ret) { |
421 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | 420 | /* |
422 | "fails: %d\n", dd->ipath_unit, ret); | 421 | * if the 64 bit setup fails, try 32 bit. Some systems |
423 | goto bail_regions; | 422 | * do not setup 64 bit maps on systems with 2GB or less |
423 | * memory installed. | ||
424 | */ | ||
425 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
426 | if (ret) { | ||
427 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | ||
428 | "fails: %d\n", dd->ipath_unit, ret); | ||
429 | goto bail_regions; | ||
430 | } | ||
431 | else | ||
432 | ipath_dbg("No 64bit DMA mask, used 32 bit mask\n"); | ||
424 | } | 433 | } |
425 | 434 | ||
426 | pci_set_master(pdev); | 435 | pci_set_master(pdev); |
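Note: this is the standard 64-then-32-bit DMA mask fallback. Stripped of the diagnostics, the hunk above reduces to:

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		/* no 64-bit DMA mapping on this platform; try 32-bit */
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
			goto bail_regions;	/* neither mask usable */
	}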
@@ -1729,7 +1738,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq) | |||
1729 | } | 1738 | } |
1730 | } | 1739 | } |
1731 | 1740 | ||
1732 | int __init infinipath_init(void) | 1741 | static int __init infinipath_init(void) |
1733 | { | 1742 | { |
1734 | int ret; | 1743 | int ret; |
1735 | 1744 | ||
@@ -1896,19 +1905,19 @@ static void __exit infinipath_cleanup(void) | |||
1896 | } else | 1905 | } else |
1897 | ipath_dbg("irq is 0, not doing free_irq " | 1906 | ipath_dbg("irq is 0, not doing free_irq " |
1898 | "for unit %u\n", dd->ipath_unit); | 1907 | "for unit %u\n", dd->ipath_unit); |
1899 | dd->pcidev = NULL; | ||
1900 | } | ||
1901 | 1908 | ||
1902 | /* | 1909 | /* |
1903 | * we check for NULL here, because it's outside the kregbase | 1910 | * we check for NULL here, because it's outside |
1904 | * check, and we need to call it after the free_irq. Thus | 1911 | * the kregbase check, and we need to call it |
1905 | * it's possible that the function pointers were never | 1912 | * after the free_irq. Thus it's possible that |
1906 | * initialized. | 1913 | * the function pointers were never initialized. |
1907 | */ | 1914 | */ |
1908 | if (dd->ipath_f_cleanup) | 1915 | if (dd->ipath_f_cleanup) |
1909 | /* clean up chip-specific stuff */ | 1916 | /* clean up chip-specific stuff */ |
1910 | dd->ipath_f_cleanup(dd); | 1917 | dd->ipath_f_cleanup(dd); |
1911 | 1918 | ||
1919 | dd->pcidev = NULL; | ||
1920 | } | ||
1912 | spin_lock_irqsave(&ipath_devs_lock, flags); | 1921 | spin_lock_irqsave(&ipath_devs_lock, flags); |
1913 | } | 1922 | } |
1914 | 1923 | ||
@@ -1949,7 +1958,7 @@ int ipath_reset_device(int unit) | |||
1949 | } | 1958 | } |
1950 | 1959 | ||
1951 | if (dd->ipath_pd) | 1960 | if (dd->ipath_pd) |
1952 | for (i = 1; i < dd->ipath_portcnt; i++) { | 1961 | for (i = 1; i < dd->ipath_cfgports; i++) { |
1953 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { | 1962 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { |
1954 | ipath_dbg("unit %u port %d is in use " | 1963 | ipath_dbg("unit %u port %d is in use " |
1955 | "(PID %u cmd %s), can't reset\n", | 1964 | "(PID %u cmd %s), can't reset\n", |
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index f11a900e8cd7..a2f1ceafcca9 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust) | |||
505 | * ipath_get_guid - get the GUID from the i2c device | 505 | * ipath_get_guid - get the GUID from the i2c device |
506 | * @dd: the infinipath device | 506 | * @dd: the infinipath device |
507 | * | 507 | * |
508 | * When we add the multi-chip support, we will probably have to add | 508 | * We have the capability to use the ipath_nguid field, and get |
509 | * the ability to use the number of guids field, and get the guid from | 509 | * the guid from the first chip's flash, to use for all of them. |
510 | * the first chip's flash, to use for all of them. | ||
511 | */ | 510 | */ |
512 | void ipath_get_guid(struct ipath_devdata *dd) | 511 | void ipath_get_eeprom_info(struct ipath_devdata *dd) |
513 | { | 512 | { |
514 | void *buf; | 513 | void *buf; |
515 | struct ipath_flash *ifp; | 514 | struct ipath_flash *ifp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index c347191f02bf..ada267e41f6c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd, | |||
139 | kinfo->spi_piosize = dd->ipath_ibmaxlen; | 139 | kinfo->spi_piosize = dd->ipath_ibmaxlen; |
140 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ | 140 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ |
141 | kinfo->spi_port = pd->port_port; | 141 | kinfo->spi_port = pd->port_port; |
142 | kinfo->spi_sw_version = IPATH_USER_SWVERSION; | 142 | kinfo->spi_sw_version = IPATH_KERN_SWVERSION; |
143 | kinfo->spi_hw_version = dd->ipath_revision; | 143 | kinfo->spi_hw_version = dd->ipath_revision; |
144 | 144 | ||
145 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) | 145 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) |
@@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp, | |||
1224 | 1224 | ||
1225 | if (tail == head) { | 1225 | if (tail == head) { |
1226 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); | 1226 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); |
1227 | if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ | ||
1228 | (void)ipath_write_ureg(dd, ur_rcvhdrhead, | ||
1229 | dd->ipath_rhdrhead_intr_off | ||
1230 | | head, pd->port_port); | ||
1227 | poll_wait(fp, &pd->port_wait, pt); | 1231 | poll_wait(fp, &pd->port_wait, pt); |
1228 | 1232 | ||
1229 | if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { | 1233 | if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { |
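Note: the added write re-arms the receive-header interrupt before poll_wait() registers the waiter. Without it, a packet landing between the tail == head emptiness check and the sleep could go unannounced, leaving the poller stuck. The pattern, reduced to its core:

	if (tail == head) {			/* ring observed empty */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		if (dd->ipath_rhdrhead_intr_off)	/* chip can intr on head */
			(void) ipath_write_ureg(dd, ur_rcvhdrhead,
						dd->ipath_rhdrhead_intr_off | head,
						pd->port_port);
		poll_wait(fp, &pd->port_wait, pt);	/* only then wait */
	}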
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
index 4652435998f3..fac0a2b74de2 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
607 | case 4: /* Ponderosa is one of the bringup boards */ | 607 | case 4: /* Ponderosa is one of the bringup boards */ |
608 | n = "Ponderosa"; | 608 | n = "Ponderosa"; |
609 | break; | 609 | break; |
610 | case 5: /* HT-460 original production board */ | 610 | case 5: |
611 | /* | ||
612 | * HT-460 original production board; two production levels, with | ||
613 | * different serial number ranges. See ipath_ht_early_init() for | ||
614 | * case where we enable IPATH_GPIO_INTR for later serial # range. | ||
615 | */ | ||
611 | n = "InfiniPath_HT-460"; | 616 | n = "InfiniPath_HT-460"; |
612 | break; | 617 | break; |
613 | case 6: | 618 | case 6: |
@@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
642 | if (n) | 647 | if (n) |
643 | snprintf(name, namelen, "%s", n); | 648 | snprintf(name, namelen, "%s", n); |
644 | 649 | ||
645 | if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) { | 650 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { |
646 | /* | 651 | /* |
647 | * This version of the driver only supports the HT-400 | 652 | * This version of the driver only supports the HT-400 |
648 | * Rev 3.2 | 653 | * Rev 3.2 |
@@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1520 | */ | 1525 | */ |
1521 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 1526 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
1522 | INFINIPATH_S_ABORT); | 1527 | INFINIPATH_S_ABORT); |
1528 | |||
1529 | ipath_get_eeprom_info(dd); | ||
1530 | if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && | ||
1531 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { | ||
1532 | /* | ||
1533 | * Later production HT-460 has same changes as HT-465, so | ||
1534 | * can use GPIO interrupts. They have serial #'s starting | ||
1535 | * with 128, rather than 112. | ||
1536 | */ | ||
1537 | dd->ipath_flags |= IPATH_GPIO_INTR; | ||
1538 | dd->ipath_flags &= ~IPATH_POLL_RX_INTR; | ||
1539 | } | ||
1523 | return 0; | 1540 | return 0; |
1524 | } | 1541 | } |
1525 | 1542 | ||
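Note: the three per-character tests amount to a prefix compare; an equivalent, arguably clearer form (assuming ipath_serial holds at least three bytes) would be:

	if (dd->ipath_boardrev == 5 &&
	    memcmp(dd->ipath_serial, "128", 3) == 0) {
		/* later production HT-460, same fixes as HT-465 */
		dd->ipath_flags |= IPATH_GPIO_INTR;
		dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
	}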
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 2823ff9c0c62..dc83250d26a6 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); | |||
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Number of buffers reserved for driver (layered drivers and SMA | 55 | * Number of buffers reserved for driver (layered drivers and SMA |
56 | * send). Reserved at end of buffer list. | 56 | * send). Reserved at end of buffer list. Initialized based on |
57 | * number of PIO buffers if not set via module interface. | ||
58 | * The problem with this is that it's global, but we'll use different | ||
59 | * numbers for different chip types. So the default value is not | ||
60 | * very useful. I've redefined it for the 1.3 release so that it's | ||
61 | * zero unless set by the user to something else, in which case we | ||
62 | * try to respect it. | ||
57 | */ | 63 | */ |
58 | static ushort ipath_kpiobufs = 32; | 64 | static ushort ipath_kpiobufs; |
59 | 65 | ||
60 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); | 66 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); |
61 | 67 | ||
62 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint, | 68 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, |
63 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); | 69 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); |
64 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); | 70 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); |
65 | 71 | ||
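Note: the getter swap matters because ipath_kpiobufs is a ushort; param_get_uint would read four bytes out of a two-byte variable, so sysfs reads could return garbage. The rule in miniature (the setter name is illustrative):

	static ushort example_bufs;		/* storage is ushort... */
	static int example_set(const char *val, struct kernel_param *kp);

	module_param_call(bufs, example_set, param_get_ushort,
			  &example_bufs, S_IWUSR | S_IRUGO);	/* ...getter must match */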
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
531 | * Don't clear ipath_flags as 8bit mode was set before | 537 | * Don't clear ipath_flags as 8bit mode was set before |
532 | * entering this func. However, we do set the linkstate to | 538 | * entering this func. However, we do set the linkstate to |
533 | * unknown, so we can watch for a transition. | 539 | * unknown, so we can watch for a transition. |
540 | * PRESENT is set because we want register reads to work, | ||
541 | * and the kernel infrastructure saw it in config space; | ||
542 | * We clear it if we have failures. | ||
534 | */ | 543 | */ |
535 | dd->ipath_flags |= IPATH_LINKUNK; | 544 | dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; |
536 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | | 545 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | |
537 | IPATH_LINKDOWN | IPATH_LINKINIT); | 546 | IPATH_LINKDOWN | IPATH_LINKINIT); |
538 | 547 | ||
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
560 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { | 569 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { |
561 | ipath_dev_err(dd, "Register read failures from chip, " | 570 | ipath_dev_err(dd, "Register read failures from chip, " |
562 | "giving up initialization\n"); | 571 | "giving up initialization\n"); |
572 | dd->ipath_flags &= ~IPATH_PRESENT; | ||
563 | ret = -ENODEV; | 573 | ret = -ENODEV; |
564 | goto done; | 574 | goto done; |
565 | } | 575 | } |
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
682 | */ | 692 | */ |
683 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) | 693 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) |
684 | / (sizeof(u64) * BITS_PER_BYTE / 2); | 694 | / (sizeof(u64) * BITS_PER_BYTE / 2); |
685 | if (!ipath_kpiobufs) /* have to have at least 1, for SMA */ | 695 | if (ipath_kpiobufs == 0) { |
686 | kpiobufs = ipath_kpiobufs = 1; | 696 | /* not set by user, or set explictly to default */ |
687 | else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) < | 697 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) |
688 | (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) { | 698 | kpiobufs = 32; |
689 | dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) " | 699 | else |
690 | "for %u ports to have %u each!\n", | 700 | kpiobufs = 16; |
691 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, | 701 | } |
692 | dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT); | 702 | else |
693 | kpiobufs = 1; /* reserve just the minimum for SMA/ether */ | ||
694 | } else | ||
695 | kpiobufs = ipath_kpiobufs; | 703 | kpiobufs = ipath_kpiobufs; |
696 | 704 | ||
697 | if (kpiobufs > | 705 | if (kpiobufs > |
@@ -871,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
871 | 879 | ||
872 | done: | 880 | done: |
873 | if (!ret) { | 881 | if (!ret) { |
874 | ipath_get_guid(dd); | ||
875 | *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; | 882 | *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; |
876 | if (!dd->ipath_f_intrsetup(dd)) { | 883 | if (!dd->ipath_f_intrsetup(dd)) { |
877 | /* now we can enable all interrupts from the chip */ | 884 | /* now we can enable all interrupts from the chip */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 60f5f4108069..3e72a1fe3d73 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -172,8 +172,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
172 | "was %s\n", dd->ipath_unit, | 172 | "was %s\n", dd->ipath_unit, |
173 | ib_linkstate(lstate), | 173 | ib_linkstate(lstate), |
174 | ib_linkstate((unsigned) | 174 | ib_linkstate((unsigned) |
175 | dd->ipath_lastibcstat | 175 | dd->ipath_lastibcstat |
176 | & IPATH_IBSTATE_MASK)); | 176 | & IPATH_IBSTATE_MASK)); |
177 | } | 177 | } |
178 | else { | 178 | else { |
179 | lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; | 179 | lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; |
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd) | |||
665 | 665 | ||
666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); | 666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); |
667 | if (ret > 0) | 667 | if (ret > 0) |
668 | goto clear; | 668 | goto set; |
669 | 669 | ||
670 | ret = __ipath_verbs_piobufavail(dd); | 670 | ret = __ipath_verbs_piobufavail(dd); |
671 | if (ret > 0) | 671 | if (ret > 0) |
672 | goto clear; | 672 | goto set; |
673 | 673 | ||
674 | return; | 674 | return; |
675 | clear: | 675 | set: |
676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); | 676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); |
677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
678 | dd->ipath_sendctrl); | 678 | dd->ipath_sendctrl); |
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat) | |||
719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | 719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) |
720 | { | 720 | { |
721 | struct ipath_devdata *dd = data; | 721 | struct ipath_devdata *dd = data; |
722 | u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | 722 | u32 istat; |
723 | ipath_err_t estat = 0; | 723 | ipath_err_t estat = 0; |
724 | static unsigned unexpected = 0; | 724 | static unsigned unexpected = 0; |
725 | irqreturn_t ret; | 725 | irqreturn_t ret; |
726 | 726 | ||
727 | if(!(dd->ipath_flags & IPATH_PRESENT)) { | ||
728 | /* this is mostly so we don't try to touch the chip while | ||
729 | * it is being reset */ | ||
730 | /* | ||
731 | * This return value is perhaps odd, but we do not want the | ||
732 | * interrupt core code to remove our interrupt handler | ||
733 | * because we don't appear to be handling an interrupt | ||
734 | * during a chip reset. | ||
735 | */ | ||
736 | return IRQ_HANDLED; | ||
737 | } | ||
738 | |||
739 | istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | ||
727 | if (unlikely(!istat)) { | 740 | if (unlikely(!istat)) { |
728 | ipath_stats.sps_nullintr++; | 741 | ipath_stats.sps_nullintr++; |
729 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | 742 | ret = IRQ_NONE; /* not our interrupt, or already handled */ |
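Note: returning IRQ_HANDLED rather than IRQ_NONE while the chip is held in reset is deliberate, as the block comment explains: a stream of IRQ_NONE returns can trip the kernel's spurious-interrupt detection and get the line disabled. Reduced shape of the guard:

	if (!(dd->ipath_flags & IPATH_PRESENT))
		return IRQ_HANDLED;	/* chip mid-reset: no MMIO, but claim it */

	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);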
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 159d0aed31a5..5d92d57b6f54 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -528,7 +528,6 @@ extern spinlock_t ipath_devs_lock; | |||
528 | extern struct ipath_devdata *ipath_lookup(int unit); | 528 | extern struct ipath_devdata *ipath_lookup(int unit); |
529 | 529 | ||
530 | extern u16 ipath_layer_rcv_opcode; | 530 | extern u16 ipath_layer_rcv_opcode; |
531 | extern int ipath_verbs_registered; | ||
532 | extern int __ipath_layer_intr(struct ipath_devdata *, u32); | 531 | extern int __ipath_layer_intr(struct ipath_devdata *, u32); |
533 | extern int ipath_layer_intr(struct ipath_devdata *, u32); | 532 | extern int ipath_layer_intr(struct ipath_devdata *, u32); |
534 | extern int __ipath_layer_rcv(struct ipath_devdata *, void *, | 533 | extern int __ipath_layer_rcv(struct ipath_devdata *, void *, |
@@ -651,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); | |||
651 | void ipath_init_pe800_funcs(struct ipath_devdata *); | 650 | void ipath_init_pe800_funcs(struct ipath_devdata *); |
652 | /* init HT-400-specific func */ | 651 | /* init HT-400-specific func */ |
653 | void ipath_init_ht400_funcs(struct ipath_devdata *); | 652 | void ipath_init_ht400_funcs(struct ipath_devdata *); |
654 | void ipath_get_guid(struct ipath_devdata *); | 653 | void ipath_get_eeprom_info(struct ipath_devdata *); |
655 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); | 654 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); |
656 | 655 | ||
657 | /* | 656 | /* |
@@ -732,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg, | |||
732 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, | 731 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, |
733 | ipath_ureg regno, int port) | 732 | ipath_ureg regno, int port) |
734 | { | 733 | { |
735 | if (!dd->ipath_kregbase) | 734 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
736 | return 0; | 735 | return 0; |
737 | 736 | ||
738 | return readl(regno + (u64 __iomem *) | 737 | return readl(regno + (u64 __iomem *) |
@@ -763,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd, | |||
763 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | 762 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, |
764 | ipath_kreg regno) | 763 | ipath_kreg regno) |
765 | { | 764 | { |
766 | if (!dd->ipath_kregbase) | 765 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
767 | return -1; | 766 | return -1; |
768 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); | 767 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); |
769 | } | 768 | } |
@@ -771,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | |||
771 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, | 770 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, |
772 | ipath_kreg regno) | 771 | ipath_kreg regno) |
773 | { | 772 | { |
774 | if (!dd->ipath_kregbase) | 773 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
775 | return -1; | 774 | return -1; |
776 | 775 | ||
777 | return readq(&dd->ipath_kregbase[regno]); | 776 | return readq(&dd->ipath_kregbase[regno]); |
@@ -787,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd, | |||
787 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | 786 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, |
788 | ipath_sreg regno) | 787 | ipath_sreg regno) |
789 | { | 788 | { |
790 | if (!dd->ipath_kregbase) | 789 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
791 | return 0; | 790 | return 0; |
792 | 791 | ||
793 | return readq(regno + (u64 __iomem *) | 792 | return readq(regno + (u64 __iomem *) |
@@ -798,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | |||
798 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, | 797 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, |
799 | ipath_sreg regno) | 798 | ipath_sreg regno) |
800 | { | 799 | { |
801 | if (!dd->ipath_kregbase) | 800 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
802 | return 0; | 801 | return 0; |
803 | return readl(regno + (u64 __iomem *) | 802 | return readl(regno + (u64 __iomem *) |
804 | (dd->ipath_cregbase + | 803 | (dd->ipath_cregbase + |
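Note: all five read accessors in this header now carry the same guard, refusing to touch the BAR while IPATH_PRESENT is clear and returning 0 or -1 as a "no data" value instead. A sketch of the shared test factored into one helper; the helper is illustrative, the patch open-codes it in each accessor:

	static inline int ipath_regs_usable(const struct ipath_devdata *dd)
	{
		return dd->ipath_kregbase &&
		       (dd->ipath_flags & IPATH_PRESENT);
	}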
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index aa33b0e9f2f6..5ae8761f9dd2 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | |||
136 | ret = 1; | 136 | ret = 1; |
137 | goto bail; | 137 | goto bail; |
138 | } | 138 | } |
139 | spin_lock(&rkt->lock); | ||
140 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; | 139 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; |
141 | spin_unlock(&rkt->lock); | ||
142 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { | 140 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { |
143 | ret = 0; | 141 | ret = 0; |
144 | goto bail; | 142 | goto bail; |
@@ -184,8 +182,6 @@ bail: | |||
184 | * @acc: access flags | 182 | * @acc: access flags |
185 | * | 183 | * |
186 | * Return 1 if successful, otherwise 0. | 184 | * Return 1 if successful, otherwise 0. |
187 | * | ||
188 | * The QP r_rq.lock should be held. | ||
189 | */ | 185 | */ |
190 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | 186 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, |
191 | u32 len, u64 vaddr, u32 rkey, int acc) | 187 | u32 len, u64 vaddr, u32 rkey, int acc) |
@@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | |||
196 | size_t off; | 192 | size_t off; |
197 | int ret; | 193 | int ret; |
198 | 194 | ||
199 | spin_lock(&rkt->lock); | ||
200 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; | 195 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; |
201 | spin_unlock(&rkt->lock); | ||
202 | if (unlikely(mr == NULL || mr->lkey != rkey)) { | 196 | if (unlikely(mr == NULL || mr->lkey != rkey)) { |
203 | ret = 0; | 197 | ret = 0; |
204 | goto bail; | 198 | goto bail; |
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 2cabf6340572..9ec4ac77b87f 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -46,13 +46,15 @@ | |||
46 | /* Acquire before ipath_devs_lock. */ | 46 | /* Acquire before ipath_devs_lock. */ |
47 | static DEFINE_MUTEX(ipath_layer_mutex); | 47 | static DEFINE_MUTEX(ipath_layer_mutex); |
48 | 48 | ||
49 | static int ipath_verbs_registered; | ||
50 | |||
49 | u16 ipath_layer_rcv_opcode; | 51 | u16 ipath_layer_rcv_opcode; |
52 | |||
50 | static int (*layer_intr)(void *, u32); | 53 | static int (*layer_intr)(void *, u32); |
51 | static int (*layer_rcv)(void *, void *, struct sk_buff *); | 54 | static int (*layer_rcv)(void *, void *, struct sk_buff *); |
52 | static int (*layer_rcv_lid)(void *, void *); | 55 | static int (*layer_rcv_lid)(void *, void *); |
53 | static int (*verbs_piobufavail)(void *); | 56 | static int (*verbs_piobufavail)(void *); |
54 | static void (*verbs_rcv)(void *, void *, void *, u32); | 57 | static void (*verbs_rcv)(void *, void *, void *, u32); |
55 | int ipath_verbs_registered; | ||
56 | 58 | ||
57 | static void *(*layer_add_one)(int, struct ipath_devdata *); | 59 | static void *(*layer_add_one)(int, struct ipath_devdata *); |
58 | static void (*layer_remove_one)(void *); | 60 | static void (*layer_remove_one)(void *); |
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void) | |||
586 | verbs_rcv = NULL; | 588 | verbs_rcv = NULL; |
587 | verbs_timer_cb = NULL; | 589 | verbs_timer_cb = NULL; |
588 | 590 | ||
591 | ipath_verbs_registered = 0; | ||
592 | |||
589 | mutex_unlock(&ipath_layer_mutex); | 593 | mutex_unlock(&ipath_layer_mutex); |
590 | } | 594 | } |
591 | 595 | ||
@@ -868,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, | |||
868 | update_sge(ss, len); | 872 | update_sge(ss, len); |
869 | length -= len; | 873 | length -= len; |
870 | } | 874 | } |
875 | /* Update address before sending packet. */ | ||
876 | update_sge(ss, length); | ||
871 | /* must flush early everything before trigger word */ | 877 | /* must flush early everything before trigger word */ |
872 | ipath_flush_wc(); | 878 | ipath_flush_wc(); |
873 | __raw_writel(last, piobuf); | 879 | __raw_writel(last, piobuf); |
874 | /* be sure trigger word is written */ | 880 | /* be sure trigger word is written */ |
875 | ipath_flush_wc(); | 881 | ipath_flush_wc(); |
876 | update_sge(ss, length); | ||
877 | } | 882 | } |
878 | 883 | ||
879 | /** | 884 | /** |
@@ -939,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | |||
939 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && | 944 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && |
940 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { | 945 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { |
941 | u32 w; | 946 | u32 w; |
947 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
942 | 948 | ||
949 | /* Update address before sending packet. */ | ||
950 | update_sge(ss, len); | ||
943 | /* Need to round up for the last dword in the packet. */ | 951 | /* Need to round up for the last dword in the packet. */ |
944 | w = (len + 3) >> 2; | 952 | w = (len + 3) >> 2; |
945 | __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); | 953 | __iowrite32_copy(piobuf, addr, w - 1); |
946 | /* must flush early everything before trigger word */ | 954 | /* must flush early everything before trigger word */ |
947 | ipath_flush_wc(); | 955 | ipath_flush_wc(); |
948 | __raw_writel(((u32 *) ss->sge.vaddr)[w - 1], | 956 | __raw_writel(addr[w - 1], piobuf + w - 1); |
949 | piobuf + w - 1); | ||
950 | /* be sure trigger word is written */ | 957 | /* be sure trigger word is written */ |
951 | ipath_flush_wc(); | 958 | ipath_flush_wc(); |
952 | update_sge(ss, len); | ||
953 | ret = 0; | 959 | ret = 0; |
954 | goto bail; | 960 | goto bail; |
955 | } | 961 | } |
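Note: both hunks in this file make the same correction: update_sge() now runs before the trigger word is written. The trigger word is what launches the packet, so the SGE bookkeeping must be complete by then; afterwards the send, and potentially its completion handling, is already in flight. The required ordering against the write-combining PIO mapping:

	update_sge(ss, length);		/* 1: bookkeeping first */
	ipath_flush_wc();		/* 2: drain WC buffers before trigger */
	__raw_writel(last, piobuf);	/* 3: trigger word starts the send */
	ipath_flush_wc();		/* 4: make sure the trigger posts */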
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e693a7a82667..02e8c75b24f6 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -305,8 +305,8 @@ static const struct ipath_cregs ipath_pe_cregs = { | |||
305 | * we'll print them and continue. We reuse the same message buffer as | 305 | * we'll print them and continue. We reuse the same message buffer as |
306 | * ipath_handle_errors() to avoid excessive stack usage. | 306 | * ipath_handle_errors() to avoid excessive stack usage. |
307 | */ | 307 | */ |
308 | void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | 308 | static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, |
309 | size_t msgl) | 309 | size_t msgl) |
310 | { | 310 | { |
311 | ipath_err_t hwerrs; | 311 | ipath_err_t hwerrs; |
312 | u32 bits, ctrl; | 312 | u32 bits, ctrl; |
@@ -552,7 +552,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | |||
552 | * freeze mode), and enable hardware errors as errors (along with | 552 | * freeze mode), and enable hardware errors as errors (along with |
553 | * everything else) in errormask | 553 | * everything else) in errormask |
554 | */ | 554 | */ |
555 | void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | 555 | static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) |
556 | { | 556 | { |
557 | ipath_err_t val; | 557 | ipath_err_t val; |
558 | u64 extsval; | 558 | u64 extsval; |
@@ -577,7 +577,7 @@ void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | |||
577 | * ipath_pe_bringup_serdes - bring up the serdes | 577 | * ipath_pe_bringup_serdes - bring up the serdes |
578 | * @dd: the infinipath device | 578 | * @dd: the infinipath device |
579 | */ | 579 | */ |
580 | int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | 580 | static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) |
581 | { | 581 | { |
582 | u64 val, tmp, config1; | 582 | u64 val, tmp, config1; |
583 | int ret = 0, change = 0; | 583 | int ret = 0, change = 0; |
@@ -694,7 +694,7 @@ int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
694 | * @dd: the infinipath device | 694 | * @dd: the infinipath device |
695 | * Called when driver is being unloaded | 695 | * Called when driver is being unloaded |
696 | */ | 696 | */ |
697 | void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | 697 | static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) |
698 | { | 698 | { |
699 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | 699 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); |
700 | 700 | ||
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
972 | /* Use ERROR so it shows up in logs, etc. */ | 972 | /* Use ERROR so it shows up in logs, etc. */ |
973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", | 973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", |
974 | dd->ipath_unit); | 974 | dd->ipath_unit); |
975 | /* keep chip from being accessed in a few places */ | ||
976 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); | ||
975 | val = dd->ipath_control | INFINIPATH_C_RESET; | 977 | val = dd->ipath_control | INFINIPATH_C_RESET; |
976 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); | 978 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); |
977 | mb(); | 979 | mb(); |
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
997 | if ((r = pci_enable_device(dd->pcidev))) | 999 | if ((r = pci_enable_device(dd->pcidev))) |
998 | ipath_dev_err(dd, "pci_enable_device failed after " | 1000 | ipath_dev_err(dd, "pci_enable_device failed after " |
999 | "reset: %d\n", r); | 1001 | "reset: %d\n", r); |
1002 | /* whether it worked or not, mark as present, again */ | ||
1003 | dd->ipath_flags |= IPATH_PRESENT; | ||
1000 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); | 1004 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); |
1001 | if (val == dd->ipath_revision) { | 1005 | if (val == dd->ipath_revision) { |
1002 | ipath_cdbg(VERBOSE, "Got matching revision " | 1006 | ipath_cdbg(VERBOSE, "Got matching revision " |
@@ -1176,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) | |||
1176 | */ | 1180 | */ |
1177 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; | 1181 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; |
1178 | 1182 | ||
1183 | ipath_get_eeprom_info(dd); | ||
1184 | |||
1179 | return 0; | 1185 | return 0; |
1180 | } | 1186 | } |
1181 | 1187 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6058d70d7577..9f8855d970c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -188,8 +188,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn) | |||
188 | * Allocate the next available QPN and put the QP into the hash table. | 188 | * Allocate the next available QPN and put the QP into the hash table. |
189 | * The hash table holds a reference to the QP. | 189 | * The hash table holds a reference to the QP. |
190 | */ | 190 | */ |
191 | int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, | 191 | static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, |
192 | enum ib_qp_type type) | 192 | enum ib_qp_type type) |
193 | { | 193 | { |
194 | unsigned long flags; | 194 | unsigned long flags; |
195 | u32 qpn; | 195 | u32 qpn; |
@@ -232,7 +232,7 @@ bail: | |||
232 | * Remove the QP from the table so it can't be found asynchronously by | 232 | * Remove the QP from the table so it can't be found asynchronously by |
233 | * the receive interrupt routine. | 233 | * the receive interrupt routine. |
234 | */ | 234 | */ |
235 | void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) | 235 | static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) |
236 | { | 236 | { |
237 | struct ipath_qp *q, **qpp; | 237 | struct ipath_qp *q, **qpp; |
238 | unsigned long flags; | 238 | unsigned long flags; |
@@ -358,6 +358,65 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
358 | } | 358 | } |
359 | 359 | ||
360 | /** | 360 | /** |
361 | * ipath_error_qp - put a QP into an error state | ||
362 | * @qp: the QP to put into an error state | ||
363 | * | ||
364 | * Flushes both send and receive work queues. | ||
365 | * QP r_rq.lock and s_lock should be held. | ||
366 | */ | ||
367 | |||
368 | static void ipath_error_qp(struct ipath_qp *qp) | ||
369 | { | ||
370 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
371 | struct ib_wc wc; | ||
372 | |||
373 | _VERBS_INFO("QP%d/%d in error state\n", | ||
374 | qp->ibqp.qp_num, qp->remote_qpn); | ||
375 | |||
376 | spin_lock(&dev->pending_lock); | ||
377 | /* XXX What if its already removed by the timeout code? */ | ||
378 | if (!list_empty(&qp->timerwait)) | ||
379 | list_del_init(&qp->timerwait); | ||
380 | if (!list_empty(&qp->piowait)) | ||
381 | list_del_init(&qp->piowait); | ||
382 | spin_unlock(&dev->pending_lock); | ||
383 | |||
384 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
385 | wc.vendor_err = 0; | ||
386 | wc.byte_len = 0; | ||
387 | wc.imm_data = 0; | ||
388 | wc.qp_num = qp->ibqp.qp_num; | ||
389 | wc.src_qp = 0; | ||
390 | wc.wc_flags = 0; | ||
391 | wc.pkey_index = 0; | ||
392 | wc.slid = 0; | ||
393 | wc.sl = 0; | ||
394 | wc.dlid_path_bits = 0; | ||
395 | wc.port_num = 0; | ||
396 | |||
397 | while (qp->s_last != qp->s_head) { | ||
398 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | ||
399 | |||
400 | wc.wr_id = wqe->wr.wr_id; | ||
401 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | ||
402 | if (++qp->s_last >= qp->s_size) | ||
403 | qp->s_last = 0; | ||
404 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | ||
405 | } | ||
406 | qp->s_cur = qp->s_tail = qp->s_head; | ||
407 | qp->s_hdrwords = 0; | ||
408 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
409 | |||
410 | wc.opcode = IB_WC_RECV; | ||
411 | while (qp->r_rq.tail != qp->r_rq.head) { | ||
412 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; | ||
413 | if (++qp->r_rq.tail >= qp->r_rq.size) | ||
414 | qp->r_rq.tail = 0; | ||
415 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /** | ||
361 | * ipath_modify_qp - modify the attributes of a queue pair | 420 | * ipath_modify_qp - modify the attributes of a queue pair |
362 | * @ibqp: the queue pair who's attributes we're modifying | 421 | * @ibqp: the queue pair who's attributes we're modifying |
363 | * @attr: the new attributes | 422 | * @attr: the new attributes |
@@ -368,6 +427,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
368 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 427 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
369 | int attr_mask) | 428 | int attr_mask) |
370 | { | 429 | { |
430 | struct ipath_ibdev *dev = to_idev(ibqp->device); | ||
371 | struct ipath_qp *qp = to_iqp(ibqp); | 431 | struct ipath_qp *qp = to_iqp(ibqp); |
372 | enum ib_qp_state cur_state, new_state; | 432 | enum ib_qp_state cur_state, new_state; |
373 | unsigned long flags; | 433 | unsigned long flags; |
@@ -384,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
384 | attr_mask)) | 444 | attr_mask)) |
385 | goto inval; | 445 | goto inval; |
386 | 446 | ||
447 | if (attr_mask & IB_QP_AV) | ||
448 | if (attr->ah_attr.dlid == 0 || | ||
449 | attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) | ||
450 | goto inval; | ||
451 | |||
452 | if (attr_mask & IB_QP_PKEY_INDEX) | ||
453 | if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) | ||
454 | goto inval; | ||
455 | |||
456 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | ||
457 | if (attr->min_rnr_timer > 31) | ||
458 | goto inval; | ||
459 | |||
387 | switch (new_state) { | 460 | switch (new_state) { |
388 | case IB_QPS_RESET: | 461 | case IB_QPS_RESET: |
389 | ipath_reset_qp(qp); | 462 | ipath_reset_qp(qp); |
@@ -398,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
398 | 471 | ||
399 | } | 472 | } |
400 | 473 | ||
401 | if (attr_mask & IB_QP_PKEY_INDEX) { | 474 | if (attr_mask & IB_QP_PKEY_INDEX) |
402 | struct ipath_ibdev *dev = to_idev(ibqp->device); | ||
403 | |||
404 | if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) | ||
405 | goto inval; | ||
406 | qp->s_pkey_index = attr->pkey_index; | 475 | qp->s_pkey_index = attr->pkey_index; |
407 | } | ||
408 | 476 | ||
409 | if (attr_mask & IB_QP_DEST_QPN) | 477 | if (attr_mask & IB_QP_DEST_QPN) |
410 | qp->remote_qpn = attr->dest_qp_num; | 478 | qp->remote_qpn = attr->dest_qp_num; |
@@ -420,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
420 | if (attr_mask & IB_QP_ACCESS_FLAGS) | 488 | if (attr_mask & IB_QP_ACCESS_FLAGS) |
421 | qp->qp_access_flags = attr->qp_access_flags; | 489 | qp->qp_access_flags = attr->qp_access_flags; |
422 | 490 | ||
423 | if (attr_mask & IB_QP_AV) { | 491 | if (attr_mask & IB_QP_AV) |
424 | if (attr->ah_attr.dlid == 0 || | ||
425 | attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) | ||
426 | goto inval; | ||
427 | qp->remote_ah_attr = attr->ah_attr; | 492 | qp->remote_ah_attr = attr->ah_attr; |
428 | } | ||
429 | 493 | ||
430 | if (attr_mask & IB_QP_PATH_MTU) | 494 | if (attr_mask & IB_QP_PATH_MTU) |
431 | qp->path_mtu = attr->path_mtu; | 495 | qp->path_mtu = attr->path_mtu; |
@@ -440,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
440 | qp->s_rnr_retry_cnt = qp->s_rnr_retry; | 504 | qp->s_rnr_retry_cnt = qp->s_rnr_retry; |
441 | } | 505 | } |
442 | 506 | ||
443 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | 507 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
444 | if (attr->min_rnr_timer > 31) | ||
445 | goto inval; | ||
446 | qp->s_min_rnr_timer = attr->min_rnr_timer; | 508 | qp->s_min_rnr_timer = attr->min_rnr_timer; |
447 | } | ||
448 | 509 | ||
449 | if (attr_mask & IB_QP_QKEY) | 510 | if (attr_mask & IB_QP_QKEY) |
450 | qp->qkey = attr->qkey; | 511 | qp->qkey = attr->qkey; |
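Note: the net effect of these ipath_modify_qp() hunks is a validate-then-commit split: every attr_mask sanity check is hoisted ahead of the switch on new_state, so an invalid attribute can no longer bail out via "goto inval" after the QP has already been reset or partially updated. Skeleton of the resulting flow:

	/* phase 1: validate everything; nothing modified yet */
	if ((attr_mask & IB_QP_MIN_RNR_TIMER) && attr->min_rnr_timer > 31)
		goto inval;
	/* ... remaining checks ... */

	/* phase 2: commit; no failure paths from here on */
	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->s_min_rnr_timer = attr->min_rnr_timer;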
@@ -651,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
651 | init_attr->qp_type == IB_QPT_RC ? | 712 | init_attr->qp_type == IB_QPT_RC ? |
652 | ipath_do_rc_send : ipath_do_uc_send, | 713 | ipath_do_rc_send : ipath_do_uc_send, |
653 | (unsigned long)qp); | 714 | (unsigned long)qp); |
654 | qp->piowait.next = LIST_POISON1; | 715 | INIT_LIST_HEAD(&qp->piowait); |
655 | qp->piowait.prev = LIST_POISON2; | 716 | INIT_LIST_HEAD(&qp->timerwait); |
656 | qp->timerwait.next = LIST_POISON1; | ||
657 | qp->timerwait.prev = LIST_POISON2; | ||
658 | qp->state = IB_QPS_RESET; | 717 | qp->state = IB_QPS_RESET; |
659 | qp->s_wq = swq; | 718 | qp->s_wq = swq; |
660 | qp->s_size = init_attr->cap.max_send_wr + 1; | 719 | qp->s_size = init_attr->cap.max_send_wr + 1; |
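Note: this hunk and the matching ones in ipath_rc.c and ipath_ruc.c retire the LIST_POISON1/LIST_POISON2 membership trick. The poison values are debugging aids that list_del() leaves behind and were never meant as an "off-list" marker; the supported idiom is a self-linked node plus list_empty()/list_del_init():

	INIT_LIST_HEAD(&qp->piowait);		/* starts self-linked (empty) */

	if (list_empty(&qp->piowait))		/* not on any list yet? */
		list_add_tail(&qp->piowait, &dev->piowait);

	if (!list_empty(&qp->piowait))		/* currently queued? */
		list_del_init(&qp->piowait);	/* remove, re-mark as empty */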
@@ -675,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
675 | ipath_reset_qp(qp); | 734 | ipath_reset_qp(qp); |
676 | 735 | ||
677 | /* Tell the core driver that the kernel SMA is present. */ | 736 | /* Tell the core driver that the kernel SMA is present. */ |
678 | if (qp->ibqp.qp_type == IB_QPT_SMI) | 737 | if (init_attr->qp_type == IB_QPT_SMI) |
679 | ipath_layer_set_verbs_flags(dev->dd, | 738 | ipath_layer_set_verbs_flags(dev->dd, |
680 | IPATH_VERBS_KERNEL_SMA); | 739 | IPATH_VERBS_KERNEL_SMA); |
681 | break; | 740 | break; |
@@ -724,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp) | |||
724 | 783 | ||
725 | /* Make sure the QP isn't on the timeout list. */ | 784 | /* Make sure the QP isn't on the timeout list. */ |
726 | spin_lock_irqsave(&dev->pending_lock, flags); | 785 | spin_lock_irqsave(&dev->pending_lock, flags); |
727 | if (qp->timerwait.next != LIST_POISON1) | 786 | if (!list_empty(&qp->timerwait)) |
728 | list_del(&qp->timerwait); | 787 | list_del_init(&qp->timerwait); |
729 | if (qp->piowait.next != LIST_POISON1) | 788 | if (!list_empty(&qp->piowait)) |
730 | list_del(&qp->piowait); | 789 | list_del_init(&qp->piowait); |
731 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 790 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
732 | 791 | ||
733 | /* | 792 | /* |
@@ -796,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
796 | 855 | ||
797 | spin_lock(&dev->pending_lock); | 856 | spin_lock(&dev->pending_lock); |
798 | /* XXX What if its already removed by the timeout code? */ | 857 | /* XXX What if its already removed by the timeout code? */ |
799 | if (qp->timerwait.next != LIST_POISON1) | 858 | if (!list_empty(&qp->timerwait)) |
800 | list_del(&qp->timerwait); | 859 | list_del_init(&qp->timerwait); |
801 | if (qp->piowait.next != LIST_POISON1) | 860 | if (!list_empty(&qp->piowait)) |
802 | list_del(&qp->piowait); | 861 | list_del_init(&qp->piowait); |
803 | spin_unlock(&dev->pending_lock); | 862 | spin_unlock(&dev->pending_lock); |
804 | 863 | ||
805 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); | 864 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); |
@@ -821,65 +880,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
821 | } | 880 | } |
822 | 881 | ||
823 | /** | 882 | /** |
824 | * ipath_error_qp - put a QP into an error state | ||
825 | * @qp: the QP to put into an error state | ||
826 | * | ||
827 | * Flushes both send and receive work queues. | ||
828 | * QP r_rq.lock and s_lock should be held. | ||
829 | */ | ||
830 | |||
831 | void ipath_error_qp(struct ipath_qp *qp) | ||
832 | { | ||
833 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
834 | struct ib_wc wc; | ||
835 | |||
836 | _VERBS_INFO("QP%d/%d in error state\n", | ||
837 | qp->ibqp.qp_num, qp->remote_qpn); | ||
838 | |||
839 | spin_lock(&dev->pending_lock); | ||
840 | /* XXX What if its already removed by the timeout code? */ | ||
841 | if (qp->timerwait.next != LIST_POISON1) | ||
842 | list_del(&qp->timerwait); | ||
843 | if (qp->piowait.next != LIST_POISON1) | ||
844 | list_del(&qp->piowait); | ||
845 | spin_unlock(&dev->pending_lock); | ||
846 | |||
847 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
848 | wc.vendor_err = 0; | ||
849 | wc.byte_len = 0; | ||
850 | wc.imm_data = 0; | ||
851 | wc.qp_num = qp->ibqp.qp_num; | ||
852 | wc.src_qp = 0; | ||
853 | wc.wc_flags = 0; | ||
854 | wc.pkey_index = 0; | ||
855 | wc.slid = 0; | ||
856 | wc.sl = 0; | ||
857 | wc.dlid_path_bits = 0; | ||
858 | wc.port_num = 0; | ||
859 | |||
860 | while (qp->s_last != qp->s_head) { | ||
861 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | ||
862 | |||
863 | wc.wr_id = wqe->wr.wr_id; | ||
864 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | ||
865 | if (++qp->s_last >= qp->s_size) | ||
866 | qp->s_last = 0; | ||
867 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | ||
868 | } | ||
869 | qp->s_cur = qp->s_tail = qp->s_head; | ||
870 | qp->s_hdrwords = 0; | ||
871 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
872 | |||
873 | wc.opcode = IB_WC_RECV; | ||
874 | while (qp->r_rq.tail != qp->r_rq.head) { | ||
875 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; | ||
876 | if (++qp->r_rq.tail >= qp->r_rq.size) | ||
877 | qp->r_rq.tail = 0; | ||
878 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
879 | } | ||
880 | } | ||
881 | |||
882 | /** | ||
883 | * ipath_get_credit - flush the send work queue of a QP | 883 | * ipath_get_credit - flush the send work queue of a QP |
884 | * @qp: the qp who's send work queue to flush | 884 | * @qp: the qp who's send work queue to flush |
885 | * @aeth: the Acknowledge Extended Transport Header | 885 | * @aeth: the Acknowledge Extended Transport Header |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index a4055ca00614..493b1821a934 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) | |||
57 | qp->s_len = wqe->length - len; | 57 | qp->s_len = wqe->length - len; |
58 | dev = to_idev(qp->ibqp.device); | 58 | dev = to_idev(qp->ibqp.device); |
59 | spin_lock(&dev->pending_lock); | 59 | spin_lock(&dev->pending_lock); |
60 | if (qp->timerwait.next == LIST_POISON1) | 60 | if (list_empty(&qp->timerwait)) |
61 | list_add_tail(&qp->timerwait, | 61 | list_add_tail(&qp->timerwait, |
62 | &dev->pending[dev->pending_index]); | 62 | &dev->pending[dev->pending_index]); |
63 | spin_unlock(&dev->pending_lock); | 63 | spin_unlock(&dev->pending_lock); |
@@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp, | |||
356 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 356 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) |
357 | qp->s_next_psn = qp->s_psn; | 357 | qp->s_next_psn = qp->s_psn; |
358 | spin_lock(&dev->pending_lock); | 358 | spin_lock(&dev->pending_lock); |
359 | if (qp->timerwait.next == LIST_POISON1) | 359 | if (list_empty(&qp->timerwait)) |
360 | list_add_tail(&qp->timerwait, | 360 | list_add_tail(&qp->timerwait, |
361 | &dev->pending[dev->pending_index]); | 361 | &dev->pending[dev->pending_index]); |
362 | spin_unlock(&dev->pending_lock); | 362 | spin_unlock(&dev->pending_lock); |
@@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc) | |||
726 | */ | 726 | */ |
727 | dev = to_idev(qp->ibqp.device); | 727 | dev = to_idev(qp->ibqp.device); |
728 | spin_lock(&dev->pending_lock); | 728 | spin_lock(&dev->pending_lock); |
729 | if (qp->timerwait.next != LIST_POISON1) | 729 | if (!list_empty(&qp->timerwait)) |
730 | list_del(&qp->timerwait); | 730 | list_del_init(&qp->timerwait); |
731 | spin_unlock(&dev->pending_lock); | 731 | spin_unlock(&dev->pending_lock); |
732 | 732 | ||
733 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | 733 | if (wqe->wr.opcode == IB_WR_RDMA_READ) |
@@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
886 | * just won't find anything to restart if we ACK everything. | 886 | * just won't find anything to restart if we ACK everything. |
887 | */ | 887 | */ |
888 | spin_lock(&dev->pending_lock); | 888 | spin_lock(&dev->pending_lock); |
889 | if (qp->timerwait.next != LIST_POISON1) | 889 | if (!list_empty(&qp->timerwait)) |
890 | list_del(&qp->timerwait); | 890 | list_del_init(&qp->timerwait); |
891 | spin_unlock(&dev->pending_lock); | 891 | spin_unlock(&dev->pending_lock); |
892 | 892 | ||
893 | /* | 893 | /* |
@@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1194 | IB_WR_RDMA_READ)) | 1194 | IB_WR_RDMA_READ)) |
1195 | goto ack_done; | 1195 | goto ack_done; |
1196 | spin_lock(&dev->pending_lock); | 1196 | spin_lock(&dev->pending_lock); |
1197 | if (qp->s_rnr_timeout == 0 && | 1197 | if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) |
1198 | qp->timerwait.next != LIST_POISON1) | ||
1199 | list_move_tail(&qp->timerwait, | 1198 | list_move_tail(&qp->timerwait, |
1200 | &dev->pending[dev->pending_index]); | 1199 | &dev->pending[dev->pending_index]); |
1201 | spin_unlock(&dev->pending_lock); | 1200 | spin_unlock(&dev->pending_lock); |
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 1e59750c5f63..402126eb79c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -34,8 +34,9 @@ | |||
34 | #define _IPATH_REGISTERS_H | 34 | #define _IPATH_REGISTERS_H |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * This file should only be included by kernel source, and by the diags. | 37 | * This file should only be included by kernel source, and by the diags. It |
38 | * It defines the registers, and their contents, for the InfiniPath HT-400 chip | 38 | * defines the registers, and their contents, for the InfiniPath HT-400 |
39 | * chip. | ||
39 | */ | 40 | */ |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -156,8 +157,10 @@ | |||
156 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 | 157 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 |
157 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL | 158 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL |
158 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 | 159 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 |
159 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */ | 160 | /* cycle through TS1/TS2 till OK */ |
160 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */ | 161 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 |
162 | /* wait for TS1, then go on */ | ||
163 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 | ||
161 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 | 164 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 |
162 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL | 165 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL |
163 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ | 166 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ |
@@ -182,7 +185,8 @@ | |||
182 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 | 185 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 |
183 | #define INFINIPATH_IBCS_TXREADY 0x40000000 | 186 | #define INFINIPATH_IBCS_TXREADY 0x40000000 |
184 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 | 187 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 |
185 | /* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | 188 | /* link training states (shift by |
189 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | ||
186 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 | 190 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 |
187 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 | 191 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 |
188 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 | 192 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 |
@@ -267,10 +271,12 @@ | |||
267 | /* kr_serdesconfig0 bits */ | 271 | /* kr_serdesconfig0 bits */ |
268 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overall reset bits */ | 272 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overall reset bits */ |
269 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ | 273 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ |
270 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */ | 274 | /* tx idle enables (per lane) */ |
271 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */ | 275 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL |
272 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT, | 276 | /* rx detect enables (per lane) */ |
273 | otherwise not used on IB side */ | 277 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL |
278 | /* L1 Power down; use with RXDETECT, otherwise not used on IB side */ | ||
279 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL | ||
274 | 280 | ||
275 | /* kr_xgxsconfig bits */ | 281 | /* kr_xgxsconfig bits */ |
276 | #define INFINIPATH_XGXS_RESET 0x7ULL | 282 | #define INFINIPATH_XGXS_RESET 0x7ULL |
@@ -390,12 +396,13 @@ struct ipath_kregs { | |||
390 | ipath_kreg kr_txintmemsize; | 396 | ipath_kreg kr_txintmemsize; |
391 | ipath_kreg kr_xgxsconfig; | 397 | ipath_kreg kr_xgxsconfig; |
392 | ipath_kreg kr_ibpllcfg; | 398 | ipath_kreg kr_ibpllcfg; |
393 | /* use these two (and the following N ports) only with ipath_k*_kreg64_port(); | 399 | /* use these two (and the following N ports) only with |
394 | * not *kreg64() */ | 400 | * ipath_k*_kreg64_port(); not *kreg64() */ |
395 | ipath_kreg kr_rcvhdraddr; | 401 | ipath_kreg kr_rcvhdraddr; |
396 | ipath_kreg kr_rcvhdrtailaddr; | 402 | ipath_kreg kr_rcvhdrtailaddr; |
397 | 403 | ||
398 | /* remaining registers are not present on all types of infinipath chips */ | 404 | /* remaining registers are not present on all types of infinipath |
405 | chips */ | ||
399 | ipath_kreg kr_rcvpktledcnt; | 406 | ipath_kreg kr_rcvpktledcnt; |
400 | ipath_kreg kr_pcierbuftestreg0; | 407 | ipath_kreg kr_pcierbuftestreg0; |
401 | ipath_kreg kr_pcierbuftestreg1; | 408 | ipath_kreg kr_pcierbuftestreg1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index f232e77b78ee..d38f4f3cfd1d 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | |||
435 | unsigned long flags; | 435 | unsigned long flags; |
436 | 436 | ||
437 | spin_lock_irqsave(&dev->pending_lock, flags); | 437 | spin_lock_irqsave(&dev->pending_lock, flags); |
438 | if (qp->piowait.next == LIST_POISON1) | 438 | if (list_empty(&qp->piowait)) |
439 | list_add_tail(&qp->piowait, &dev->piowait); | 439 | list_add_tail(&qp->piowait, &dev->piowait); |
440 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 440 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
441 | /* | 441 | /* |
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
531 | } | 531 | } |
532 | wqe->wr.num_sge = j; | 532 | wqe->wr.num_sge = j; |
533 | qp->s_head = next; | 533 | qp->s_head = next; |
534 | /* | ||
535 | * Wake up the send tasklet if the QP is not waiting | ||
536 | * for an RNR timeout. | ||
537 | */ | ||
538 | next = qp->s_rnr_timeout; | ||
539 | spin_unlock_irqrestore(&qp->s_lock, flags); | 534 | spin_unlock_irqrestore(&qp->s_lock, flags); |
540 | 535 | ||
541 | if (next == 0) { | 536 | if (qp->ibqp.qp_type == IB_QPT_UC) |
542 | if (qp->ibqp.qp_type == IB_QPT_UC) | 537 | ipath_do_uc_send((unsigned long) qp); |
543 | ipath_do_uc_send((unsigned long) qp); | 538 | else |
544 | else | 539 | ipath_do_rc_send((unsigned long) qp); |
545 | ipath_do_rc_send((unsigned long) qp); | ||
546 | } | ||
547 | 540 | ||
548 | ret = 0; | 541 | ret = 0; |
549 | 542 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index 32acd8048b49..f323791cc495 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c | |||
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = { | |||
711 | * enters diag mode. A device reset is quite likely to crash the | 711 | * enters diag mode. A device reset is quite likely to crash the |
712 | * machine entirely, so we don't want to normally make it | 712 | * machine entirely, so we don't want to normally make it |
713 | * available. | 713 | * available. |
714 | * | ||
715 | * Called with ipath_mutex held. | ||
714 | */ | 716 | */ |
715 | int ipath_expose_reset(struct device *dev) | 717 | int ipath_expose_reset(struct device *dev) |
716 | { | 718 | { |
717 | return device_create_file(dev, &dev_attr_reset); | 719 | static int exposed; |
720 | int ret; | ||
721 | |||
722 | if (!exposed) { | ||
723 | ret = device_create_file(dev, &dev_attr_reset); | ||
724 | exposed = 1; | ||
725 | } | ||
726 | else | ||
727 | ret = 0; | ||
728 | |||
729 | return ret; | ||
718 | } | 730 | } |
719 | 731 | ||
720 | int ipath_driver_create_group(struct device_driver *drv) | 732 | int ipath_driver_create_group(struct device_driver *drv) |
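Editor's note: the rewritten ipath_expose_reset() registers the reset attribute at most once per boot; the bare static flag is safe only because, per the comment added above it, the function runs with ipath_mutex held. A hedged sketch of the same once-only pattern (function and flag names are hypothetical):

    #include <linux/device.h>

    static int expose_once(struct device *dev, struct device_attribute *attr)
    {
    	static int exposed;	/* serialized by the caller's mutex */
    	int ret = 0;

    	if (!exposed) {
    		ret = device_create_file(dev, attr);
    		exposed = 1;	/* the patch sets this even on failure */
    	}
    	return ret;
    }

Setting the flag only on success would allow a retry after a failed device_create_file(); the patch keeps the simpler set-once behaviour.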
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 5ff3de6128b2..e606daf83210 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -46,8 +46,10 @@ | |||
46 | * This is called from ipath_post_ud_send() to forward a WQE addressed | 46 | * This is called from ipath_post_ud_send() to forward a WQE addressed |
47 | * to the same HCA. | 47 | * to the same HCA. |
48 | */ | 48 | */ |
49 | void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, | 49 | static void ipath_ud_loopback(struct ipath_qp *sqp, |
50 | u32 length, struct ib_send_wr *wr, struct ib_wc *wc) | 50 | struct ipath_sge_state *ss, |
51 | u32 length, struct ib_send_wr *wr, | ||
52 | struct ib_wc *wc) | ||
51 | { | 53 | { |
52 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); | 54 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); |
53 | struct ipath_qp *qp; | 55 | struct ipath_qp *qp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 9f27fd35cdbb..28fdbdaa789d 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -41,7 +41,7 @@ | |||
41 | /* Not static, because we don't want the compiler removing it */ | 41 | /* Not static, because we don't want the compiler removing it */ |
42 | const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; | 42 | const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; |
43 | 43 | ||
44 | unsigned int ib_ipath_qp_table_size = 251; | 44 | static unsigned int ib_ipath_qp_table_size = 251; |
45 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); | 45 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); |
46 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | 46 | MODULE_PARM_DESC(qp_table_size, "QP table size"); |
47 | 47 | ||
@@ -87,7 +87,7 @@ const enum ib_wc_opcode ib_ipath_wc_opcode[] = { | |||
87 | /* | 87 | /* |
88 | * System image GUID. | 88 | * System image GUID. |
89 | */ | 89 | */ |
90 | __be64 sys_image_guid; | 90 | static __be64 sys_image_guid; |
91 | 91 | ||
92 | /** | 92 | /** |
93 | * ipath_copy_sge - copy data to SGE memory | 93 | * ipath_copy_sge - copy data to SGE memory |
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg) | |||
449 | { | 449 | { |
450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | 450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; |
451 | struct ipath_qp *resend = NULL; | 451 | struct ipath_qp *resend = NULL; |
452 | struct ipath_qp *rnr = NULL; | ||
453 | struct list_head *last; | 452 | struct list_head *last; |
454 | struct ipath_qp *qp; | 453 | struct ipath_qp *qp; |
455 | unsigned long flags; | 454 | unsigned long flags; |
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg) | |||
465 | last = &dev->pending[dev->pending_index]; | 464 | last = &dev->pending[dev->pending_index]; |
466 | while (!list_empty(last)) { | 465 | while (!list_empty(last)) { |
467 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 466 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
468 | if (last->next == LIST_POISON1 || | 467 | list_del_init(&qp->timerwait); |
469 | last->next != &qp->timerwait || | 468 | qp->timer_next = resend; |
470 | qp->timerwait.prev != last) { | 469 | resend = qp; |
471 | INIT_LIST_HEAD(last); | 470 | atomic_inc(&qp->refcount); |
472 | } else { | ||
473 | list_del(&qp->timerwait); | ||
474 | qp->timerwait.prev = (struct list_head *) resend; | ||
475 | resend = qp; | ||
476 | atomic_inc(&qp->refcount); | ||
477 | } | ||
478 | } | 471 | } |
479 | last = &dev->rnrwait; | 472 | last = &dev->rnrwait; |
480 | if (!list_empty(last)) { | 473 | if (!list_empty(last)) { |
481 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 474 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
482 | if (--qp->s_rnr_timeout == 0) { | 475 | if (--qp->s_rnr_timeout == 0) { |
483 | do { | 476 | do { |
484 | if (last->next == LIST_POISON1 || | 477 | list_del_init(&qp->timerwait); |
485 | last->next != &qp->timerwait || | 478 | tasklet_hi_schedule(&qp->s_task); |
486 | qp->timerwait.prev != last) { | ||
487 | INIT_LIST_HEAD(last); | ||
488 | break; | ||
489 | } | ||
490 | list_del(&qp->timerwait); | ||
491 | qp->timerwait.prev = | ||
492 | (struct list_head *) rnr; | ||
493 | rnr = qp; | ||
494 | if (list_empty(last)) | 479 | if (list_empty(last)) |
495 | break; | 480 | break; |
496 | qp = list_entry(last->next, struct ipath_qp, | 481 | qp = list_entry(last->next, struct ipath_qp, |
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg) | |||
530 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 515 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
531 | 516 | ||
532 | /* XXX What if timer fires again while this is running? */ | 517 | /* XXX What if timer fires again while this is running? */ |
533 | for (qp = resend; qp != NULL; | 518 | for (qp = resend; qp != NULL; qp = qp->timer_next) { |
534 | qp = (struct ipath_qp *) qp->timerwait.prev) { | ||
535 | struct ib_wc wc; | 519 | struct ib_wc wc; |
536 | 520 | ||
537 | spin_lock_irqsave(&qp->s_lock, flags); | 521 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg) | |||
545 | if (atomic_dec_and_test(&qp->refcount)) | 529 | if (atomic_dec_and_test(&qp->refcount)) |
546 | wake_up(&qp->wait); | 530 | wake_up(&qp->wait); |
547 | } | 531 | } |
548 | for (qp = rnr; qp != NULL; | ||
549 | qp = (struct ipath_qp *) qp->timerwait.prev) | ||
550 | tasklet_hi_schedule(&qp->s_task); | ||
551 | } | 532 | } |
552 | 533 | ||
553 | /** | 534 | /** |
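Editor's note: alongside the list_del_init() conversion, ipath_ib_timer() now threads the QPs it collects onto a private singly linked chain through the new timer_next field (added to struct ipath_qp in the ipath_verbs.h hunk below) instead of smuggling the chain pointer through timerwait.prev with casts. A sketch of the collect-under-lock, process-after-unlock shape, using hypothetical cut-down types:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct qp_sketch {
    	struct list_head timerwait;
    	struct qp_sketch *timer_next;	/* private to the timer */
    };

    static struct qp_sketch *collect_expired(struct list_head *pending,
    					     spinlock_t *lock)
    {
    	struct qp_sketch *resend = NULL, *qp;
    	unsigned long flags;

    	spin_lock_irqsave(lock, flags);
    	while (!list_empty(pending)) {
    		qp = list_entry(pending->next, struct qp_sketch, timerwait);
    		list_del_init(&qp->timerwait);
    		qp->timer_next = resend;	/* push onto private chain */
    		resend = qp;
    	}
    	spin_unlock_irqrestore(lock, flags);
    	return resend;	/* caller walks qp = qp->timer_next unlocked */
    }

Because timer_next is only touched here, the chain needs no locking of its own; in the real code the atomic_inc() per entry keeps each QP alive until the walk finishes.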
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg) | |||
556 | * | 537 | * |
557 | * This is called from ipath_intr() at interrupt level when a PIO buffer is | 538 | * This is called from ipath_intr() at interrupt level when a PIO buffer is |
558 | * available after ipath_verbs_send() returned an error that no buffers were | 539 | * available after ipath_verbs_send() returned an error that no buffers were |
559 | * available. Return 0 if we consumed all the PIO buffers and we still have | 540 | * available. Return 1 if we consumed all the PIO buffers and we still have |
560 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and | 541 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and |
561 | * return one). | 542 | * return zero). |
562 | */ | 543 | */ |
563 | static int ipath_ib_piobufavail(void *arg) | 544 | static int ipath_ib_piobufavail(void *arg) |
564 | { | 545 | { |
@@ -573,13 +554,13 @@ static int ipath_ib_piobufavail(void *arg) | |||
573 | while (!list_empty(&dev->piowait)) { | 554 | while (!list_empty(&dev->piowait)) { |
574 | qp = list_entry(dev->piowait.next, struct ipath_qp, | 555 | qp = list_entry(dev->piowait.next, struct ipath_qp, |
575 | piowait); | 556 | piowait); |
576 | list_del(&qp->piowait); | 557 | list_del_init(&qp->piowait); |
577 | tasklet_hi_schedule(&qp->s_task); | 558 | tasklet_hi_schedule(&qp->s_task); |
578 | } | 559 | } |
579 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 560 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
580 | 561 | ||
581 | bail: | 562 | bail: |
582 | return 1; | 563 | return 0; |
583 | } | 564 | } |
584 | 565 | ||
585 | static int ipath_query_device(struct ib_device *ibdev, | 566 | static int ipath_query_device(struct ib_device *ibdev, |
@@ -970,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
970 | idev->dd = dd; | 951 | idev->dd = dd; |
971 | 952 | ||
972 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); | 953 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); |
954 | dev->owner = THIS_MODULE; | ||
973 | dev->node_guid = ipath_layer_get_guid(dd); | 955 | dev->node_guid = ipath_layer_get_guid(dd); |
974 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; | 956 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; |
975 | dev->uverbs_cmd_mask = | 957 | dev->uverbs_cmd_mask = |
@@ -1110,7 +1092,7 @@ static void ipath_unregister_ib_device(void *arg) | |||
1110 | ib_dealloc_device(ibdev); | 1092 | ib_dealloc_device(ibdev); |
1111 | } | 1093 | } |
1112 | 1094 | ||
1113 | int __init ipath_verbs_init(void) | 1095 | static int __init ipath_verbs_init(void) |
1114 | { | 1096 | { |
1115 | return ipath_verbs_register(ipath_register_ib_device, | 1097 | return ipath_verbs_register(ipath_register_ib_device, |
1116 | ipath_unregister_ib_device, | 1098 | ipath_unregister_ib_device, |
@@ -1118,33 +1100,33 @@ int __init ipath_verbs_init(void) | |||
1118 | ipath_ib_timer); | 1100 | ipath_ib_timer); |
1119 | } | 1101 | } |
1120 | 1102 | ||
1121 | void __exit ipath_verbs_cleanup(void) | 1103 | static void __exit ipath_verbs_cleanup(void) |
1122 | { | 1104 | { |
1123 | ipath_verbs_unregister(); | 1105 | ipath_verbs_unregister(); |
1124 | } | 1106 | } |
1125 | 1107 | ||
1126 | static ssize_t show_rev(struct class_device *cdev, char *buf) | 1108 | static ssize_t show_rev(struct class_device *cdev, char *buf) |
1127 | { | 1109 | { |
1128 | struct ipath_ibdev *dev = | 1110 | struct ipath_ibdev *dev = |
1129 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1111 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1130 | int vendor, boardrev, majrev, minrev; | 1112 | int vendor, boardrev, majrev, minrev; |
1131 | 1113 | ||
1132 | ipath_layer_query_device(dev->dd, &vendor, &boardrev, | 1114 | ipath_layer_query_device(dev->dd, &vendor, &boardrev, |
1133 | &majrev, &minrev); | 1115 | &majrev, &minrev); |
1134 | return sprintf(buf, "%d.%d\n", majrev, minrev); | 1116 | return sprintf(buf, "%d.%d\n", majrev, minrev); |
1135 | } | 1117 | } |
1136 | 1118 | ||
1137 | static ssize_t show_hca(struct class_device *cdev, char *buf) | 1119 | static ssize_t show_hca(struct class_device *cdev, char *buf) |
1138 | { | 1120 | { |
1139 | struct ipath_ibdev *dev = | 1121 | struct ipath_ibdev *dev = |
1140 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1122 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1141 | int ret; | 1123 | int ret; |
1142 | 1124 | ||
1143 | ret = ipath_layer_get_boardname(dev->dd, buf, 128); | 1125 | ret = ipath_layer_get_boardname(dev->dd, buf, 128); |
1144 | if (ret < 0) | 1126 | if (ret < 0) |
1145 | goto bail; | 1127 | goto bail; |
1146 | strcat(buf, "\n"); | 1128 | strcat(buf, "\n"); |
1147 | ret = strlen(buf); | 1129 | ret = strlen(buf); |
1148 | 1130 | ||
1149 | bail: | 1131 | bail: |
1150 | return ret; | 1132 | return ret; |
@@ -1152,40 +1134,40 @@ bail: | |||
1152 | 1134 | ||
1153 | static ssize_t show_stats(struct class_device *cdev, char *buf) | 1135 | static ssize_t show_stats(struct class_device *cdev, char *buf) |
1154 | { | 1136 | { |
1155 | struct ipath_ibdev *dev = | 1137 | struct ipath_ibdev *dev = |
1156 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1138 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1157 | int i; | 1139 | int i; |
1158 | int len; | 1140 | int len; |
1159 | 1141 | ||
1160 | len = sprintf(buf, | 1142 | len = sprintf(buf, |
1161 | "RC resends %d\n" | 1143 | "RC resends %d\n" |
1162 | "RC QACKs %d\n" | 1144 | "RC no QACK %d\n" |
1163 | "RC ACKs %d\n" | 1145 | "RC ACKs %d\n" |
1164 | "RC SEQ NAKs %d\n" | 1146 | "RC SEQ NAKs %d\n" |
1165 | "RC RDMA seq %d\n" | 1147 | "RC RDMA seq %d\n" |
1166 | "RC RNR NAKs %d\n" | 1148 | "RC RNR NAKs %d\n" |
1167 | "RC OTH NAKs %d\n" | 1149 | "RC OTH NAKs %d\n" |
1168 | "RC timeouts %d\n" | 1150 | "RC timeouts %d\n" |
1169 | "RC RDMA dup %d\n" | 1151 | "RC RDMA dup %d\n" |
1170 | "piobuf wait %d\n" | 1152 | "piobuf wait %d\n" |
1171 | "no piobuf %d\n" | 1153 | "no piobuf %d\n" |
1172 | "PKT drops %d\n" | 1154 | "PKT drops %d\n" |
1173 | "WQE errs %d\n", | 1155 | "WQE errs %d\n", |
1174 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, | 1156 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, |
1175 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, | 1157 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, |
1176 | dev->n_other_naks, dev->n_timeouts, | 1158 | dev->n_other_naks, dev->n_timeouts, |
1177 | dev->n_rdma_dup_busy, dev->n_piowait, | 1159 | dev->n_rdma_dup_busy, dev->n_piowait, |
1178 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); | 1160 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); |
1179 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { | 1161 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { |
1180 | const struct ipath_opcode_stats *si = &dev->opstats[i]; | 1162 | const struct ipath_opcode_stats *si = &dev->opstats[i]; |
1181 | 1163 | ||
1182 | if (!si->n_packets && !si->n_bytes) | 1164 | if (!si->n_packets && !si->n_bytes) |
1183 | continue; | 1165 | continue; |
1184 | len += sprintf(buf + len, "%02x %llu/%llu\n", i, | 1166 | len += sprintf(buf + len, "%02x %llu/%llu\n", i, |
1185 | (unsigned long long) si->n_packets, | 1167 | (unsigned long long) si->n_packets, |
1186 | (unsigned long long) si->n_bytes); | 1168 | (unsigned long long) si->n_bytes); |
1187 | } | 1169 | } |
1188 | return len; | 1170 | return len; |
1189 | } | 1171 | } |
1190 | 1172 | ||
1191 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 1173 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
@@ -1194,25 +1176,25 @@ static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); | |||
1194 | static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); | 1176 | static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); |
1195 | 1177 | ||
1196 | static struct class_device_attribute *ipath_class_attributes[] = { | 1178 | static struct class_device_attribute *ipath_class_attributes[] = { |
1197 | &class_device_attr_hw_rev, | 1179 | &class_device_attr_hw_rev, |
1198 | &class_device_attr_hca_type, | 1180 | &class_device_attr_hca_type, |
1199 | &class_device_attr_board_id, | 1181 | &class_device_attr_board_id, |
1200 | &class_device_attr_stats | 1182 | &class_device_attr_stats |
1201 | }; | 1183 | }; |
1202 | 1184 | ||
1203 | static int ipath_verbs_register_sysfs(struct ib_device *dev) | 1185 | static int ipath_verbs_register_sysfs(struct ib_device *dev) |
1204 | { | 1186 | { |
1205 | int i; | 1187 | int i; |
1206 | int ret; | 1188 | int ret; |
1207 | 1189 | ||
1208 | for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) | 1190 | for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) |
1209 | if (class_device_create_file(&dev->class_dev, | 1191 | if (class_device_create_file(&dev->class_dev, |
1210 | ipath_class_attributes[i])) { | 1192 | ipath_class_attributes[i])) { |
1211 | ret = 1; | 1193 | ret = 1; |
1212 | goto bail; | 1194 | goto bail; |
1213 | } | 1195 | } |
1214 | 1196 | ||
1215 | ret = 0; | 1197 | ret = 0; |
1216 | 1198 | ||
1217 | bail: | 1199 | bail: |
1218 | return ret; | 1200 | return ret; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index b824632b2a8c..4f8d59300e9b 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -282,7 +282,8 @@ struct ipath_srq { | |||
282 | */ | 282 | */ |
283 | struct ipath_qp { | 283 | struct ipath_qp { |
284 | struct ib_qp ibqp; | 284 | struct ib_qp ibqp; |
285 | struct ipath_qp *next; /* link list for QPN hash table */ | 285 | struct ipath_qp *next; /* link list for QPN hash table */ |
286 | struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */ | ||
286 | struct list_head piowait; /* link for wait PIO buf */ | 287 | struct list_head piowait; /* link for wait PIO buf */ |
287 | struct list_head timerwait; /* link for waiting for timeouts */ | 288 | struct list_head timerwait; /* link for waiting for timeouts */ |
288 | struct ib_ah_attr remote_ah_attr; | 289 | struct ib_ah_attr remote_ah_attr; |
@@ -577,8 +578,6 @@ int ipath_init_qp_table(struct ipath_ibdev *idev, int size); | |||
577 | 578 | ||
578 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); | 579 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); |
579 | 580 | ||
580 | void ipath_error_qp(struct ipath_qp *qp); | ||
581 | |||
582 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); | 581 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); |
583 | 582 | ||
584 | void ipath_do_rc_send(unsigned long data); | 583 | void ipath_do_rc_send(unsigned long data); |
@@ -607,9 +606,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
607 | 606 | ||
608 | void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc); | 607 | void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc); |
609 | 608 | ||
610 | void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, | ||
611 | u32 length, struct ib_send_wr *wr, struct ib_wc *wc); | ||
612 | |||
613 | int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); | 609 | int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); |
614 | 610 | ||
615 | void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | 611 | void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, |
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h index 410a764dfcef..ab7cbbbfd03a 100644 --- a/drivers/infiniband/hw/ipath/ips_common.h +++ b/drivers/infiniband/hw/ipath/ips_common.h | |||
@@ -95,7 +95,7 @@ struct ether_header { | |||
95 | __u8 seq_num; | 95 | __u8 seq_num; |
96 | __le32 len; | 96 | __le32 len; |
97 | /* MUST be of word size due to PIO write requirements */ | 97 | /* MUST be of word size due to PIO write requirements */ |
98 | __u32 csum; | 98 | __le32 csum; |
99 | __le16 csum_offset; | 99 | __le16 csum_offset; |
100 | __le16 flags; | 100 | __le16 flags; |
101 | __u16 first_2_bytes; | 101 | __u16 first_2_bytes; |
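Editor's note: retyping csum from __u32 to __le32 records that the field is little-endian on the wire; with that annotation, sparse endian checking (run with -D__CHECK_ENDIAN__) warns wherever the value is used without a byte-swap helper. A small sketch of the access pattern the annotation enforces, with a hypothetical cut-down header type:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct hdr_sketch {		/* stand-in for struct ether_header */
    	__le32 csum;
    	__le16 csum_offset;
    };

    static u32 hdr_csum(const struct hdr_sketch *hdr)
    {
    	/* le32_to_cpu() is a no-op on little-endian hosts and a
    	 * byte-swap on big-endian ones; omitting it now draws a
    	 * sparse warning thanks to the __le32 type. */
    	return le32_to_cpu(hdr->csum);
    }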
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1985b5dfa481..798e13e14faf 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -182,7 +182,7 @@ struct mthca_cmd_context { | |||
182 | u8 status; | 182 | u8 status; |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static int fw_cmd_doorbell = 1; | 185 | static int fw_cmd_doorbell = 0; |
186 | module_param(fw_cmd_doorbell, int, 0644); | 186 | module_param(fw_cmd_doorbell, int, 0644); |
187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " | 187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " |
188 | "(and supported by FW)"); | 188 | "(and supported by FW)"); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 312cf90731ea..205854e9c662 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c | |||
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | |||
238 | spin_lock(&dev->cq_table.lock); | 238 | spin_lock(&dev->cq_table.lock); |
239 | 239 | ||
240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | 240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); |
241 | |||
242 | if (cq) | 241 | if (cq) |
243 | atomic_inc(&cq->refcount); | 242 | ++cq->refcount; |
243 | |||
244 | spin_unlock(&dev->cq_table.lock); | 244 | spin_unlock(&dev->cq_table.lock); |
245 | 245 | ||
246 | if (!cq) { | 246 | if (!cq) { |
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | |||
254 | if (cq->ibcq.event_handler) | 254 | if (cq->ibcq.event_handler) |
255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); | 255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); |
256 | 256 | ||
257 | if (atomic_dec_and_test(&cq->refcount)) | 257 | spin_lock(&dev->cq_table.lock); |
258 | if (!--cq->refcount) | ||
258 | wake_up(&cq->wait); | 259 | wake_up(&cq->wait); |
260 | spin_unlock(&dev->cq_table.lock); | ||
259 | } | 261 | } |
260 | 262 | ||
261 | static inline int is_recv_cqe(struct mthca_cqe *cqe) | 263 | static inline int is_recv_cqe(struct mthca_cqe *cqe) |
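Editor's note: here and in the matching mthca_qp.c and mthca_srq.c hunks, the reference count becomes a plain int protected by the table lock, and the final decrement plus wake_up() moves inside that lock. With atomic_dec_and_test(), the waiter in mthca_free_cq() could observe the count hit zero and free the CQ while this path was still inside wake_up(); holding the lock across both sides closes that window. A condensed sketch of the event-handler side (lookup and delivery helpers are hypothetical stand-ins):

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct cq_sketch {
    	int refcount;			/* protected by the table lock */
    	wait_queue_head_t wait;
    };

    /* hypothetical stand-ins for mthca_array_get() and event delivery */
    extern struct cq_sketch *table_lookup(u32 cqn);
    extern void deliver_event(struct cq_sketch *cq);

    static void cq_event_sketch(spinlock_t *table_lock, u32 cqn)
    {
    	struct cq_sketch *cq;

    	spin_lock(table_lock);
    	cq = table_lookup(cqn);
    	if (cq)
    		++cq->refcount;		/* plain int, lock held */
    	spin_unlock(table_lock);

    	if (!cq)
    		return;

    	deliver_event(cq);		/* runs without the table lock */

    	spin_lock(table_lock);
    	if (!--cq->refcount)
    		wake_up(&cq->wait);	/* waiter rechecks under the lock */
    	spin_unlock(table_lock);
    }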
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe) | |||
267 | return !(cqe->is_send & 0x80); | 269 | return !(cqe->is_send & 0x80); |
268 | } | 270 | } |
269 | 271 | ||
270 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 272 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
271 | struct mthca_srq *srq) | 273 | struct mthca_srq *srq) |
272 | { | 274 | { |
273 | struct mthca_cq *cq; | ||
274 | struct mthca_cqe *cqe; | 275 | struct mthca_cqe *cqe; |
275 | u32 prod_index; | 276 | u32 prod_index; |
276 | int nfreed = 0; | 277 | int nfreed = 0; |
277 | 278 | ||
278 | spin_lock_irq(&dev->cq_table.lock); | ||
279 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | ||
280 | if (cq) | ||
281 | atomic_inc(&cq->refcount); | ||
282 | spin_unlock_irq(&dev->cq_table.lock); | ||
283 | |||
284 | if (!cq) | ||
285 | return; | ||
286 | |||
287 | spin_lock_irq(&cq->lock); | 279 | spin_lock_irq(&cq->lock); |
288 | 280 | ||
289 | /* | 281 | /* |
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
301 | 293 | ||
302 | if (0) | 294 | if (0) |
303 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", | 295 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", |
304 | qpn, cqn, cq->cons_index, prod_index); | 296 | qpn, cq->cqn, cq->cons_index, prod_index); |
305 | 297 | ||
306 | /* | 298 | /* |
307 | * Now sweep backwards through the CQ, removing CQ entries | 299 | * Now sweep backwards through the CQ, removing CQ entries |
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
325 | } | 317 | } |
326 | 318 | ||
327 | spin_unlock_irq(&cq->lock); | 319 | spin_unlock_irq(&cq->lock); |
328 | if (atomic_dec_and_test(&cq->refcount)) | ||
329 | wake_up(&cq->wait); | ||
330 | } | 320 | } |
331 | 321 | ||
332 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) | 322 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) |
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, | |||
821 | } | 811 | } |
822 | 812 | ||
823 | spin_lock_init(&cq->lock); | 813 | spin_lock_init(&cq->lock); |
824 | atomic_set(&cq->refcount, 1); | 814 | cq->refcount = 1; |
825 | init_waitqueue_head(&cq->wait); | 815 | init_waitqueue_head(&cq->wait); |
826 | 816 | ||
827 | memset(cq_context, 0, sizeof *cq_context); | 817 | memset(cq_context, 0, sizeof *cq_context); |
@@ -896,6 +886,17 @@ err_out: | |||
896 | return err; | 886 | return err; |
897 | } | 887 | } |
898 | 888 | ||
889 | static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) | ||
890 | { | ||
891 | int c; | ||
892 | |||
893 | spin_lock_irq(&dev->cq_table.lock); | ||
894 | c = cq->refcount; | ||
895 | spin_unlock_irq(&dev->cq_table.lock); | ||
896 | |||
897 | return c; | ||
898 | } | ||
899 | |||
899 | void mthca_free_cq(struct mthca_dev *dev, | 900 | void mthca_free_cq(struct mthca_dev *dev, |
900 | struct mthca_cq *cq) | 901 | struct mthca_cq *cq) |
901 | { | 902 | { |
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
929 | spin_lock_irq(&dev->cq_table.lock); | 930 | spin_lock_irq(&dev->cq_table.lock); |
930 | mthca_array_clear(&dev->cq_table.cq, | 931 | mthca_array_clear(&dev->cq_table.cq, |
931 | cq->cqn & (dev->limits.num_cqs - 1)); | 932 | cq->cqn & (dev->limits.num_cqs - 1)); |
933 | --cq->refcount; | ||
932 | spin_unlock_irq(&dev->cq_table.lock); | 934 | spin_unlock_irq(&dev->cq_table.lock); |
933 | 935 | ||
934 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) | 936 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) |
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
936 | else | 938 | else |
937 | synchronize_irq(dev->pdev->irq); | 939 | synchronize_irq(dev->pdev->irq); |
938 | 940 | ||
939 | atomic_dec(&cq->refcount); | 941 | wait_event(cq->wait, !get_cq_refcount(dev, cq)); |
940 | wait_event(cq->wait, !atomic_read(&cq->refcount)); | ||
941 | 942 | ||
942 | if (cq->is_kernel) { | 943 | if (cq->is_kernel) { |
943 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); | 944 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); |
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 4c1dcb4c1822..f8160b8de090 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h | |||
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); | 496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); |
497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | 497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, |
498 | enum ib_event_type event_type); | 498 | enum ib_event_type event_type); |
499 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 499 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
500 | struct mthca_srq *srq); | 500 | struct mthca_srq *srq); |
501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); | 501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); |
502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); | 502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); |
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index f235c7ea42f0..4730863ece9a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
@@ -49,7 +49,7 @@ enum { | |||
49 | MTHCA_VENDOR_CLASS2 = 0xa | 49 | MTHCA_VENDOR_CLASS2 = 0xa |
50 | }; | 50 | }; |
51 | 51 | ||
52 | int mthca_update_rate(struct mthca_dev *dev, u8 port_num) | 52 | static int mthca_update_rate(struct mthca_dev *dev, u8 port_num) |
53 | { | 53 | { |
54 | struct ib_port_attr *tprops = NULL; | 54 | struct ib_port_attr *tprops = NULL; |
55 | int ret; | 55 | int ret; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 25e1c1db9a40..a486dec1707e 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) | |||
761 | 761 | ||
762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) | 762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) |
763 | { | 763 | { |
764 | unsigned long addr; | ||
764 | int err, i; | 765 | int err, i; |
765 | 766 | ||
766 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, | 767 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, |
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
796 | goto err_fmr_mpt; | 797 | goto err_fmr_mpt; |
797 | } | 798 | } |
798 | 799 | ||
800 | addr = pci_resource_start(dev->pdev, 4) + | ||
801 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
802 | dev->mr_table.mpt_base); | ||
803 | |||
799 | dev->mr_table.tavor_fmr.mpt_base = | 804 | dev->mr_table.tavor_fmr.mpt_base = |
800 | ioremap(dev->mr_table.mpt_base, | 805 | ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry)); |
801 | (1 << i) * sizeof (struct mthca_mpt_entry)); | ||
802 | 806 | ||
803 | if (!dev->mr_table.tavor_fmr.mpt_base) { | 807 | if (!dev->mr_table.tavor_fmr.mpt_base) { |
804 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); | 808 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); |
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
806 | goto err_fmr_mpt; | 810 | goto err_fmr_mpt; |
807 | } | 811 | } |
808 | 812 | ||
813 | addr = pci_resource_start(dev->pdev, 4) + | ||
814 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
815 | dev->mr_table.mtt_base); | ||
816 | |||
809 | dev->mr_table.tavor_fmr.mtt_base = | 817 | dev->mr_table.tavor_fmr.mtt_base = |
810 | ioremap(dev->mr_table.mtt_base, | 818 | ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE); |
811 | (1 << i) * MTHCA_MTT_SEG_SIZE); | ||
812 | if (!dev->mr_table.tavor_fmr.mtt_base) { | 819 | if (!dev->mr_table.tavor_fmr.mtt_base) { |
813 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); | 820 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); |
814 | err = -ENOMEM; | 821 | err = -ENOMEM; |
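Editor's note: the previous code handed mpt_base and mtt_base straight to ioremap(), but those values are device-internal offsets rather than CPU physical addresses; the fix masks each offset into BAR 4's length and adds the BAR's start. A sketch of the computation, under the assumptions the patch makes (the tables live in BAR 4 and the BAR length is a power of two):

    #include <linux/pci.h>
    #include <asm/io.h>

    /* Map a chip-relative offset that lives inside BAR 4. Because the
     * BAR length is a power of two, (len - 1) masks the chip offset
     * down to its position within the BAR before adding the BAR's
     * CPU physical start address. */
    static void __iomem *map_bar4_offset(struct pci_dev *pdev,
    				     unsigned long chip_offset,
    				     unsigned long size)
    {
    	unsigned long addr = pci_resource_start(pdev, 4) +
    		((pci_resource_len(pdev, 4) - 1) & chip_offset);

    	return ioremap(addr, size);
    }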
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 565a24b1756f..a2eae8a30167 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port, | |||
306 | goto out; | 306 | goto out; |
307 | } | 307 | } |
308 | 308 | ||
309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); | 309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); |
310 | 310 | ||
311 | out: | 311 | out: |
312 | kfree(in_mad); | 312 | kfree(in_mad); |
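Editor's note: the GUIDInfo attribute returned in out_mad->data packs eight 8-byte GUIDs into a 64-byte block, so the per-entry stride is 8 bytes, not 16; with the old * 16 stride, every index past 3 read beyond the block. A tiny sketch of the layout arithmetic the fix restores (names hypothetical):

    #include <linux/types.h>

    /* GUIDInfo: 64 bytes of MAD data = 8 GUIDs x 8 bytes. The query
     * fetched the block containing `index`; pick the slot within it. */
    enum { GUIDS_PER_BLOCK = 8, GUID_SIZE = 8 };

    static const u8 *guid_slot(const u8 *mad_data, int index)
    {
    	return mad_data + (index % GUIDS_PER_BLOCK) * GUID_SIZE;
    }

The 8 copied bytes land at gid->raw + 8 because a GID is the 8-byte subnet prefix followed by the 8-byte port GUID.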
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 6676a786d690..179a8f610d0f 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h | |||
@@ -139,11 +139,12 @@ struct mthca_ah { | |||
139 | * a qp may be locked, with the send cq locked first. No other | 139 | * a qp may be locked, with the send cq locked first. No other |
140 | * nesting should be done. | 140 | * nesting should be done. |
141 | * | 141 | * |
142 | * Each struct mthca_cq/qp also has an atomic_t ref count. The | 142 | * Each struct mthca_cq/qp also has a ref count, protected by the |
143 | * pointer from the cq/qp_table to the struct counts as one reference. | 143 | * corresponding table lock. The pointer from the cq/qp_table to the |
144 | * This reference also is good for access through the consumer API, so | 144 | * struct counts as one reference. This reference also is good for |
145 | * modifying the CQ/QP etc doesn't need to take another reference. | 145 | * access through the consumer API, so modifying the CQ/QP etc doesn't |
146 | * Access because of a completion being polled does need a reference. | 146 | * need to take another reference. Access to a QP because of a |
147 | * completion being polled does not need a reference either. | ||
147 | * | 148 | * |
148 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the | 149 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the |
149 | * destroy function to sleep on. | 150 | * destroy function to sleep on. |
@@ -159,8 +160,9 @@ struct mthca_ah { | |||
159 | * - decrement ref count; if zero, wake up waiters | 160 | * - decrement ref count; if zero, wake up waiters |
160 | * | 161 | * |
161 | * To destroy a CQ/QP, we can do the following: | 162 | * To destroy a CQ/QP, we can do the following: |
162 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | 163 | * - lock cq/qp_table |
163 | * - decrement ref count | 164 | * - remove pointer and decrement ref count |
165 | * - unlock cq/qp_table lock | ||
164 | * - wait_event until ref count is zero | 166 | * - wait_event until ref count is zero |
165 | * | 167 | * |
166 | * It is the consumer's responsibility to make sure that no QP | 168 | * It is the consumer's responsibility to make sure that no QP |
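Editor's note: the revised comment is the contract the cq/qp/srq hunks in this patch implement. Because the table's own reference is dropped inside the locked section that clears the table pointer, no new reference can be taken afterwards, and the destroyer only has to wait out in-flight ones. A sketch of the destroy side as the comment describes it (types and helpers are hypothetical stand-ins for the mthca ones):

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct obj_sketch {
    	int refcount;			/* protected by the table lock */
    	wait_queue_head_t wait;
    };

    /* hypothetical stand-in for mthca_array_clear() */
    extern void table_clear_entry(struct obj_sketch *obj);

    static int locked_refcount(spinlock_t *table_lock, struct obj_sketch *obj)
    {
    	int c;

    	spin_lock_irq(table_lock);
    	c = obj->refcount;	/* mirrors get_cq_refcount() above */
    	spin_unlock_irq(table_lock);
    	return c;
    }

    static void destroy_sketch(spinlock_t *table_lock, struct obj_sketch *obj)
    {
    	spin_lock_irq(table_lock);
    	table_clear_entry(obj);
    	--obj->refcount;	/* drop the table's reference */
    	spin_unlock_irq(table_lock);

    	wait_event(obj->wait, !locked_refcount(table_lock, obj));
    	/* from here on, no event handler can still hold obj */
    }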
@@ -197,7 +199,7 @@ struct mthca_cq_resize { | |||
197 | struct mthca_cq { | 199 | struct mthca_cq { |
198 | struct ib_cq ibcq; | 200 | struct ib_cq ibcq; |
199 | spinlock_t lock; | 201 | spinlock_t lock; |
200 | atomic_t refcount; | 202 | int refcount; |
201 | int cqn; | 203 | int cqn; |
202 | u32 cons_index; | 204 | u32 cons_index; |
203 | struct mthca_cq_buf buf; | 205 | struct mthca_cq_buf buf; |
@@ -217,7 +219,7 @@ struct mthca_cq { | |||
217 | struct mthca_srq { | 219 | struct mthca_srq { |
218 | struct ib_srq ibsrq; | 220 | struct ib_srq ibsrq; |
219 | spinlock_t lock; | 221 | spinlock_t lock; |
220 | atomic_t refcount; | 222 | int refcount; |
221 | int srqn; | 223 | int srqn; |
222 | int max; | 224 | int max; |
223 | int max_gs; | 225 | int max_gs; |
@@ -254,7 +256,7 @@ struct mthca_wq { | |||
254 | 256 | ||
255 | struct mthca_qp { | 257 | struct mthca_qp { |
256 | struct ib_qp ibqp; | 258 | struct ib_qp ibqp; |
257 | atomic_t refcount; | 259 | int refcount; |
258 | u32 qpn; | 260 | u32 qpn; |
259 | int is_direct; | 261 | int is_direct; |
260 | u8 port; /* for SQP and memfree use only */ | 262 | u8 port; /* for SQP and memfree use only */ |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f37b0e367323..07c13be07a4a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
240 | spin_lock(&dev->qp_table.lock); | 240 | spin_lock(&dev->qp_table.lock); |
241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); | 241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); |
242 | if (qp) | 242 | if (qp) |
243 | atomic_inc(&qp->refcount); | 243 | ++qp->refcount; |
244 | spin_unlock(&dev->qp_table.lock); | 244 | spin_unlock(&dev->qp_table.lock); |
245 | 245 | ||
246 | if (!qp) { | 246 | if (!qp) { |
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
257 | if (qp->ibqp.event_handler) | 257 | if (qp->ibqp.event_handler) |
258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); | 258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); |
259 | 259 | ||
260 | if (atomic_dec_and_test(&qp->refcount)) | 260 | spin_lock(&dev->qp_table.lock); |
261 | if (!--qp->refcount) | ||
261 | wake_up(&qp->wait); | 262 | wake_up(&qp->wait); |
263 | spin_unlock(&dev->qp_table.lock); | ||
262 | } | 264 | } |
263 | 265 | ||
264 | static int to_mthca_state(enum ib_qp_state ib_state) | 266 | static int to_mthca_state(enum ib_qp_state ib_state) |
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | |||
833 | * entries and reinitialize the QP. | 835 | * entries and reinitialize the QP. |
834 | */ | 836 | */ |
835 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { | 837 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { |
836 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 838 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
837 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 839 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
838 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 840 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
839 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 841 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
840 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 842 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
841 | 843 | ||
842 | mthca_wq_init(&qp->sq); | 844 | mthca_wq_init(&qp->sq); |
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1096 | int ret; | 1098 | int ret; |
1097 | int i; | 1099 | int i; |
1098 | 1100 | ||
1099 | atomic_set(&qp->refcount, 1); | 1101 | qp->refcount = 1; |
1100 | init_waitqueue_head(&qp->wait); | 1102 | init_waitqueue_head(&qp->wait); |
1101 | qp->state = IB_QPS_RESET; | 1103 | qp->state = IB_QPS_RESET; |
1102 | qp->atomic_rd_en = 0; | 1104 | qp->atomic_rd_en = 0; |
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev, | |||
1318 | return err; | 1320 | return err; |
1319 | } | 1321 | } |
1320 | 1322 | ||
1323 | static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) | ||
1324 | { | ||
1325 | int c; | ||
1326 | |||
1327 | spin_lock_irq(&dev->qp_table.lock); | ||
1328 | c = qp->refcount; | ||
1329 | spin_unlock_irq(&dev->qp_table.lock); | ||
1330 | |||
1331 | return c; | ||
1332 | } | ||
1333 | |||
1321 | void mthca_free_qp(struct mthca_dev *dev, | 1334 | void mthca_free_qp(struct mthca_dev *dev, |
1322 | struct mthca_qp *qp) | 1335 | struct mthca_qp *qp) |
1323 | { | 1336 | { |
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1339 | spin_lock(&dev->qp_table.lock); | 1352 | spin_lock(&dev->qp_table.lock); |
1340 | mthca_array_clear(&dev->qp_table.qp, | 1353 | mthca_array_clear(&dev->qp_table.qp, |
1341 | qp->qpn & (dev->limits.num_qps - 1)); | 1354 | qp->qpn & (dev->limits.num_qps - 1)); |
1355 | --qp->refcount; | ||
1342 | spin_unlock(&dev->qp_table.lock); | 1356 | spin_unlock(&dev->qp_table.lock); |
1343 | 1357 | ||
1344 | if (send_cq != recv_cq) | 1358 | if (send_cq != recv_cq) |
1345 | spin_unlock(&recv_cq->lock); | 1359 | spin_unlock(&recv_cq->lock); |
1346 | spin_unlock_irq(&send_cq->lock); | 1360 | spin_unlock_irq(&send_cq->lock); |
1347 | 1361 | ||
1348 | atomic_dec(&qp->refcount); | 1362 | wait_event(qp->wait, !get_qp_refcount(dev, qp)); |
1349 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
1350 | 1363 | ||
1351 | if (qp->state != IB_QPS_RESET) | 1364 | if (qp->state != IB_QPS_RESET) |
1352 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, | 1365 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, |
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1358 | * unref the mem-free tables and free the QPN in our table. | 1371 | * unref the mem-free tables and free the QPN in our table. |
1359 | */ | 1372 | */ |
1360 | if (!qp->ibqp.uobject) { | 1373 | if (!qp->ibqp.uobject) { |
1361 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 1374 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
1362 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1375 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1363 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 1376 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
1364 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 1377 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
1365 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1378 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1366 | 1379 | ||
1367 | mthca_free_memfree(dev, qp); | 1380 | mthca_free_memfree(dev, qp); |
@@ -1714,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1714 | 1727 | ||
1715 | ind = qp->rq.next_ind; | 1728 | ind = qp->rq.next_ind; |
1716 | 1729 | ||
1717 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1730 | for (nreq = 0; wr; wr = wr->next) { |
1718 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1719 | nreq = 0; | ||
1720 | |||
1721 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1722 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1723 | |||
1724 | wmb(); | ||
1725 | |||
1726 | mthca_write64(doorbell, | ||
1727 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1728 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1729 | |||
1730 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1731 | size0 = 0; | ||
1732 | } | ||
1733 | |||
1734 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | 1731 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1735 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | 1732 | mthca_err(dev, "RQ %06x full (%u head, %u tail," |
1736 | " %d max, %d nreq)\n", qp->qpn, | 1733 | " %d max, %d nreq)\n", qp->qpn, |
@@ -1784,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1784 | ++ind; | 1781 | ++ind; |
1785 | if (unlikely(ind >= qp->rq.max)) | 1782 | if (unlikely(ind >= qp->rq.max)) |
1786 | ind -= qp->rq.max; | 1783 | ind -= qp->rq.max; |
1784 | |||
1785 | ++nreq; | ||
1786 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1787 | nreq = 0; | ||
1788 | |||
1789 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1790 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1791 | |||
1792 | wmb(); | ||
1793 | |||
1794 | mthca_write64(doorbell, | ||
1795 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1796 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1797 | |||
1798 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1799 | size0 = 0; | ||
1800 | } | ||
1787 | } | 1801 | } |
1788 | 1802 | ||
1789 | out: | 1803 | out: |
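Editor's note: both this hunk pair and the matching mthca_srq.c one below move the batch flush from the top of the loop to just after ++nreq. With the old placement, the loop could exit having posted exactly MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests with nreq still equal to that maximum, and the final flush after the loop reuses nreq; judging by the doorbell layout shown in these hunks (qp->qpn << 8), only the low byte is left for the request count, which cannot represent a full batch. Flushing immediately after the increment keeps nreq strictly below the maximum at loop exit. A skeleton of the corrected shape, with the WQE building and doorbell details reduced to hypothetical helpers:

    #include <linux/kernel.h>

    enum { BATCH_MAX = 256 };	/* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

    struct wr_sketch {
    	struct wr_sketch *next;
    };

    /* hypothetical stand-ins for WQE construction and the doorbell write */
    extern void build_wqe(struct wr_sketch *wr);
    extern void ring_recv_doorbell(int count);

    static void post_all(struct wr_sketch *wr)
    {
    	int nreq = 0;

    	for (; wr; wr = wr->next) {
    		build_wqe(wr);
    		++nreq;
    		if (nreq == BATCH_MAX) {
    			nreq = 0;
    			wmb();		/* WQEs visible before doorbell */
    			ring_recv_doorbell(BATCH_MAX);
    		}
    	}
    	if (nreq) {
    		wmb();
    		ring_recv_doorbell(nreq);	/* flush the remainder */
    	}
    }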
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index adcaf85355ae..b292fefa3b41 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, | |||
241 | goto err_out_mailbox; | 241 | goto err_out_mailbox; |
242 | 242 | ||
243 | spin_lock_init(&srq->lock); | 243 | spin_lock_init(&srq->lock); |
244 | atomic_set(&srq->refcount, 1); | 244 | srq->refcount = 1; |
245 | init_waitqueue_head(&srq->wait); | 245 | init_waitqueue_head(&srq->wait); |
246 | 246 | ||
247 | if (mthca_is_memfree(dev)) | 247 | if (mthca_is_memfree(dev)) |
@@ -308,6 +308,17 @@ err_out: | |||
308 | return err; | 308 | return err; |
309 | } | 309 | } |
310 | 310 | ||
311 | static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) | ||
312 | { | ||
313 | int c; | ||
314 | |||
315 | spin_lock_irq(&dev->srq_table.lock); | ||
316 | c = srq->refcount; | ||
317 | spin_unlock_irq(&dev->srq_table.lock); | ||
318 | |||
319 | return c; | ||
320 | } | ||
321 | |||
311 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | 322 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) |
312 | { | 323 | { |
313 | struct mthca_mailbox *mailbox; | 324 | struct mthca_mailbox *mailbox; |
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | |||
329 | spin_lock_irq(&dev->srq_table.lock); | 340 | spin_lock_irq(&dev->srq_table.lock); |
330 | mthca_array_clear(&dev->srq_table.srq, | 341 | mthca_array_clear(&dev->srq_table.srq, |
331 | srq->srqn & (dev->limits.num_srqs - 1)); | 342 | srq->srqn & (dev->limits.num_srqs - 1)); |
343 | --srq->refcount; | ||
332 | spin_unlock_irq(&dev->srq_table.lock); | 344 | spin_unlock_irq(&dev->srq_table.lock); |
333 | 345 | ||
334 | atomic_dec(&srq->refcount); | 346 | wait_event(srq->wait, !get_srq_refcount(dev, srq)); |
335 | wait_event(srq->wait, !atomic_read(&srq->refcount)); | ||
336 | 347 | ||
337 | if (!srq->ibsrq.uobject) { | 348 | if (!srq->ibsrq.uobject) { |
338 | mthca_free_srq_buf(dev, srq); | 349 | mthca_free_srq_buf(dev, srq); |
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
414 | spin_lock(&dev->srq_table.lock); | 425 | spin_lock(&dev->srq_table.lock); |
415 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); | 426 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); |
416 | if (srq) | 427 | if (srq) |
417 | atomic_inc(&srq->refcount); | 428 | ++srq->refcount; |
418 | spin_unlock(&dev->srq_table.lock); | 429 | spin_unlock(&dev->srq_table.lock); |
419 | 430 | ||
420 | if (!srq) { | 431 | if (!srq) { |
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
431 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); | 442 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); |
432 | 443 | ||
433 | out: | 444 | out: |
434 | if (atomic_dec_and_test(&srq->refcount)) | 445 | spin_lock(&dev->srq_table.lock); |
446 | if (!--srq->refcount) | ||
435 | wake_up(&srq->wait); | 447 | wake_up(&srq->wait); |
448 | spin_unlock(&dev->srq_table.lock); | ||
436 | } | 449 | } |
437 | 450 | ||
438 | /* | 451 | /* |
@@ -477,26 +490,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
477 | 490 | ||
478 | first_ind = srq->first_free; | 491 | first_ind = srq->first_free; |
479 | 492 | ||
480 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 493 | for (nreq = 0; wr; wr = wr->next) { |
481 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
482 | nreq = 0; | ||
483 | |||
484 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | ||
485 | doorbell[1] = cpu_to_be32(srq->srqn << 8); | ||
486 | |||
487 | /* | ||
488 | * Make sure that descriptors are written | ||
489 | * before doorbell is rung. | ||
490 | */ | ||
491 | wmb(); | ||
492 | |||
493 | mthca_write64(doorbell, | ||
494 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
495 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
496 | |||
497 | first_ind = srq->first_free; | ||
498 | } | ||
499 | |||
500 | ind = srq->first_free; | 494 | ind = srq->first_free; |
501 | 495 | ||
502 | if (ind < 0) { | 496 | if (ind < 0) { |
@@ -556,6 +550,26 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
556 | 550 | ||
557 | srq->wrid[ind] = wr->wr_id; | 551 | srq->wrid[ind] = wr->wr_id; |
558 | srq->first_free = next_ind; | 552 | srq->first_free = next_ind; |
553 | |||
554 | ++nreq; | ||
555 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
556 | nreq = 0; | ||
557 | |||
558 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | ||
559 | doorbell[1] = cpu_to_be32(srq->srqn << 8); | ||
560 | |||
561 | /* | ||
562 | * Make sure that descriptors are written | ||
563 | * before doorbell is rung. | ||
564 | */ | ||
565 | wmb(); | ||
566 | |||
567 | mthca_write64(doorbell, | ||
568 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
569 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
570 | |||
571 | first_ind = srq->first_free; | ||
572 | } | ||
559 | } | 573 | } |
560 | 574 | ||
561 | if (likely(nreq)) { | 575 | if (likely(nreq)) { |