Diffstat (limited to 'drivers/infiniband')
26 files changed, 333 insertions, 231 deletions
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
336 | switch (width) { | 336 | switch (width) { |
337 | case 4: | 337 | case 4: |
338 | ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> | 338 | ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> |
339 | (offset % 4)) & 0xf); | 339 | (4 - (offset % 8))) & 0xf); |
340 | break; | 340 | break; |
341 | case 8: | 341 | case 8: |
342 | ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); | 342 | ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); |
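A note on the hunk above: the 4-bit PMA counters are packed two per byte in the PortCounters MAD payload, and their offsets are nibble-aligned, so the old shift of (offset % 4) was always zero and the low nibble was returned even for counters stored in the high nibble. A small userspace sketch of the corrected extraction (the helper name and test data are invented; only the 40-byte payload skip and the nibble arithmetic come from the hunk):

    #include <stdio.h>

    static unsigned int read_nibble(const unsigned char *data, unsigned int offset)
    {
            /* offset % 8 is 0 for the high nibble, 4 for the low nibble */
            return (data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
    }

    int main(void)
    {
            unsigned char data[64] = { 0 };

            data[40] = 0xab;
            printf("%u %u\n", read_nibble(data, 0), read_nibble(data, 4)); /* prints "10 11" */
            return 0;
    }
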
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 593e28969c69..46762387f5f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,11 +60,11 @@
60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ | 60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ |
61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ | 61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ |
62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ | 62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ |
63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */ | 63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ |
64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */ | 64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ |
65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */ | 65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ |
66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */ | 66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */ |
67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */ | 67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */ |
68 | 68 | ||
69 | #else /* _IPATH_DEBUGGING */ | 69 | #else /* _IPATH_DEBUGGING */ |
70 | 70 | ||
@@ -79,11 +79,12 @@ | |||
79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ | 79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ |
80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ | 80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ |
81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ | 81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ |
82 | #define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */ | 82 | #define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */ |
83 | /* print mmap/nopage stuff, not using VDBG any more */ | 83 | /* print mmap/nopage stuff, not using VDBG any more */ |
84 | #define __IPATH_MMDBG 0x0 | 84 | #define __IPATH_MMDBG 0x0 |
85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ | 85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ |
86 | #define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | 86 | #define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */ |
87 | #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | ||
87 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ | 88 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ |
88 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ | 89 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ |
89 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ | 90 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 7d3fb6996b41..28ddceb260e8 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
277 | 277 | ||
278 | bail: | 278 | bail: |
279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
280 | mutex_unlock(&ipath_mutex); | ||
281 | 280 | ||
282 | /* Only expose a way to reset the device if we | 281 | /* Only expose a way to reset the device if we |
283 | make it into diag mode. */ | 282 | make it into diag mode. */ |
284 | if (ret == 0) | 283 | if (ret == 0) |
285 | ipath_expose_reset(&dd->pcidev->dev); | 284 | ipath_expose_reset(&dd->pcidev->dev); |
286 | 285 | ||
286 | mutex_unlock(&ipath_mutex); | ||
287 | |||
287 | return ret; | 288 | return ret; |
288 | } | 289 | } |
289 | 290 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index e7617c3982ea..398add4d4cb1 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -418,9 +418,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
418 | 418 | ||
419 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | 419 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); |
420 | if (ret) { | 420 | if (ret) { |
421 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | 421 | /* |
422 | "fails: %d\n", dd->ipath_unit, ret); | 422 | * if the 64 bit setup fails, try 32 bit. Some systems |
423 | goto bail_regions; | 423 | * do not setup 64 bit maps on systems with 2GB or less |
424 | * memory installed. | ||
425 | */ | ||
426 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
427 | if (ret) { | ||
428 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | ||
429 | "fails: %d\n", dd->ipath_unit, ret); | ||
430 | goto bail_regions; | ||
431 | } | ||
432 | else | ||
433 | ipath_dbg("No 64bit DMA mask, used 32 bit mask\n"); | ||
424 | } | 434 | } |
425 | 435 | ||
426 | pci_set_master(pdev); | 436 | pci_set_master(pdev); |
@@ -1949,7 +1959,7 @@ int ipath_reset_device(int unit) | |||
1949 | } | 1959 | } |
1950 | 1960 | ||
1951 | if (dd->ipath_pd) | 1961 | if (dd->ipath_pd) |
1952 | for (i = 1; i < dd->ipath_portcnt; i++) { | 1962 | for (i = 1; i < dd->ipath_cfgports; i++) { |
1953 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { | 1963 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { |
1954 | ipath_dbg("unit %u port %d is in use " | 1964 | ipath_dbg("unit %u port %d is in use " |
1955 | "(PID %u cmd %s), can't reset\n", | 1965 | "(PID %u cmd %s), can't reset\n", |
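Read linearly, the new probe path in ipath_init_one() tries the 64-bit DMA mask first and only fails the probe if the 32-bit fallback is also rejected. A sketch of the resulting control flow (a paraphrase of the added lines, not a verbatim copy; the bail_regions label belongs to the surrounding function):

    ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
    if (ret) {
            /* some systems with 2GB or less refuse to set up 64-bit maps */
            ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
            if (ret) {
                    dev_info(&pdev->dev, "pci_set_dma_mask unit %u fails: %d\n",
                             dd->ipath_unit, ret);
                    goto bail_regions;
            }
            ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
    }
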
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 2823ff9c0c62..16f640e1c16e 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Number of buffers reserved for driver (layered drivers and SMA | 55 | * Number of buffers reserved for driver (layered drivers and SMA |
56 | * send). Reserved at end of buffer list. | 56 | * send). Reserved at end of buffer list. Initialized based on |
57 | * number of PIO buffers if not set via module interface. | ||
58 | * The problem with this is that it's global, but we'll use different | ||
59 | * numbers for different chip types. So the default value is not | ||
60 | * very useful. I've redefined it for the 1.3 release so that it's | ||
61 | * zero unless set by the user to something else, in which case we | ||
62 | * try to respect it. | ||
57 | */ | 63 | */ |
58 | static ushort ipath_kpiobufs = 32; | 64 | static ushort ipath_kpiobufs; |
59 | 65 | ||
60 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); | 66 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); |
61 | 67 | ||
62 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint, | 68 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, |
63 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); | 69 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); |
64 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); | 70 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); |
65 | 71 | ||
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
531 | * Don't clear ipath_flags as 8bit mode was set before | 537 | * Don't clear ipath_flags as 8bit mode was set before |
532 | * entering this func. However, we do set the linkstate to | 538 | * entering this func. However, we do set the linkstate to |
533 | * unknown, so we can watch for a transition. | 539 | * unknown, so we can watch for a transition. |
540 | * PRESENT is set because we want register reads to work, | ||
541 | * and the kernel infrastructure saw it in config space; | ||
542 | * We clear it if we have failures. | ||
534 | */ | 543 | */ |
535 | dd->ipath_flags |= IPATH_LINKUNK; | 544 | dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; |
536 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | | 545 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | |
537 | IPATH_LINKDOWN | IPATH_LINKINIT); | 546 | IPATH_LINKDOWN | IPATH_LINKINIT); |
538 | 547 | ||
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
560 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { | 569 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { |
561 | ipath_dev_err(dd, "Register read failures from chip, " | 570 | ipath_dev_err(dd, "Register read failures from chip, " |
562 | "giving up initialization\n"); | 571 | "giving up initialization\n"); |
572 | dd->ipath_flags &= ~IPATH_PRESENT; | ||
563 | ret = -ENODEV; | 573 | ret = -ENODEV; |
564 | goto done; | 574 | goto done; |
565 | } | 575 | } |
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
682 | */ | 692 | */ |
683 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) | 693 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) |
684 | / (sizeof(u64) * BITS_PER_BYTE / 2); | 694 | / (sizeof(u64) * BITS_PER_BYTE / 2); |
685 | if (!ipath_kpiobufs) /* have to have at least 1, for SMA */ | 695 | if (ipath_kpiobufs == 0) { |
686 | kpiobufs = ipath_kpiobufs = 1; | 696 | /* not set by user, or set explictly to default */ |
687 | else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) < | 697 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) |
688 | (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) { | 698 | kpiobufs = 32; |
689 | dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) " | 699 | else |
690 | "for %u ports to have %u each!\n", | 700 | kpiobufs = 16; |
691 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, | 701 | } |
692 | dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT); | 702 | else |
693 | kpiobufs = 1; /* reserve just the minimum for SMA/ether */ | ||
694 | } else | ||
695 | kpiobufs = ipath_kpiobufs; | 703 | kpiobufs = ipath_kpiobufs; |
696 | 704 | ||
697 | if (kpiobufs > | 705 | if (kpiobufs > |
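The kpiobufs change is easier to follow in its final shape: when the module parameter is left at zero, the driver now picks a default reservation from the chip's total PIO buffer count instead of a fixed 32. Roughly (identifiers as in the diff):

    if (ipath_kpiobufs == 0) {
            /* not set by the user: scale to the number of PIO buffers */
            if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
                    kpiobufs = 32;
            else
                    kpiobufs = 16;
    } else
            kpiobufs = ipath_kpiobufs;
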
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 0bcb428041f3..3e72a1fe3d73 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
665 | 665 | ||
666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); | 666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); |
667 | if (ret > 0) | 667 | if (ret > 0) |
668 | goto clear; | 668 | goto set; |
669 | 669 | ||
670 | ret = __ipath_verbs_piobufavail(dd); | 670 | ret = __ipath_verbs_piobufavail(dd); |
671 | if (ret > 0) | 671 | if (ret > 0) |
672 | goto clear; | 672 | goto set; |
673 | 673 | ||
674 | return; | 674 | return; |
675 | clear: | 675 | set: |
676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); | 676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); |
677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
678 | dd->ipath_sendctrl); | 678 | dd->ipath_sendctrl); |
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat) | |||
719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | 719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) |
720 | { | 720 | { |
721 | struct ipath_devdata *dd = data; | 721 | struct ipath_devdata *dd = data; |
722 | u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | 722 | u32 istat; |
723 | ipath_err_t estat = 0; | 723 | ipath_err_t estat = 0; |
724 | static unsigned unexpected = 0; | 724 | static unsigned unexpected = 0; |
725 | irqreturn_t ret; | 725 | irqreturn_t ret; |
726 | 726 | ||
727 | if(!(dd->ipath_flags & IPATH_PRESENT)) { | ||
728 | /* this is mostly so we don't try to touch the chip while | ||
729 | * it is being reset */ | ||
730 | /* | ||
731 | * This return value is perhaps odd, but we do not want the | ||
732 | * interrupt core code to remove our interrupt handler | ||
733 | * because we don't appear to be handling an interrupt | ||
734 | * during a chip reset. | ||
735 | */ | ||
736 | return IRQ_HANDLED; | ||
737 | } | ||
738 | |||
739 | istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | ||
727 | if (unlikely(!istat)) { | 740 | if (unlikely(!istat)) { |
728 | ipath_stats.sps_nullintr++; | 741 | ipath_stats.sps_nullintr++; |
729 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | 742 | ret = IRQ_NONE; /* not our interrupt, or already handled */ |
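The new preamble of ipath_intr() keeps the handler from touching registers while the chip is held in reset; cleaned up from the added lines above:

    if (!(dd->ipath_flags & IPATH_PRESENT)) {
            /*
             * Chip is being reset.  Claim the interrupt anyway so the
             * interrupt core does not unregister the handler for what
             * looks like a run of unhandled interrupts.
             */
            return IRQ_HANDLED;
    }

    istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
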
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 0ce5f19c9d62..e6507f8115bc 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -731,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
731 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, | 731 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, |
732 | ipath_ureg regno, int port) | 732 | ipath_ureg regno, int port) |
733 | { | 733 | { |
734 | if (!dd->ipath_kregbase) | 734 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
735 | return 0; | 735 | return 0; |
736 | 736 | ||
737 | return readl(regno + (u64 __iomem *) | 737 | return readl(regno + (u64 __iomem *) |
@@ -762,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd, | |||
762 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | 762 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, |
763 | ipath_kreg regno) | 763 | ipath_kreg regno) |
764 | { | 764 | { |
765 | if (!dd->ipath_kregbase) | 765 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
766 | return -1; | 766 | return -1; |
767 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); | 767 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); |
768 | } | 768 | } |
@@ -770,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | |||
770 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, | 770 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, |
771 | ipath_kreg regno) | 771 | ipath_kreg regno) |
772 | { | 772 | { |
773 | if (!dd->ipath_kregbase) | 773 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
774 | return -1; | 774 | return -1; |
775 | 775 | ||
776 | return readq(&dd->ipath_kregbase[regno]); | 776 | return readq(&dd->ipath_kregbase[regno]); |
@@ -786,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd, | |||
786 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | 786 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, |
787 | ipath_sreg regno) | 787 | ipath_sreg regno) |
788 | { | 788 | { |
789 | if (!dd->ipath_kregbase) | 789 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
790 | return 0; | 790 | return 0; |
791 | 791 | ||
792 | return readq(regno + (u64 __iomem *) | 792 | return readq(regno + (u64 __iomem *) |
@@ -797,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | |||
797 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, | 797 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, |
798 | ipath_sreg regno) | 798 | ipath_sreg regno) |
799 | { | 799 | { |
800 | if (!dd->ipath_kregbase) | 800 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
801 | return 0; | 801 | return 0; |
802 | return readl(regno + (u64 __iomem *) | 802 | return readl(regno + (u64 __iomem *) |
803 | (dd->ipath_cregbase + | 803 | (dd->ipath_cregbase + |
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 69ed1100701a..9cb5258ffed9 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -46,13 +46,15 @@
46 | /* Acquire before ipath_devs_lock. */ | 46 | /* Acquire before ipath_devs_lock. */ |
47 | static DEFINE_MUTEX(ipath_layer_mutex); | 47 | static DEFINE_MUTEX(ipath_layer_mutex); |
48 | 48 | ||
49 | static int ipath_verbs_registered; | ||
50 | |||
49 | u16 ipath_layer_rcv_opcode; | 51 | u16 ipath_layer_rcv_opcode; |
52 | |||
50 | static int (*layer_intr)(void *, u32); | 53 | static int (*layer_intr)(void *, u32); |
51 | static int (*layer_rcv)(void *, void *, struct sk_buff *); | 54 | static int (*layer_rcv)(void *, void *, struct sk_buff *); |
52 | static int (*layer_rcv_lid)(void *, void *); | 55 | static int (*layer_rcv_lid)(void *, void *); |
53 | static int (*verbs_piobufavail)(void *); | 56 | static int (*verbs_piobufavail)(void *); |
54 | static void (*verbs_rcv)(void *, void *, void *, u32); | 57 | static void (*verbs_rcv)(void *, void *, void *, u32); |
55 | static int ipath_verbs_registered; | ||
56 | 58 | ||
57 | static void *(*layer_add_one)(int, struct ipath_devdata *); | 59 | static void *(*layer_add_one)(int, struct ipath_devdata *); |
58 | static void (*layer_remove_one)(void *); | 60 | static void (*layer_remove_one)(void *); |
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void) | |||
586 | verbs_rcv = NULL; | 588 | verbs_rcv = NULL; |
587 | verbs_timer_cb = NULL; | 589 | verbs_timer_cb = NULL; |
588 | 590 | ||
591 | ipath_verbs_registered = 0; | ||
592 | |||
589 | mutex_unlock(&ipath_layer_mutex); | 593 | mutex_unlock(&ipath_layer_mutex); |
590 | } | 594 | } |
591 | 595 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e1dc4f757062..6318067ab5ec 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
972 | /* Use ERROR so it shows up in logs, etc. */ | 972 | /* Use ERROR so it shows up in logs, etc. */ |
973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", | 973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", |
974 | dd->ipath_unit); | 974 | dd->ipath_unit); |
975 | /* keep chip from being accessed in a few places */ | ||
976 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); | ||
975 | val = dd->ipath_control | INFINIPATH_C_RESET; | 977 | val = dd->ipath_control | INFINIPATH_C_RESET; |
976 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); | 978 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); |
977 | mb(); | 979 | mb(); |
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
997 | if ((r = pci_enable_device(dd->pcidev))) | 999 | if ((r = pci_enable_device(dd->pcidev))) |
998 | ipath_dev_err(dd, "pci_enable_device failed after " | 1000 | ipath_dev_err(dd, "pci_enable_device failed after " |
999 | "reset: %d\n", r); | 1001 | "reset: %d\n", r); |
1002 | /* whether it worked or not, mark as present, again */ | ||
1003 | dd->ipath_flags |= IPATH_PRESENT; | ||
1000 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); | 1004 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); |
1001 | if (val == dd->ipath_revision) { | 1005 | if (val == dd->ipath_revision) { |
1002 | ipath_cdbg(VERBOSE, "Got matching revision " | 1006 | ipath_cdbg(VERBOSE, "Got matching revision " |
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 1e59750c5f63..402126eb79c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -34,8 +34,9 @@
34 | #define _IPATH_REGISTERS_H | 34 | #define _IPATH_REGISTERS_H |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * This file should only be included by kernel source, and by the diags. | 37 | * This file should only be included by kernel source, and by the diags. It |
38 | * It defines the registers, and their contents, for the InfiniPath HT-400 chip | 38 | * defines the registers, and their contents, for the InfiniPath HT-400 |
39 | * chip. | ||
39 | */ | 40 | */ |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -156,8 +157,10 @@ | |||
156 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 | 157 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 |
157 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL | 158 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL |
158 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 | 159 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 |
159 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */ | 160 | /* cycle through TS1/TS2 till OK */ |
160 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */ | 161 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 |
162 | /* wait for TS1, then go on */ | ||
163 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 | ||
161 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 | 164 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 |
162 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL | 165 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL |
163 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ | 166 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ |
@@ -182,7 +185,8 @@ | |||
182 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 | 185 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 |
183 | #define INFINIPATH_IBCS_TXREADY 0x40000000 | 186 | #define INFINIPATH_IBCS_TXREADY 0x40000000 |
184 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 | 187 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 |
185 | /* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | 188 | /* link training states (shift by |
189 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | ||
186 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 | 190 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 |
187 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 | 191 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 |
188 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 | 192 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 |
@@ -267,10 +271,12 @@ | |||
267 | /* kr_serdesconfig0 bits */ | 271 | /* kr_serdesconfig0 bits */ |
268 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */ | 272 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */ |
269 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ | 273 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ |
270 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */ | 274 | /* tx idle enables (per lane) */ |
271 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */ | 275 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL |
272 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT, | 276 | /* rx detect enables (per lane) */ |
273 | Otherwise not used on IB side */ | 277 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL |
278 | /* L1 Power down; use with RXDETECT, Otherwise not used on IB side */ | ||
279 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL | ||
274 | 280 | ||
275 | /* kr_xgxsconfig bits */ | 281 | /* kr_xgxsconfig bits */ |
276 | #define INFINIPATH_XGXS_RESET 0x7ULL | 282 | #define INFINIPATH_XGXS_RESET 0x7ULL |
@@ -390,12 +396,13 @@ struct ipath_kregs { | |||
390 | ipath_kreg kr_txintmemsize; | 396 | ipath_kreg kr_txintmemsize; |
391 | ipath_kreg kr_xgxsconfig; | 397 | ipath_kreg kr_xgxsconfig; |
392 | ipath_kreg kr_ibpllcfg; | 398 | ipath_kreg kr_ibpllcfg; |
393 | /* use these two (and the following N ports) only with ipath_k*_kreg64_port(); | 399 | /* use these two (and the following N ports) only with |
394 | * not *kreg64() */ | 400 | * ipath_k*_kreg64_port(); not *kreg64() */ |
395 | ipath_kreg kr_rcvhdraddr; | 401 | ipath_kreg kr_rcvhdraddr; |
396 | ipath_kreg kr_rcvhdrtailaddr; | 402 | ipath_kreg kr_rcvhdrtailaddr; |
397 | 403 | ||
398 | /* remaining registers are not present on all types of infinipath chips */ | 404 | /* remaining registers are not present on all types of infinipath |
405 | chips */ | ||
399 | ipath_kreg kr_rcvpktledcnt; | 406 | ipath_kreg kr_rcvpktledcnt; |
400 | ipath_kreg kr_pcierbuftestreg0; | 407 | ipath_kreg kr_pcierbuftestreg0; |
401 | ipath_kreg kr_pcierbuftestreg1; | 408 | ipath_kreg kr_pcierbuftestreg1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f232e77b78ee..eb81424b3c5b 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
531 | } | 531 | } |
532 | wqe->wr.num_sge = j; | 532 | wqe->wr.num_sge = j; |
533 | qp->s_head = next; | 533 | qp->s_head = next; |
534 | /* | ||
535 | * Wake up the send tasklet if the QP is not waiting | ||
536 | * for an RNR timeout. | ||
537 | */ | ||
538 | next = qp->s_rnr_timeout; | ||
539 | spin_unlock_irqrestore(&qp->s_lock, flags); | 534 | spin_unlock_irqrestore(&qp->s_lock, flags); |
540 | 535 | ||
541 | if (next == 0) { | 536 | if (qp->ibqp.qp_type == IB_QPT_UC) |
542 | if (qp->ibqp.qp_type == IB_QPT_UC) | 537 | ipath_do_uc_send((unsigned long) qp); |
543 | ipath_do_uc_send((unsigned long) qp); | 538 | else |
544 | else | 539 | ipath_do_rc_send((unsigned long) qp); |
545 | ipath_do_rc_send((unsigned long) qp); | ||
546 | } | ||
547 | 540 | ||
548 | ret = 0; | 541 | ret = 0; |
549 | 542 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 32acd8048b49..f323791cc495 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = {
711 | * enters diag mode. A device reset is quite likely to crash the | 711 | * enters diag mode. A device reset is quite likely to crash the |
712 | * machine entirely, so we don't want to normally make it | 712 | * machine entirely, so we don't want to normally make it |
713 | * available. | 713 | * available. |
714 | * | ||
715 | * Called with ipath_mutex held. | ||
714 | */ | 716 | */ |
715 | int ipath_expose_reset(struct device *dev) | 717 | int ipath_expose_reset(struct device *dev) |
716 | { | 718 | { |
717 | return device_create_file(dev, &dev_attr_reset); | 719 | static int exposed; |
720 | int ret; | ||
721 | |||
722 | if (!exposed) { | ||
723 | ret = device_create_file(dev, &dev_attr_reset); | ||
724 | exposed = 1; | ||
725 | } | ||
726 | else | ||
727 | ret = 0; | ||
728 | |||
729 | return ret; | ||
718 | } | 730 | } |
719 | 731 | ||
720 | int ipath_driver_create_group(struct device_driver *drv) | 732 | int ipath_driver_create_group(struct device_driver *drv) |
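Since ipath_diag_open() may now reach ipath_expose_reset() more than once, the function is made idempotent with a static flag; the added comment documents that the caller-held ipath_mutex is what makes the flag safe. Restated without the diff markup:

    int ipath_expose_reset(struct device *dev)
    {
            static int exposed;     /* serialized by ipath_mutex in the caller */
            int ret = 0;

            if (!exposed) {
                    ret = device_create_file(dev, &dev_attr_reset);
                    exposed = 1;
            }
            return ret;
    }
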
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 01cfb30ee160..e606daf83210 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -46,8 +46,10 @@
46 | * This is called from ipath_post_ud_send() to forward a WQE addressed | 46 | * This is called from ipath_post_ud_send() to forward a WQE addressed |
47 | * to the same HCA. | 47 | * to the same HCA. |
48 | */ | 48 | */ |
49 | static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, | 49 | static void ipath_ud_loopback(struct ipath_qp *sqp, |
50 | u32 length, struct ib_send_wr *wr, struct ib_wc *wc) | 50 | struct ipath_sge_state *ss, |
51 | u32 length, struct ib_send_wr *wr, | ||
52 | struct ib_wc *wc) | ||
51 | { | 53 | { |
52 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); | 54 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); |
53 | struct ipath_qp *qp; | 55 | struct ipath_qp *qp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 8d2558a01f35..cb9e387c301f 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg)
449 | { | 449 | { |
450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | 450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; |
451 | struct ipath_qp *resend = NULL; | 451 | struct ipath_qp *resend = NULL; |
452 | struct ipath_qp *rnr = NULL; | ||
453 | struct list_head *last; | 452 | struct list_head *last; |
454 | struct ipath_qp *qp; | 453 | struct ipath_qp *qp; |
455 | unsigned long flags; | 454 | unsigned long flags; |
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg) | |||
465 | last = &dev->pending[dev->pending_index]; | 464 | last = &dev->pending[dev->pending_index]; |
466 | while (!list_empty(last)) { | 465 | while (!list_empty(last)) { |
467 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 466 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
468 | if (last->next == LIST_POISON1 || | 467 | list_del(&qp->timerwait); |
469 | last->next != &qp->timerwait || | 468 | qp->timer_next = resend; |
470 | qp->timerwait.prev != last) { | 469 | resend = qp; |
471 | INIT_LIST_HEAD(last); | 470 | atomic_inc(&qp->refcount); |
472 | } else { | ||
473 | list_del(&qp->timerwait); | ||
474 | qp->timerwait.prev = (struct list_head *) resend; | ||
475 | resend = qp; | ||
476 | atomic_inc(&qp->refcount); | ||
477 | } | ||
478 | } | 471 | } |
479 | last = &dev->rnrwait; | 472 | last = &dev->rnrwait; |
480 | if (!list_empty(last)) { | 473 | if (!list_empty(last)) { |
481 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 474 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
482 | if (--qp->s_rnr_timeout == 0) { | 475 | if (--qp->s_rnr_timeout == 0) { |
483 | do { | 476 | do { |
484 | if (last->next == LIST_POISON1 || | ||
485 | last->next != &qp->timerwait || | ||
486 | qp->timerwait.prev != last) { | ||
487 | INIT_LIST_HEAD(last); | ||
488 | break; | ||
489 | } | ||
490 | list_del(&qp->timerwait); | 477 | list_del(&qp->timerwait); |
491 | qp->timerwait.prev = | 478 | tasklet_hi_schedule(&qp->s_task); |
492 | (struct list_head *) rnr; | ||
493 | rnr = qp; | ||
494 | if (list_empty(last)) | 479 | if (list_empty(last)) |
495 | break; | 480 | break; |
496 | qp = list_entry(last->next, struct ipath_qp, | 481 | qp = list_entry(last->next, struct ipath_qp, |
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg) | |||
530 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 515 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
531 | 516 | ||
532 | /* XXX What if timer fires again while this is running? */ | 517 | /* XXX What if timer fires again while this is running? */ |
533 | for (qp = resend; qp != NULL; | 518 | for (qp = resend; qp != NULL; qp = qp->timer_next) { |
534 | qp = (struct ipath_qp *) qp->timerwait.prev) { | ||
535 | struct ib_wc wc; | 519 | struct ib_wc wc; |
536 | 520 | ||
537 | spin_lock_irqsave(&qp->s_lock, flags); | 521 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg) | |||
545 | if (atomic_dec_and_test(&qp->refcount)) | 529 | if (atomic_dec_and_test(&qp->refcount)) |
546 | wake_up(&qp->wait); | 530 | wake_up(&qp->wait); |
547 | } | 531 | } |
548 | for (qp = rnr; qp != NULL; | ||
549 | qp = (struct ipath_qp *) qp->timerwait.prev) | ||
550 | tasklet_hi_schedule(&qp->s_task); | ||
551 | } | 532 | } |
552 | 533 | ||
553 | /** | 534 | /** |
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg) | |||
556 | * | 537 | * |
557 | * This is called from ipath_intr() at interrupt level when a PIO buffer is | 538 | * This is called from ipath_intr() at interrupt level when a PIO buffer is |
558 | * available after ipath_verbs_send() returned an error that no buffers were | 539 | * available after ipath_verbs_send() returned an error that no buffers were |
559 | * available. Return 0 if we consumed all the PIO buffers and we still have | 540 | * available. Return 1 if we consumed all the PIO buffers and we still have |
560 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and | 541 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and |
561 | * return one). | 542 | * return zero). |
562 | */ | 543 | */ |
563 | static int ipath_ib_piobufavail(void *arg) | 544 | static int ipath_ib_piobufavail(void *arg) |
564 | { | 545 | { |
@@ -579,7 +560,7 @@ static int ipath_ib_piobufavail(void *arg) | |||
579 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 560 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
580 | 561 | ||
581 | bail: | 562 | bail: |
582 | return 1; | 563 | return 0; |
583 | } | 564 | } |
584 | 565 | ||
585 | static int ipath_query_device(struct ib_device *ibdev, | 566 | static int ipath_query_device(struct ib_device *ibdev, |
@@ -1159,7 +1140,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf) | |||
1159 | 1140 | ||
1160 | len = sprintf(buf, | 1141 | len = sprintf(buf, |
1161 | "RC resends %d\n" | 1142 | "RC resends %d\n" |
1162 | "RC QACKs %d\n" | 1143 | "RC no QACK %d\n" |
1163 | "RC ACKs %d\n" | 1144 | "RC ACKs %d\n" |
1164 | "RC SEQ NAKs %d\n" | 1145 | "RC SEQ NAKs %d\n" |
1165 | "RC RDMA seq %d\n" | 1146 | "RC RDMA seq %d\n" |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index fcafbc7c9e71..4f8d59300e9b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -282,7 +282,8 @@ struct ipath_srq {
282 | */ | 282 | */ |
283 | struct ipath_qp { | 283 | struct ipath_qp { |
284 | struct ib_qp ibqp; | 284 | struct ib_qp ibqp; |
285 | struct ipath_qp *next; /* link list for QPN hash table */ | 285 | struct ipath_qp *next; /* link list for QPN hash table */ |
286 | struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */ | ||
286 | struct list_head piowait; /* link for wait PIO buf */ | 287 | struct list_head piowait; /* link for wait PIO buf */ |
287 | struct list_head timerwait; /* link for waiting for timeouts */ | 288 | struct list_head timerwait; /* link for waiting for timeouts */ |
288 | struct ib_ah_attr remote_ah_attr; | 289 | struct ib_ah_attr remote_ah_attr; |
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
index 410a764dfcef..ab7cbbbfd03a 100644
--- a/drivers/infiniband/hw/ipath/ips_common.h
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -95,7 +95,7 @@ struct ether_header {
95 | __u8 seq_num; | 95 | __u8 seq_num; |
96 | __le32 len; | 96 | __le32 len; |
97 | /* MUST be of word size due to PIO write requirements */ | 97 | /* MUST be of word size due to PIO write requirements */ |
98 | __u32 csum; | 98 | __le32 csum; |
99 | __le16 csum_offset; | 99 | __le16 csum_offset; |
100 | __le16 flags; | 100 | __le16 flags; |
101 | __u16 first_2_bytes; | 101 | __u16 first_2_bytes; |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
238 | spin_lock(&dev->cq_table.lock); | 238 | spin_lock(&dev->cq_table.lock); |
239 | 239 | ||
240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | 240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); |
241 | |||
242 | if (cq) | 241 | if (cq) |
243 | atomic_inc(&cq->refcount); | 242 | ++cq->refcount; |
243 | |||
244 | spin_unlock(&dev->cq_table.lock); | 244 | spin_unlock(&dev->cq_table.lock); |
245 | 245 | ||
246 | if (!cq) { | 246 | if (!cq) { |
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | |||
254 | if (cq->ibcq.event_handler) | 254 | if (cq->ibcq.event_handler) |
255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); | 255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); |
256 | 256 | ||
257 | if (atomic_dec_and_test(&cq->refcount)) | 257 | spin_lock(&dev->cq_table.lock); |
258 | if (!--cq->refcount) | ||
258 | wake_up(&cq->wait); | 259 | wake_up(&cq->wait); |
260 | spin_unlock(&dev->cq_table.lock); | ||
259 | } | 261 | } |
260 | 262 | ||
261 | static inline int is_recv_cqe(struct mthca_cqe *cqe) | 263 | static inline int is_recv_cqe(struct mthca_cqe *cqe) |
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe) | |||
267 | return !(cqe->is_send & 0x80); | 269 | return !(cqe->is_send & 0x80); |
268 | } | 270 | } |
269 | 271 | ||
270 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 272 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
271 | struct mthca_srq *srq) | 273 | struct mthca_srq *srq) |
272 | { | 274 | { |
273 | struct mthca_cq *cq; | ||
274 | struct mthca_cqe *cqe; | 275 | struct mthca_cqe *cqe; |
275 | u32 prod_index; | 276 | u32 prod_index; |
276 | int nfreed = 0; | 277 | int nfreed = 0; |
277 | 278 | ||
278 | spin_lock_irq(&dev->cq_table.lock); | ||
279 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | ||
280 | if (cq) | ||
281 | atomic_inc(&cq->refcount); | ||
282 | spin_unlock_irq(&dev->cq_table.lock); | ||
283 | |||
284 | if (!cq) | ||
285 | return; | ||
286 | |||
287 | spin_lock_irq(&cq->lock); | 279 | spin_lock_irq(&cq->lock); |
288 | 280 | ||
289 | /* | 281 | /* |
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
301 | 293 | ||
302 | if (0) | 294 | if (0) |
303 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", | 295 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", |
304 | qpn, cqn, cq->cons_index, prod_index); | 296 | qpn, cq->cqn, cq->cons_index, prod_index); |
305 | 297 | ||
306 | /* | 298 | /* |
307 | * Now sweep backwards through the CQ, removing CQ entries | 299 | * Now sweep backwards through the CQ, removing CQ entries |
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
325 | } | 317 | } |
326 | 318 | ||
327 | spin_unlock_irq(&cq->lock); | 319 | spin_unlock_irq(&cq->lock); |
328 | if (atomic_dec_and_test(&cq->refcount)) | ||
329 | wake_up(&cq->wait); | ||
330 | } | 320 | } |
331 | 321 | ||
332 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) | 322 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) |
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, | |||
821 | } | 811 | } |
822 | 812 | ||
823 | spin_lock_init(&cq->lock); | 813 | spin_lock_init(&cq->lock); |
824 | atomic_set(&cq->refcount, 1); | 814 | cq->refcount = 1; |
825 | init_waitqueue_head(&cq->wait); | 815 | init_waitqueue_head(&cq->wait); |
826 | 816 | ||
827 | memset(cq_context, 0, sizeof *cq_context); | 817 | memset(cq_context, 0, sizeof *cq_context); |
@@ -896,6 +886,17 @@ err_out: | |||
896 | return err; | 886 | return err; |
897 | } | 887 | } |
898 | 888 | ||
889 | static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) | ||
890 | { | ||
891 | int c; | ||
892 | |||
893 | spin_lock_irq(&dev->cq_table.lock); | ||
894 | c = cq->refcount; | ||
895 | spin_unlock_irq(&dev->cq_table.lock); | ||
896 | |||
897 | return c; | ||
898 | } | ||
899 | |||
899 | void mthca_free_cq(struct mthca_dev *dev, | 900 | void mthca_free_cq(struct mthca_dev *dev, |
900 | struct mthca_cq *cq) | 901 | struct mthca_cq *cq) |
901 | { | 902 | { |
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
929 | spin_lock_irq(&dev->cq_table.lock); | 930 | spin_lock_irq(&dev->cq_table.lock); |
930 | mthca_array_clear(&dev->cq_table.cq, | 931 | mthca_array_clear(&dev->cq_table.cq, |
931 | cq->cqn & (dev->limits.num_cqs - 1)); | 932 | cq->cqn & (dev->limits.num_cqs - 1)); |
933 | --cq->refcount; | ||
932 | spin_unlock_irq(&dev->cq_table.lock); | 934 | spin_unlock_irq(&dev->cq_table.lock); |
933 | 935 | ||
934 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) | 936 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) |
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
936 | else | 938 | else |
937 | synchronize_irq(dev->pdev->irq); | 939 | synchronize_irq(dev->pdev->irq); |
938 | 940 | ||
939 | atomic_dec(&cq->refcount); | 941 | wait_event(cq->wait, !get_cq_refcount(dev, cq)); |
940 | wait_event(cq->wait, !atomic_read(&cq->refcount)); | ||
941 | 942 | ||
942 | if (cq->is_kernel) { | 943 | if (cq->is_kernel) { |
943 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); | 944 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); |
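The mthca_cq.c changes replace the atomic_t reference count with a plain int that is only read or written under dev->cq_table.lock, and teardown drops the table's reference inside that lock before sleeping until the count reaches zero. A small userspace analogue of the pattern, with pthreads standing in for the spinlock and wait queue (all names here are invented; initialization of the mutex and condition variable is omitted):

    #include <pthread.h>

    struct obj {
            pthread_mutex_t *table_lock;    /* plays the role of dev->cq_table.lock */
            pthread_cond_t wait;            /* plays the role of cq->wait */
            int refcount;                   /* only touched with table_lock held */
    };

    static void obj_put(struct obj *o)      /* event-handler style release */
    {
            pthread_mutex_lock(o->table_lock);
            if (!--o->refcount)
                    pthread_cond_broadcast(&o->wait);       /* wake_up(&cq->wait) */
            pthread_mutex_unlock(o->table_lock);
    }

    static void obj_destroy(struct obj *o)
    {
            pthread_mutex_lock(o->table_lock);
            /* remove the lookup-table pointer here, then drop its reference */
            --o->refcount;
            while (o->refcount)             /* wait_event(cq->wait, refcount == 0) */
                    pthread_cond_wait(&o->wait, o->table_lock);
            pthread_mutex_unlock(o->table_lock);
    }
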
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); | 496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); |
497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | 497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, |
498 | enum ib_event_type event_type); | 498 | enum ib_event_type event_type); |
499 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 499 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
500 | struct mthca_srq *srq); | 500 | struct mthca_srq *srq); |
501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); | 501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); |
502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); | 502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
761 | 761 | ||
762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) | 762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) |
763 | { | 763 | { |
764 | unsigned long addr; | ||
764 | int err, i; | 765 | int err, i; |
765 | 766 | ||
766 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, | 767 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, |
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
796 | goto err_fmr_mpt; | 797 | goto err_fmr_mpt; |
797 | } | 798 | } |
798 | 799 | ||
800 | addr = pci_resource_start(dev->pdev, 4) + | ||
801 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
802 | dev->mr_table.mpt_base); | ||
803 | |||
799 | dev->mr_table.tavor_fmr.mpt_base = | 804 | dev->mr_table.tavor_fmr.mpt_base = |
800 | ioremap(dev->mr_table.mpt_base, | 805 | ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry)); |
801 | (1 << i) * sizeof (struct mthca_mpt_entry)); | ||
802 | 806 | ||
803 | if (!dev->mr_table.tavor_fmr.mpt_base) { | 807 | if (!dev->mr_table.tavor_fmr.mpt_base) { |
804 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); | 808 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); |
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
806 | goto err_fmr_mpt; | 810 | goto err_fmr_mpt; |
807 | } | 811 | } |
808 | 812 | ||
813 | addr = pci_resource_start(dev->pdev, 4) + | ||
814 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
815 | dev->mr_table.mtt_base); | ||
816 | |||
809 | dev->mr_table.tavor_fmr.mtt_base = | 817 | dev->mr_table.tavor_fmr.mtt_base = |
810 | ioremap(dev->mr_table.mtt_base, | 818 | ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE); |
811 | (1 << i) * MTHCA_MTT_SEG_SIZE); | ||
812 | if (!dev->mr_table.tavor_fmr.mtt_base) { | 819 | if (!dev->mr_table.tavor_fmr.mtt_base) { |
813 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); | 820 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); |
814 | err = -ENOMEM; | 821 | err = -ENOMEM; |
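In mthca_mr.c the FMR tables were previously ioremap()ed at dev->mr_table.mpt_base / mtt_base directly, which are device-side offsets rather than CPU physical addresses; the fix folds each offset into the resource 4 window and adds that BAR's start. A sketch of the MPT half of the computation (field and variable names follow the diff):

    unsigned long addr;

    /* mask the table offset into the BAR 4 window, then rebase it */
    addr = pci_resource_start(dev->pdev, 4) +
           ((pci_resource_len(dev->pdev, 4) - 1) & dev->mr_table.mpt_base);
    dev->mr_table.tavor_fmr.mpt_base =
            ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
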
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 565a24b1756f..a2eae8a30167 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
306 | goto out; | 306 | goto out; |
307 | } | 307 | } |
308 | 308 | ||
309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); | 309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); |
310 | 310 | ||
311 | out: | 311 | out: |
312 | kfree(in_mad); | 312 | kfree(in_mad); |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
139 | * a qp may be locked, with the send cq locked first. No other | 139 | * a qp may be locked, with the send cq locked first. No other |
140 | * nesting should be done. | 140 | * nesting should be done. |
141 | * | 141 | * |
142 | * Each struct mthca_cq/qp also has an atomic_t ref count. The | 142 | * Each struct mthca_cq/qp also has an ref count, protected by the |
143 | * pointer from the cq/qp_table to the struct counts as one reference. | 143 | * corresponding table lock. The pointer from the cq/qp_table to the |
144 | * This reference also is good for access through the consumer API, so | 144 | * struct counts as one reference. This reference also is good for |
145 | * modifying the CQ/QP etc doesn't need to take another reference. | 145 | * access through the consumer API, so modifying the CQ/QP etc doesn't |
146 | * Access because of a completion being polled does need a reference. | 146 | * need to take another reference. Access to a QP because of a |
147 | * completion being polled does not need a reference either. | ||
147 | * | 148 | * |
148 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the | 149 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the |
149 | * destroy function to sleep on. | 150 | * destroy function to sleep on. |
@@ -159,8 +160,9 @@ struct mthca_ah { | |||
159 | * - decrement ref count; if zero, wake up waiters | 160 | * - decrement ref count; if zero, wake up waiters |
160 | * | 161 | * |
161 | * To destroy a CQ/QP, we can do the following: | 162 | * To destroy a CQ/QP, we can do the following: |
162 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | 163 | * - lock cq/qp_table |
163 | * - decrement ref count | 164 | * - remove pointer and decrement ref count |
165 | * - unlock cq/qp_table lock | ||
164 | * - wait_event until ref count is zero | 166 | * - wait_event until ref count is zero |
165 | * | 167 | * |
166 | * It is the consumer's responsibilty to make sure that no QP | 168 | * It is the consumer's responsibilty to make sure that no QP |
@@ -197,7 +199,7 @@ struct mthca_cq_resize { | |||
197 | struct mthca_cq { | 199 | struct mthca_cq { |
198 | struct ib_cq ibcq; | 200 | struct ib_cq ibcq; |
199 | spinlock_t lock; | 201 | spinlock_t lock; |
200 | atomic_t refcount; | 202 | int refcount; |
201 | int cqn; | 203 | int cqn; |
202 | u32 cons_index; | 204 | u32 cons_index; |
203 | struct mthca_cq_buf buf; | 205 | struct mthca_cq_buf buf; |
@@ -217,7 +219,7 @@ struct mthca_cq { | |||
217 | struct mthca_srq { | 219 | struct mthca_srq { |
218 | struct ib_srq ibsrq; | 220 | struct ib_srq ibsrq; |
219 | spinlock_t lock; | 221 | spinlock_t lock; |
220 | atomic_t refcount; | 222 | int refcount; |
221 | int srqn; | 223 | int srqn; |
222 | int max; | 224 | int max; |
223 | int max_gs; | 225 | int max_gs; |
@@ -254,7 +256,7 @@ struct mthca_wq { | |||
254 | 256 | ||
255 | struct mthca_qp { | 257 | struct mthca_qp { |
256 | struct ib_qp ibqp; | 258 | struct ib_qp ibqp; |
257 | atomic_t refcount; | 259 | int refcount; |
258 | u32 qpn; | 260 | u32 qpn; |
259 | int is_direct; | 261 | int is_direct; |
260 | u8 port; /* for SQP and memfree use only */ | 262 | u8 port; /* for SQP and memfree use only */ |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
240 | spin_lock(&dev->qp_table.lock); | 240 | spin_lock(&dev->qp_table.lock); |
241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); | 241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); |
242 | if (qp) | 242 | if (qp) |
243 | atomic_inc(&qp->refcount); | 243 | ++qp->refcount; |
244 | spin_unlock(&dev->qp_table.lock); | 244 | spin_unlock(&dev->qp_table.lock); |
245 | 245 | ||
246 | if (!qp) { | 246 | if (!qp) { |
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
257 | if (qp->ibqp.event_handler) | 257 | if (qp->ibqp.event_handler) |
258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); | 258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); |
259 | 259 | ||
260 | if (atomic_dec_and_test(&qp->refcount)) | 260 | spin_lock(&dev->qp_table.lock); |
261 | if (!--qp->refcount) | ||
261 | wake_up(&qp->wait); | 262 | wake_up(&qp->wait); |
263 | spin_unlock(&dev->qp_table.lock); | ||
262 | } | 264 | } |
263 | 265 | ||
264 | static int to_mthca_state(enum ib_qp_state ib_state) | 266 | static int to_mthca_state(enum ib_qp_state ib_state) |
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | |||
833 | * entries and reinitialize the QP. | 835 | * entries and reinitialize the QP. |
834 | */ | 836 | */ |
835 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { | 837 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { |
836 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 838 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
837 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 839 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
838 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 840 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
839 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 841 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
840 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 842 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
841 | 843 | ||
842 | mthca_wq_init(&qp->sq); | 844 | mthca_wq_init(&qp->sq); |
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1096 | int ret; | 1098 | int ret; |
1097 | int i; | 1099 | int i; |
1098 | 1100 | ||
1099 | atomic_set(&qp->refcount, 1); | 1101 | qp->refcount = 1; |
1100 | init_waitqueue_head(&qp->wait); | 1102 | init_waitqueue_head(&qp->wait); |
1101 | qp->state = IB_QPS_RESET; | 1103 | qp->state = IB_QPS_RESET; |
1102 | qp->atomic_rd_en = 0; | 1104 | qp->atomic_rd_en = 0; |
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev, | |||
1318 | return err; | 1320 | return err; |
1319 | } | 1321 | } |
1320 | 1322 | ||
1323 | static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) | ||
1324 | { | ||
1325 | int c; | ||
1326 | |||
1327 | spin_lock_irq(&dev->qp_table.lock); | ||
1328 | c = qp->refcount; | ||
1329 | spin_unlock_irq(&dev->qp_table.lock); | ||
1330 | |||
1331 | return c; | ||
1332 | } | ||
1333 | |||
1321 | void mthca_free_qp(struct mthca_dev *dev, | 1334 | void mthca_free_qp(struct mthca_dev *dev, |
1322 | struct mthca_qp *qp) | 1335 | struct mthca_qp *qp) |
1323 | { | 1336 | { |
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1339 | spin_lock(&dev->qp_table.lock); | 1352 | spin_lock(&dev->qp_table.lock); |
1340 | mthca_array_clear(&dev->qp_table.qp, | 1353 | mthca_array_clear(&dev->qp_table.qp, |
1341 | qp->qpn & (dev->limits.num_qps - 1)); | 1354 | qp->qpn & (dev->limits.num_qps - 1)); |
1355 | --qp->refcount; | ||
1342 | spin_unlock(&dev->qp_table.lock); | 1356 | spin_unlock(&dev->qp_table.lock); |
1343 | 1357 | ||
1344 | if (send_cq != recv_cq) | 1358 | if (send_cq != recv_cq) |
1345 | spin_unlock(&recv_cq->lock); | 1359 | spin_unlock(&recv_cq->lock); |
1346 | spin_unlock_irq(&send_cq->lock); | 1360 | spin_unlock_irq(&send_cq->lock); |
1347 | 1361 | ||
1348 | atomic_dec(&qp->refcount); | 1362 | wait_event(qp->wait, !get_qp_refcount(dev, qp)); |
1349 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
1350 | 1363 | ||
1351 | if (qp->state != IB_QPS_RESET) | 1364 | if (qp->state != IB_QPS_RESET) |
1352 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, | 1365 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, |
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1358 | * unref the mem-free tables and free the QPN in our table. | 1371 | * unref the mem-free tables and free the QPN in our table. |
1359 | */ | 1372 | */ |
1360 | if (!qp->ibqp.uobject) { | 1373 | if (!qp->ibqp.uobject) { |
1361 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 1374 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
1362 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1375 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1363 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 1376 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
1364 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 1377 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
1365 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1378 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1366 | 1379 | ||
1367 | mthca_free_memfree(dev, qp); | 1380 | mthca_free_memfree(dev, qp); |
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
241 | goto err_out_mailbox; | 241 | goto err_out_mailbox; |
242 | 242 | ||
243 | spin_lock_init(&srq->lock); | 243 | spin_lock_init(&srq->lock); |
244 | atomic_set(&srq->refcount, 1); | 244 | srq->refcount = 1; |
245 | init_waitqueue_head(&srq->wait); | 245 | init_waitqueue_head(&srq->wait); |
246 | 246 | ||
247 | if (mthca_is_memfree(dev)) | 247 | if (mthca_is_memfree(dev)) |
@@ -308,6 +308,17 @@ err_out: | |||
308 | return err; | 308 | return err; |
309 | } | 309 | } |
310 | 310 | ||
311 | static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) | ||
312 | { | ||
313 | int c; | ||
314 | |||
315 | spin_lock_irq(&dev->srq_table.lock); | ||
316 | c = srq->refcount; | ||
317 | spin_unlock_irq(&dev->srq_table.lock); | ||
318 | |||
319 | return c; | ||
320 | } | ||
321 | |||
311 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | 322 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) |
312 | { | 323 | { |
313 | struct mthca_mailbox *mailbox; | 324 | struct mthca_mailbox *mailbox; |
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | |||
329 | spin_lock_irq(&dev->srq_table.lock); | 340 | spin_lock_irq(&dev->srq_table.lock); |
330 | mthca_array_clear(&dev->srq_table.srq, | 341 | mthca_array_clear(&dev->srq_table.srq, |
331 | srq->srqn & (dev->limits.num_srqs - 1)); | 342 | srq->srqn & (dev->limits.num_srqs - 1)); |
343 | --srq->refcount; | ||
332 | spin_unlock_irq(&dev->srq_table.lock); | 344 | spin_unlock_irq(&dev->srq_table.lock); |
333 | 345 | ||
334 | atomic_dec(&srq->refcount); | 346 | wait_event(srq->wait, !get_srq_refcount(dev, srq)); |
335 | wait_event(srq->wait, !atomic_read(&srq->refcount)); | ||
336 | 347 | ||
337 | if (!srq->ibsrq.uobject) { | 348 | if (!srq->ibsrq.uobject) { |
338 | mthca_free_srq_buf(dev, srq); | 349 | mthca_free_srq_buf(dev, srq); |
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
414 | spin_lock(&dev->srq_table.lock); | 425 | spin_lock(&dev->srq_table.lock); |
415 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); | 426 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); |
416 | if (srq) | 427 | if (srq) |
417 | atomic_inc(&srq->refcount); | 428 | ++srq->refcount; |
418 | spin_unlock(&dev->srq_table.lock); | 429 | spin_unlock(&dev->srq_table.lock); |
419 | 430 | ||
420 | if (!srq) { | 431 | if (!srq) { |
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
431 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); | 442 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); |
432 | 443 | ||
433 | out: | 444 | out: |
434 | if (atomic_dec_and_test(&srq->refcount)) | 445 | spin_lock(&dev->srq_table.lock); |
446 | if (!--srq->refcount) | ||
435 | wake_up(&srq->wait); | 447 | wake_up(&srq->wait); |
448 | spin_unlock(&dev->srq_table.lock); | ||
436 | } | 449 | } |
437 | 450 | ||
438 | /* | 451 | /* |
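The mthca_srq.c changes are the other half of the same conversion: the event
handler now takes and drops its reference under srq_table.lock, so the final
decrement and the wake_up() are atomic with respect to the destroy path. A
sketch of that handler side, reusing the hypothetical my_* names from the
sketch after the mthca_qp.c hunk:

	/* Sketch only -- event-handler side of the locked-refcount pattern.
	 * The real handler looks the object up in the table under the lock;
	 * the lookup is elided here to keep the sketch short. */
	static void my_obj_event(struct my_table *table, struct my_obj *obj)
	{
		spin_lock(&table->lock);
		++obj->refcount;		/* pin the object across the callback */
		spin_unlock(&table->lock);

		/* ... dispatch the event to its consumer ... */

		spin_lock(&table->lock);
		if (!--obj->refcount)
			wake_up(&obj->wait);	/* destroyer may be sleeping in wait_event() */
		spin_unlock(&table->lock);
	}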
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 4ca175553f9f..f887780e8093 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
158 | if (priv->pkey == pkey) { | 158 | if (priv->pkey == pkey) { |
159 | unregister_netdev(priv->dev); | 159 | unregister_netdev(priv->dev); |
160 | ipoib_dev_cleanup(priv->dev); | 160 | ipoib_dev_cleanup(priv->dev); |
161 | |||
162 | list_del(&priv->list); | 161 | list_del(&priv->list); |
163 | 162 | free_netdev(priv->dev); | |
164 | kfree(priv); | ||
165 | 163 | ||
166 | ret = 0; | 164 | ret = 0; |
167 | break; | 165 | break; |
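The ipoib_vlan.c hunk fixes the teardown of a child interface: the private
structure is carved out of the net_device allocation by alloc_netdev(), so
kfree(priv) frees memory that was never a standalone kmalloc'd object and
leaks the net_device itself. The replacement free_netdev(priv->dev) releases
both in one call. A sketch of the intended pairing, assuming the
three-argument alloc_netdev() of kernels from this era and hypothetical
my_* names:

	/* Sketch only -- private data is embedded in the net_device, so it
	 * must be released with free_netdev(), never kfree(priv). */
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct my_priv {
		struct list_head list;
		/* ... per-interface state ... */
	};

	static struct net_device *my_intf_alloc(void)
	{
		/* One allocation covers the net_device and the priv area;
		 * netdev_priv() returns a pointer into that same block. */
		return alloc_netdev(sizeof(struct my_priv), "my%d", ether_setup);
	}

	static void my_intf_delete(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		unregister_netdev(dev);
		list_del(&priv->list);
		free_netdev(dev);	/* frees the embedded priv as well */
	}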
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 5bb55742ada6..c32ce4348e1b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target) | |||
409 | } | 409 | } |
410 | } | 410 | } |
411 | 411 | ||
412 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | ||
413 | struct srp_target_port *target, | ||
414 | struct srp_request *req) | ||
415 | { | ||
416 | struct scatterlist *scat; | ||
417 | int nents; | ||
418 | |||
419 | if (!scmnd->request_buffer || | ||
420 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | ||
421 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | ||
422 | return; | ||
423 | |||
424 | /* | ||
425 | * This handling of non-SG commands can be killed when the | ||
426 | * SCSI midlayer no longer generates non-SG commands. | ||
427 | */ | ||
428 | if (likely(scmnd->use_sg)) { | ||
429 | nents = scmnd->use_sg; | ||
430 | scat = scmnd->request_buffer; | ||
431 | } else { | ||
432 | nents = 1; | ||
433 | scat = &req->fake_sg; | ||
434 | } | ||
435 | |||
436 | dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, | ||
437 | scmnd->sc_data_direction); | ||
438 | } | ||
439 | |||
412 | static int srp_reconnect_target(struct srp_target_port *target) | 440 | static int srp_reconnect_target(struct srp_target_port *target) |
413 | { | 441 | { |
414 | struct ib_cm_id *new_cm_id; | 442 | struct ib_cm_id *new_cm_id; |
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
455 | list_for_each_entry(req, &target->req_queue, list) { | 483 | list_for_each_entry(req, &target->req_queue, list) { |
456 | req->scmnd->result = DID_RESET << 16; | 484 | req->scmnd->result = DID_RESET << 16; |
457 | req->scmnd->scsi_done(req->scmnd); | 485 | req->scmnd->scsi_done(req->scmnd); |
486 | srp_unmap_data(req->scmnd, target, req); | ||
458 | } | 487 | } |
459 | 488 | ||
460 | target->rx_head = 0; | 489 | target->rx_head = 0; |
461 | target->tx_head = 0; | 490 | target->tx_head = 0; |
462 | target->tx_tail = 0; | 491 | target->tx_tail = 0; |
463 | target->req_head = 0; | 492 | INIT_LIST_HEAD(&target->free_reqs); |
464 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | ||
465 | target->req_ring[i].next = i + 1; | ||
466 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | ||
467 | INIT_LIST_HEAD(&target->req_queue); | 493 | INIT_LIST_HEAD(&target->req_queue); |
494 | for (i = 0; i < SRP_SQ_SIZE; ++i) | ||
495 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | ||
468 | 496 | ||
469 | ret = srp_connect_target(target); | 497 | ret = srp_connect_target(target); |
470 | if (ret) | 498 | if (ret) |
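From here on the SRP changes replace the hand-rolled free list (a chain of
short `next` indices threaded through req_ring, with req_head as its head)
with a standard struct list_head called free_reqs: a request moves between
free_reqs and req_queue with list_move_tail(), and keeps its fixed ring
position in a new `index` field so it can still be encoded in an SRP tag.
The reconnect hunk above rebuilds both lists; the allocation and completion
halves appear in the srp_queuecommand and srp_remove_req hunks below. A
self-contained sketch of the scheme, with hypothetical my_pool/my_req names:

	/* Sketch only -- request pool kept on list_heads instead of an
	 * index-chained free list. */
	#include <linux/list.h>
	#include <scsi/scsi_cmnd.h>

	#define MY_POOL_SIZE 64			/* stands in for SRP_SQ_SIZE */

	struct my_req {
		struct list_head  list;		/* on free_reqs or req_queue */
		struct scsi_cmnd *scmnd;
		int               index;	/* fixed slot in the ring */
	};

	struct my_pool {
		struct list_head free_reqs;	/* slots ready for new commands */
		struct list_head req_queue;	/* commands currently in flight */
		struct my_req    ring[MY_POOL_SIZE];
	};

	static void my_pool_init(struct my_pool *p)
	{
		int i;

		INIT_LIST_HEAD(&p->free_reqs);
		INIT_LIST_HEAD(&p->req_queue);
		for (i = 0; i < MY_POOL_SIZE; ++i) {
			p->ring[i].index = i;
			list_add_tail(&p->ring[i].list, &p->free_reqs);
		}
	}

	static struct my_req *my_req_get(struct my_pool *p)
	{
		/* Caller must know the pool is not empty (the SRP code bounds
		 * outstanding commands by the send queue depth).  The real
		 * queuecommand only moves the request onto req_queue after the
		 * send has been posted; the sketch moves it immediately. */
		struct my_req *req = list_entry(p->free_reqs.next,
						struct my_req, list);

		list_move_tail(&req->list, &p->req_queue);
		return req;
	}

	static void my_req_complete(struct my_pool *p, struct my_req *req)
	{
		/* DMA unmapping for req->scmnd would happen here, as
		 * srp_remove_req() now does via srp_unmap_data(). */
		list_move_tail(&req->list, &p->free_reqs);
	}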
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
589 | return len; | 617 | return len; |
590 | } | 618 | } |
591 | 619 | ||
592 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | 620 | static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) |
593 | struct srp_target_port *target, | ||
594 | struct srp_request *req) | ||
595 | { | ||
596 | struct scatterlist *scat; | ||
597 | int nents; | ||
598 | |||
599 | if (!scmnd->request_buffer || | ||
600 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | ||
601 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | ||
602 | return; | ||
603 | |||
604 | /* | ||
605 | * This handling of non-SG commands can be killed when the | ||
606 | * SCSI midlayer no longer generates non-SG commands. | ||
607 | */ | ||
608 | if (likely(scmnd->use_sg)) { | ||
609 | nents = scmnd->use_sg; | ||
610 | scat = scmnd->request_buffer; | ||
611 | } else { | ||
612 | nents = 1; | ||
613 | scat = &req->fake_sg; | ||
614 | } | ||
615 | |||
616 | dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, | ||
617 | scmnd->sc_data_direction); | ||
618 | } | ||
619 | |||
620 | static void srp_remove_req(struct srp_target_port *target, struct srp_request *req, | ||
621 | int index) | ||
622 | { | 621 | { |
623 | list_del(&req->list); | 622 | srp_unmap_data(req->scmnd, target, req); |
624 | req->next = target->req_head; | 623 | list_move_tail(&req->list, &target->free_reqs); |
625 | target->req_head = index; | ||
626 | } | 624 | } |
627 | 625 | ||
628 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | 626 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) |
@@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
647 | req->tsk_status = rsp->data[3]; | 645 | req->tsk_status = rsp->data[3]; |
648 | complete(&req->done); | 646 | complete(&req->done); |
649 | } else { | 647 | } else { |
650 | scmnd = req->scmnd; | 648 | scmnd = req->scmnd; |
651 | if (!scmnd) | 649 | if (!scmnd) |
652 | printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", | 650 | printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", |
653 | (unsigned long long) rsp->tag); | 651 | (unsigned long long) rsp->tag); |
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
665 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) | 663 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) |
666 | scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); | 664 | scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); |
667 | 665 | ||
668 | srp_unmap_data(scmnd, target, req); | ||
669 | |||
670 | if (!req->tsk_mgmt) { | 666 | if (!req->tsk_mgmt) { |
671 | req->scmnd = NULL; | ||
672 | scmnd->host_scribble = (void *) -1L; | 667 | scmnd->host_scribble = (void *) -1L; |
673 | scmnd->scsi_done(scmnd); | 668 | scmnd->scsi_done(scmnd); |
674 | 669 | ||
675 | srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT); | 670 | srp_remove_req(target, req); |
676 | } else | 671 | } else |
677 | req->cmd_done = 1; | 672 | req->cmd_done = 1; |
678 | } | 673 | } |
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
859 | struct srp_request *req; | 854 | struct srp_request *req; |
860 | struct srp_iu *iu; | 855 | struct srp_iu *iu; |
861 | struct srp_cmd *cmd; | 856 | struct srp_cmd *cmd; |
862 | long req_index; | ||
863 | int len; | 857 | int len; |
864 | 858 | ||
865 | if (target->state == SRP_TARGET_CONNECTING) | 859 | if (target->state == SRP_TARGET_CONNECTING) |
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
879 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, | 873 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, |
880 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); | 874 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); |
881 | 875 | ||
882 | req_index = target->req_head; | 876 | req = list_entry(target->free_reqs.next, struct srp_request, list); |
883 | 877 | ||
884 | scmnd->scsi_done = done; | 878 | scmnd->scsi_done = done; |
885 | scmnd->result = 0; | 879 | scmnd->result = 0; |
886 | scmnd->host_scribble = (void *) req_index; | 880 | scmnd->host_scribble = (void *) (long) req->index; |
887 | 881 | ||
888 | cmd = iu->buf; | 882 | cmd = iu->buf; |
889 | memset(cmd, 0, sizeof *cmd); | 883 | memset(cmd, 0, sizeof *cmd); |
890 | 884 | ||
891 | cmd->opcode = SRP_CMD; | 885 | cmd->opcode = SRP_CMD; |
892 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | 886 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); |
893 | cmd->tag = req_index; | 887 | cmd->tag = req->index; |
894 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); | 888 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); |
895 | 889 | ||
896 | req = &target->req_ring[req_index]; | ||
897 | |||
898 | req->scmnd = scmnd; | 890 | req->scmnd = scmnd; |
899 | req->cmd = iu; | 891 | req->cmd = iu; |
900 | req->cmd_done = 0; | 892 | req->cmd_done = 0; |
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
919 | goto err_unmap; | 911 | goto err_unmap; |
920 | } | 912 | } |
921 | 913 | ||
922 | target->req_head = req->next; | 914 | list_move_tail(&req->list, &target->req_queue); |
923 | list_add_tail(&req->list, &target->req_queue); | ||
924 | 915 | ||
925 | return 0; | 916 | return 0; |
926 | 917 | ||
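A short usage sketch of the pool above in a queuecommand-style path, matching
how the hunk stores the slot index in host_scribble so the error handlers can
find the request again (hypothetical my_* names from the sketch after the
reconnect hunk):

	/* Sketch only -- wiring the pool into a queuecommand-style path. */
	static int my_queuecommand(struct my_pool *p, struct scsi_cmnd *scmnd,
				   void (*done)(struct scsi_cmnd *))
	{
		struct my_req *req = my_req_get(p);

		scmnd->scsi_done = done;
		scmnd->result    = 0;
		/* Remember the ring slot so a srp_find_req()-style lookup
		 * can recover the request from the midlayer's cookie. */
		scmnd->host_scribble = (void *) (long) req->index;
		req->scmnd = scmnd;

		/* ... build the SRP_CMD IU, map data, post the send ... */

		return 0;
	}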
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1143 | return 0; | 1134 | return 0; |
1144 | } | 1135 | } |
1145 | 1136 | ||
1146 | static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | 1137 | static int srp_send_tsk_mgmt(struct srp_target_port *target, |
1138 | struct srp_request *req, u8 func) | ||
1147 | { | 1139 | { |
1148 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1149 | struct srp_request *req; | ||
1150 | struct srp_iu *iu; | 1140 | struct srp_iu *iu; |
1151 | struct srp_tsk_mgmt *tsk_mgmt; | 1141 | struct srp_tsk_mgmt *tsk_mgmt; |
1152 | int req_index; | ||
1153 | int ret = FAILED; | ||
1154 | 1142 | ||
1155 | spin_lock_irq(target->scsi_host->host_lock); | 1143 | spin_lock_irq(target->scsi_host->host_lock); |
1156 | 1144 | ||
1157 | if (target->state == SRP_TARGET_DEAD || | 1145 | if (target->state == SRP_TARGET_DEAD || |
1158 | target->state == SRP_TARGET_REMOVED) { | 1146 | target->state == SRP_TARGET_REMOVED) { |
1159 | scmnd->result = DID_BAD_TARGET << 16; | 1147 | req->scmnd->result = DID_BAD_TARGET << 16; |
1160 | goto out; | 1148 | goto out; |
1161 | } | 1149 | } |
1162 | 1150 | ||
1163 | if (scmnd->host_scribble == (void *) -1L) | ||
1164 | goto out; | ||
1165 | |||
1166 | req_index = (long) scmnd->host_scribble; | ||
1167 | printk(KERN_ERR "Abort for req_index %d\n", req_index); | ||
1168 | |||
1169 | req = &target->req_ring[req_index]; | ||
1170 | init_completion(&req->done); | 1151 | init_completion(&req->done); |
1171 | 1152 | ||
1172 | iu = __srp_get_tx_iu(target); | 1153 | iu = __srp_get_tx_iu(target); |
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | |||
1177 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); | 1158 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); |
1178 | 1159 | ||
1179 | tsk_mgmt->opcode = SRP_TSK_MGMT; | 1160 | tsk_mgmt->opcode = SRP_TSK_MGMT; |
1180 | tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | 1161 | tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); |
1181 | tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; | 1162 | tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; |
1182 | tsk_mgmt->tsk_mgmt_func = func; | 1163 | tsk_mgmt->tsk_mgmt_func = func; |
1183 | tsk_mgmt->task_tag = req_index; | 1164 | tsk_mgmt->task_tag = req->index; |
1184 | 1165 | ||
1185 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) | 1166 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) |
1186 | goto out; | 1167 | goto out; |
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | |||
1188 | req->tsk_mgmt = iu; | 1169 | req->tsk_mgmt = iu; |
1189 | 1170 | ||
1190 | spin_unlock_irq(target->scsi_host->host_lock); | 1171 | spin_unlock_irq(target->scsi_host->host_lock); |
1172 | |||
1191 | if (!wait_for_completion_timeout(&req->done, | 1173 | if (!wait_for_completion_timeout(&req->done, |
1192 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) | 1174 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) |
1193 | return FAILED; | 1175 | return -1; |
1194 | spin_lock_irq(target->scsi_host->host_lock); | ||
1195 | 1176 | ||
1196 | if (req->cmd_done) { | 1177 | return 0; |
1197 | srp_remove_req(target, req, req_index); | ||
1198 | scmnd->scsi_done(scmnd); | ||
1199 | } else if (!req->tsk_status) { | ||
1200 | srp_remove_req(target, req, req_index); | ||
1201 | scmnd->result = DID_ABORT << 16; | ||
1202 | ret = SUCCESS; | ||
1203 | } | ||
1204 | 1178 | ||
1205 | out: | 1179 | out: |
1206 | spin_unlock_irq(target->scsi_host->host_lock); | 1180 | spin_unlock_irq(target->scsi_host->host_lock); |
1207 | return ret; | 1181 | return -1; |
1182 | } | ||
1183 | |||
1184 | static int srp_find_req(struct srp_target_port *target, | ||
1185 | struct scsi_cmnd *scmnd, | ||
1186 | struct srp_request **req) | ||
1187 | { | ||
1188 | if (scmnd->host_scribble == (void *) -1L) | ||
1189 | return -1; | ||
1190 | |||
1191 | *req = &target->req_ring[(long) scmnd->host_scribble]; | ||
1192 | |||
1193 | return 0; | ||
1208 | } | 1194 | } |
1209 | 1195 | ||
1210 | static int srp_abort(struct scsi_cmnd *scmnd) | 1196 | static int srp_abort(struct scsi_cmnd *scmnd) |
1211 | { | 1197 | { |
1198 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1199 | struct srp_request *req; | ||
1200 | int ret = SUCCESS; | ||
1201 | |||
1212 | printk(KERN_ERR "SRP abort called\n"); | 1202 | printk(KERN_ERR "SRP abort called\n"); |
1213 | 1203 | ||
1214 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); | 1204 | if (srp_find_req(target, scmnd, &req)) |
1205 | return FAILED; | ||
1206 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) | ||
1207 | return FAILED; | ||
1208 | |||
1209 | spin_lock_irq(target->scsi_host->host_lock); | ||
1210 | |||
1211 | if (req->cmd_done) { | ||
1212 | srp_remove_req(target, req); | ||
1213 | scmnd->scsi_done(scmnd); | ||
1214 | } else if (!req->tsk_status) { | ||
1215 | srp_remove_req(target, req); | ||
1216 | scmnd->result = DID_ABORT << 16; | ||
1217 | } else | ||
1218 | ret = FAILED; | ||
1219 | |||
1220 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1221 | |||
1222 | return ret; | ||
1215 | } | 1223 | } |
1216 | 1224 | ||
1217 | static int srp_reset_device(struct scsi_cmnd *scmnd) | 1225 | static int srp_reset_device(struct scsi_cmnd *scmnd) |
1218 | { | 1226 | { |
1227 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1228 | struct srp_request *req, *tmp; | ||
1229 | |||
1219 | printk(KERN_ERR "SRP reset_device called\n"); | 1230 | printk(KERN_ERR "SRP reset_device called\n"); |
1220 | 1231 | ||
1221 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); | 1232 | if (srp_find_req(target, scmnd, &req)) |
1233 | return FAILED; | ||
1234 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) | ||
1235 | return FAILED; | ||
1236 | if (req->tsk_status) | ||
1237 | return FAILED; | ||
1238 | |||
1239 | spin_lock_irq(target->scsi_host->host_lock); | ||
1240 | |||
1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | ||
1242 | if (req->scmnd->device == scmnd->device) { | ||
1243 | req->scmnd->result = DID_RESET << 16; | ||
1244 | scmnd->scsi_done(scmnd); | ||
1245 | srp_remove_req(target, req); | ||
1246 | } | ||
1247 | |||
1248 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1249 | |||
1250 | return SUCCESS; | ||
1222 | } | 1251 | } |
1223 | 1252 | ||
1224 | static int srp_reset_host(struct scsi_cmnd *scmnd) | 1253 | static int srp_reset_host(struct scsi_cmnd *scmnd) |
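The rewritten error handlers split the work three ways: srp_find_req()
recovers the request from the slot index stashed in host_scribble,
srp_send_tsk_mgmt() only posts the task-management IU and waits for the
response, and srp_abort()/srp_reset_device() interpret req->cmd_done and
req->tsk_status under host_lock to pick SUCCESS or FAILED. Note that the
send path drops host_lock before wait_for_completion_timeout(), so the
response path is free to complete the request in the meantime. A sketch of
the lookup half, using the hypothetical my_* names from the pool sketch
above (the -1L sentinel marks a command that has already completed):

	/* Sketch only -- map the midlayer's cookie back to a request slot. */
	static struct my_req *my_find_req(struct my_pool *p,
					  struct scsi_cmnd *scmnd)
	{
		long index = (long) scmnd->host_scribble;

		if (index == -1L)	/* already completed: nothing to abort */
			return NULL;

		return &p->ring[index];
	}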
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev, | |||
1518 | 1547 | ||
1519 | INIT_WORK(&target->work, srp_reconnect_work, target); | 1548 | INIT_WORK(&target->work, srp_reconnect_work, target); |
1520 | 1549 | ||
1521 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | 1550 | INIT_LIST_HEAD(&target->free_reqs); |
1522 | target->req_ring[i].next = i + 1; | ||
1523 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | ||
1524 | INIT_LIST_HEAD(&target->req_queue); | 1551 | INIT_LIST_HEAD(&target->req_queue); |
1552 | for (i = 0; i < SRP_SQ_SIZE; ++i) { | ||
1553 | target->req_ring[i].index = i; | ||
1554 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | ||
1555 | } | ||
1525 | 1556 | ||
1526 | ret = srp_parse_options(buf, target); | 1557 | ret = srp_parse_options(buf, target); |
1527 | if (ret) | 1558 | if (ret) |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index bd7f7c3115de..c5cd43aae860 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -101,7 +101,7 @@ struct srp_request { | |||
101 | */ | 101 | */ |
102 | struct scatterlist fake_sg; | 102 | struct scatterlist fake_sg; |
103 | struct completion done; | 103 | struct completion done; |
104 | short next; | 104 | short index; |
105 | u8 cmd_done; | 105 | u8 cmd_done; |
106 | u8 tsk_status; | 106 | u8 tsk_status; |
107 | }; | 107 | }; |
@@ -133,7 +133,7 @@ struct srp_target_port { | |||
133 | unsigned tx_tail; | 133 | unsigned tx_tail; |
134 | struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; | 134 | struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; |
135 | 135 | ||
136 | int req_head; | 136 | struct list_head free_reqs; |
137 | struct list_head req_queue; | 137 | struct list_head req_queue; |
138 | struct srp_request req_ring[SRP_SQ_SIZE]; | 138 | struct srp_request req_ring[SRP_SQ_SIZE]; |
139 | 139 | ||