Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_intr.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index e8c8a25cd922..bad20a395265 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -383,7 +383,7 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 	return supp_msgs;
 }
 
-static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[512];
 	u64 ignore_this_time = 0;
@@ -480,7 +480,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 				  INFINIPATH_E_IBSTATUSCHANGED);
 	}
 	if (!errs)
-		return;
+		return 0;
 
 	if (!noprint)
 		/*
@@ -604,9 +604,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		wake_up_interruptible(&ipath_sma_state_wait);
 	}
 
-	if (chkerrpkts)
-		/* process possible error packets in hdrq */
-		ipath_kreceive(dd);
+	return chkerrpkts;
 }
 
 /* this is separate to allow for better optimization of ipath_intr() */
@@ -765,10 +763,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat;
+	u32 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
-	u32 p0bits;
+	u32 p0bits, oldhead;
 	static unsigned unexpected = 0;
 	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
 		(1U<<INFINIPATH_I_RCVURG_SHIFT);
@@ -810,9 +808,8 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	 * interrupts.  We clear the interrupts first so that we don't
 	 * lose intr for later packets that arrive while we are processing.
 	 */
-	if (dd->ipath_port0head !=
-	    (u32)le64_to_cpu(*dd->ipath_hdrqtailptr)) {
-		u32 oldhead = dd->ipath_port0head;
+	oldhead = dd->ipath_port0head;
+	if (oldhead != (u32) le64_to_cpu(*dd->ipath_hdrqtailptr)) {
 		if (dd->ipath_flags & IPATH_GPIO_INTR) {
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
 					 (u64) (1 << 2));
@@ -830,6 +827,8 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	}
 
 	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+	p0bits = port0rbits;
+
 	if (unlikely(!istat)) {
 		ipath_stats.sps_nullintr++;
 		ret = IRQ_NONE; /* not our interrupt, or already handled */
@@ -867,10 +866,11 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			handle_errors(dd, estat);
+			if (handle_errors(dd, estat))
+				/* force calling ipath_kreceive() */
+				chk0rcv = 1;
 	}
 
-	p0bits = port0rbits;
 	if (istat & INFINIPATH_I_GPIO) {
 		/*
 		 * Packets are available in the port 0 rcv queue.
@@ -892,8 +892,10 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
 					 (u64) (1 << 2));
 			p0bits |= INFINIPATH_I_GPIO;
+			chk0rcv = 1;
 		}
 	}
+	chk0rcv |= istat & p0bits;
 
 	/*
 	 * clear the ones we will deal with on this round
@@ -905,18 +907,16 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * we check for both transition from empty to non-empty, and urgent
-	 * packets (those with the interrupt bit set in the header), and
-	 * if enabled, the GPIO bit 2 interrupt used for port0 on some
-	 * HT-400 boards.
-	 * Do this before checking for pio buffers available, since
-	 * receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway.
+	 * handle port0 receive before checking for pio buffers available,
+	 * since receives can overflow; piobuf waiters can afford a few
+	 * extra cycles, since they were waiting anyway, and user's waiting
+	 * for receive are at the bottom.
 	 */
-	if (istat & p0bits) {
+	if (chk0rcv) {
 		ipath_kreceive(dd);
 		istat &= ~port0rbits;
 	}
+
 	if (istat & ((infinipath_i_rcvavail_mask <<
 		      INFINIPATH_I_RCVAVAIL_SHIFT)
 		     | (infinipath_i_rcvurg_mask <<
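
Taken together, the hunks above change handle_errors() from draining the receive queue itself to returning its chkerrpkts count, and make ipath_intr() fold every reason to drain the port 0 queue (error packets, the GPIO port-0 interrupt, and the ordinary rcvavail/rcvurg bits) into one chk0rcv flag, so ipath_kreceive() runs at most once per interrupt. The following is a minimal standalone sketch of that pattern, not driver code; all names (fake_handle_errors, fake_intr, drain_rcv_queue, the bit masks) are illustrative stand-ins, not ipath symbols.

/*
 * Sketch: an error handler that reports "packets may be queued" via its
 * return value, and an interrupt handler that consolidates all reasons
 * to drain the receive queue into a single flag.
 */
#include <stdio.h>
#include <stdint.h>

#define RCVAVAIL_BIT	(1u << 0)	/* stand-ins for the port0rbits mask */
#define RCVURG_BIT	(1u << 1)
#define GPIO_BIT	(1u << 2)

static const uint32_t port0rbits = RCVAVAIL_BIT | RCVURG_BIT;

/* stand-in for ipath_kreceive(): drain the port 0 receive queue once */
static void drain_rcv_queue(void)
{
	puts("draining receive queue");
}

/*
 * stand-in for handle_errors(): instead of draining the queue itself,
 * return non-zero when error packets may be waiting in the queue
 */
static int fake_handle_errors(uint32_t errs)
{
	int chkerrpkts = (errs != 0);	/* pretend any error queued a packet */

	return chkerrpkts;
}

/* stand-in for ipath_intr(): one consolidated receive pass per interrupt */
static void fake_intr(uint32_t istat, uint32_t estat)
{
	uint32_t chk0rcv = 0;
	uint32_t p0bits = port0rbits;

	if (estat) {
		if (fake_handle_errors(estat))
			chk0rcv = 1;	/* error packets may be queued */
	}

	if (istat & GPIO_BIT) {
		p0bits |= GPIO_BIT;
		chk0rcv = 1;
	}
	chk0rcv |= istat & p0bits;

	if (chk0rcv) {
		/* drain at most once, whatever the reason */
		drain_rcv_queue();
		istat &= ~port0rbits;	/* those bits are now handled */
	}
}

int main(void)
{
	fake_intr(RCVAVAIL_BIT, 0);	/* plain receive interrupt */
	fake_intr(0, 0x10);		/* error interrupt only: still one drain */
	return 0;
}

In the old flow, handle_errors() could call ipath_kreceive() and ipath_intr() could call it again from the same interrupt; with the returned flag, the single if (chk0rcv) block covers both cases.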