Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_intr.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 63
 1 file changed, 16 insertions(+), 47 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 1fd91c59f246..b29fe7e9b11a 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -303,7 +303,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 		 * Flush all queued sends when link went to DOWN or INIT,
 		 * to be sure that they don't block SMA and other MAD packets
 		 */
-		ipath_cancel_sends(dd);
+		ipath_cancel_sends(dd, 1);
 	}
 	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
 		 lstate == IPATH_IBSTATE_ACTIVE) {
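
Note: ipath_cancel_sends() grows a flag argument in this patch. The parameter
name below is an assumption inferred from the two call sites (1 here, 0 in
ipath_clear_freeze(), where the caller rewrites sendctrl itself); a minimal
sketch of the new shape, not the driver's actual body:

    /* Hypothetical sketch: the flag plausibly selects whether send
     * control state is restored after the queued buffers are disarmed. */
    void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
    {
            /* ... disarm every queued PIO send buffer ... */
            if (restore_sendctrl)
                    ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                     dd->ipath_sendctrl);
    }
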
@@ -517,10 +517,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 
 	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
 
-	/*
-	 * don't report errors that are masked (includes those always
-	 * ignored)
-	 */
+	/* don't report errors that are masked */
 	errs &= ~dd->ipath_maskederrs;
 
 	/* do these first, they are most important */
@@ -566,19 +563,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * ones on this particular interrupt, which also isn't great
 	 */
 	dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+	dd->ipath_errormask &= ~dd->ipath_maskederrs;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-			 ~dd->ipath_maskederrs);
+		dd->ipath_errormask);
 	s_iserr = ipath_decode_err(msg, sizeof msg,
-			(dd->ipath_maskederrs & ~dd->
-			 ipath_ignorederrs));
+		dd->ipath_maskederrs);
 
-	if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
+	if (dd->ipath_maskederrs &
 		~(INFINIPATH_E_RRCVEGRFULL |
 		INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
 		ipath_dev_err(dd, "Temporarily disabling "
 			"error(s) %llx reporting; too frequent (%s)\n",
-			(unsigned long long) (dd->ipath_maskederrs &
-			~dd->ipath_ignorederrs), msg);
+			(unsigned long long)dd->ipath_maskederrs,
+			msg);
 	else {
 		/*
 		 * rcvegrfull and rcvhdrqfull are "normal",
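
Note: the net effect of this hunk is a shadow-register pattern. The driver now
keeps the currently-enabled error set in dd->ipath_errormask and writes that
shadow to kr_errormask, rather than recomputing ~maskederrs (and subtracting
the now-removed ipath_ignorederrs set) at each call site. A minimal sketch of
the pattern; ipath_mask_errs() is a hypothetical helper name, not a function
in the driver:

    static void ipath_mask_errs(struct ipath_devdata *dd, ipath_err_t errs)
    {
            dd->ipath_maskederrs |= errs;       /* remember what is hidden */
            dd->ipath_errormask &= ~dd->ipath_maskederrs; /* shrink enabled set */
            ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
                             dd->ipath_errormask);  /* write the shadow */
    }

One shadow means every enable/disable site writes the same value and cannot
drift out of sync with handle_errors().
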
@@ -793,19 +790,22 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
 
+	/* also disable interrupts; errormask is sometimes overwriten */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
+
 	/*
 	 * clear all sends, because they have may been
 	 * completed by usercode while in freeze mode, and
 	 * therefore would not be sent, and eventually
 	 * might cause the process to run out of bufs
 	 */
-	ipath_cancel_sends(dd);
+	ipath_cancel_sends(dd, 0);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
 			 dd->ipath_control);
 
 	/* ensure pio avail updates continue */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~IPATH_S_PIOBUFAVAILUPD);
+		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 		 dd->ipath_sendctrl);
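
Note: the tail of this hunk only corrects the bit name (the old IPATH_S_
spelling presumably referenced the wrong constant), but the surrounding
context shows the driver's clear/flush/restore idiom, worth spelling out.
The scratch read in the middle appears to force the posted write out before
the restore; a sketch of the idiom as used above:

    /* momentarily clear the bit, flush the write, then restore */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                     dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
    ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); /* posting flush */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
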
@@ -817,7 +817,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
 		/* deal with 6110 chip bug */
 		im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
-		val = ipath_read_kreg64(dd, 0x1000+(im*sizeof(u64)));
+		val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im);
 		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
 			= le64_to_cpu(val);
 	}
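
Note: this one-line fix implies that ipath_read_kreg64() takes its offset in
u64-sized register units, not bytes. Worked through for im == 0, assuming the
accessor scales the offset by sizeof(u64):

    /* offset is in u64 registers, not bytes (assumed from this fix) */
    val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
    /* im == 0: word offset 0x200 -> byte 0x200 * 8 = 0x1000   (intended)
     * old code: word offset 0x1000 -> byte 0x1000 * 8 = 0x8000 (wrong)  */
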
@@ -832,7 +832,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
 		E_SPKT_ERRS_IGNORE);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		~dd->ipath_maskederrs);
+		dd->ipath_errormask);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
 }
 
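Note: together with the kr_intmask write added at the top of
ipath_clear_freeze() (the -793 hunk above), this hunk closes an
interrupt-quiesce bracket around the whole unfreeze sequence. A compressed
sketch of the resulting shape of the function:

    ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);   /* all off */
    /* ... cancel sends, refresh pioavail shadow, clear errors ... */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
                     dd->ipath_errormask);                     /* restore */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);   /* all on */
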
@@ -1002,7 +1003,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 	u32 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
-	u32 oldhead, curtail;
 	static unsigned unexpected = 0;
 	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
 		(1U<<INFINIPATH_I_RCVURG_SHIFT);
@@ -1035,36 +1035,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 		goto bail;
 	}
 
-	/*
-	 * We try to avoid reading the interrupt status register, since
-	 * that's a PIO read, and stalls the processor for up to about
-	 * ~0.25 usec. The idea is that if we processed a port0 packet,
-	 * we blindly clear the port 0 receive interrupt bits, and nothing
-	 * else, then return. If other interrupts are pending, the chip
-	 * will re-interrupt us as soon as we write the intclear register.
-	 * We then won't process any more kernel packets (if not the 2nd
-	 * time, then the 3rd or 4th) and we'll then handle the other
-	 * interrupts. We clear the interrupts first so that we don't
-	 * lose intr for later packets that arrive while we are processing.
-	 */
-	oldhead = dd->ipath_port0head;
-	curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-	if (oldhead != curtail) {
-		if (dd->ipath_flags & IPATH_GPIO_INTR) {
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-					 (u64) (1 << IPATH_GPIO_PORT0_BIT));
-			istat = port0rbits | INFINIPATH_I_GPIO;
-		}
-		else
-			istat = port0rbits;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-		ipath_kreceive(dd);
-		if (oldhead != dd->ipath_port0head) {
-			ipath_stats.sps_fastrcvint++;
-			goto done;
-		}
-	}
-
 	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
 
 	if (unlikely(!istat)) {
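
Note: for reference, the fast path this hunk deletes had the shape below
(reconstructed from the removed lines, with the GPIO variant and the
sps_fastrcvint counter omitted). The patch presumably trades the saved
~0.25 usec PIO read of kr_intstatus for an interrupt handler that always
reads the real pending status:

    /* removed optimization, simplified from the deleted lines above */
    u32 oldhead = dd->ipath_port0head;
    u32 curtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
    if (oldhead != curtail) {
            /* blindly ack port-0 receive bits, then poll the queue */
            ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, port0rbits);
            ipath_kreceive(dd);
            if (oldhead != dd->ipath_port0head)
                    return IRQ_HANDLED; /* skipped the kr_intstatus read */
    }
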
@@ -1225,7 +1195,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 		handle_layer_pioavail(dd);
 	}
 
-done:
 	ret = IRQ_HANDLED;
 
 bail: