path: root/drivers/infiniband
author     Bryan O'Sullivan <bos@pathscale.com>      2006-07-01 07:36:04 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-07-01 12:56:00 -0400
commit     13aef4942c291742064c1d5ac71df6493c4a00a9 (patch)
tree       b52657fa68485a00b5c94d3f37ed0a9f0cb7179d /drivers/infiniband
parent     f37bda92461313ad3bbfbf5660adc849c69718bf (diff)
[PATCH] IB/ipath: reduce overhead on receive interrupts
Also count the number of interrupts where that works (fastrcvint).  On any
interrupt where the port0 head and tail registers are not equal, just call
the ipath_kreceive code without reading the interrupt status, saving the
roughly 0.25 usec processor stall spent waiting for the read to return.  If
any other interrupt bits are set, or head == tail, take the normal path,
which has been reordered to handle the receive ahead of pioavail.

No longer call ipath_kreceive() from ipath_qcheck(), because that just
seems to make things worse and isn't really buying us anything these days.

Also, no longer loop in ipath_kreceive(); it is better not to hold things
off too long (we saw many cases where we would loop 4-8 times and handle
thousands of packets, up to 3500, in a single call).

Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
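[Editor's note] The heart of the change is the fast path described above: when the port0 head and tail differ, the handler blindly clears only the port 0 receive interrupt bits, drains the ring, and pays for the intstatus PIO read only if that made no progress. Below is a minimal, compilable user-space sketch of that flow, not the driver code itself; device registers are mocked as plain struct fields, and every name in it (fake_dev, read_intstatus, clear_intbits, process_port0_ring, PORT0_RCV_BITS) is invented for illustration.

/*
 * Standalone sketch of the fast-path idea, NOT the ipath driver code:
 * device registers are mocked so the flow can be compiled and traced
 * in user space.  All names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PORT0_RCV_BITS 0x3u     /* pretend rcvavail|rcvurg bits for port 0 */

struct fake_dev {
        uint32_t port0_head;    /* software copy of the ring head */
        uint32_t port0_tail;    /* DMA'd tail, readable without a PIO stall */
        uint32_t intstatus;     /* expensive to read on real hardware */
        unsigned fastrcvint;    /* interrupts handled without reading intstatus */
};

/* stand-ins for the slow PIO status read and the interrupt-clear write */
static uint32_t read_intstatus(struct fake_dev *dd) { return dd->intstatus; }
static void clear_intbits(struct fake_dev *dd, uint32_t bits) { dd->intstatus &= ~bits; }

/* pretend to drain the port-0 receive ring, advancing the head */
static void process_port0_ring(struct fake_dev *dd)
{
        dd->port0_head = dd->port0_tail;
}

static void slow_path(struct fake_dev *dd, uint32_t istat)
{
        /* error, GPIO, user-port and pioavail handling would live here */
        clear_intbits(dd, istat);
}

static void irq_handler(struct fake_dev *dd)
{
        if (dd->port0_head != dd->port0_tail) {
                uint32_t oldhead = dd->port0_head;

                /*
                 * Fast path: clear only the port-0 receive bits and drain
                 * the ring.  If anything else is pending, the device will
                 * interrupt again and the slow path below runs next time.
                 */
                clear_intbits(dd, PORT0_RCV_BITS);
                process_port0_ring(dd);
                if (oldhead != dd->port0_head) {
                        dd->fastrcvint++;
                        return;
                }
        }

        /* slow path: pay for the status read and dispatch everything */
        slow_path(dd, read_intstatus(dd));
}

int main(void)
{
        struct fake_dev dd = { .port0_tail = 4, .intstatus = PORT0_RCV_BITS };

        irq_handler(&dd);
        printf("fastrcvint = %u\n", dd.fastrcvint);     /* prints 1 */
        return 0;
}

Built with any C99 compiler, main() runs the handler once against a mocked non-empty port-0 ring and prints fastrcvint = 1, mirroring the sps_fastrcvint counter the patch adds.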
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h |   4
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c |  11
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c   | 162
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c  |   1
 4 files changed, 105 insertions(+), 73 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 3a849e71638e..7a8a086a579a 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -97,8 +97,8 @@ struct infinipath_stats {
         __u64 sps_hwerrs;
         /* number of times IB link changed state unexpectedly */
         __u64 sps_iblink;
-        /* no longer used; left for compatibility */
-        __u64 sps_unused3;
+        /* kernel receive interrupts that didn't read intstat */
+        __u64 sps_fastrcvint;
         /* number of kernel (port0) packets received */
         __u64 sps_port0pkts;
         /* number of "ethernet" packets sent by driver */
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 0b88642381f8..eb745234d8c3 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -888,12 +888,7 @@ void ipath_kreceive(struct ipath_devdata *dd)
             (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
                 goto done;
 
-gotmore:
-        /*
-         * read only once at start. If in flood situation, this helps
-         * performance slightly. If more arrive while we are processing,
-         * we'll come back here and do them
-         */
+        /* read only once at start for performance */
         hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
 
         for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
@@ -1022,10 +1017,6 @@ gotmore:
 
         dd->ipath_port0head = l;
 
-        if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
-                /* more arrived while we handled first batch */
-                goto gotmore;
-
         if (pkttot > ipath_stats.sps_maxpkts_call)
                 ipath_stats.sps_maxpkts_call = pkttot;
         ipath_stats.sps_port0pkts += pkttot;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 8a4d732b4858..e8c8a25cd922 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -539,10 +539,10 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                                 continue;
                         if (hd == (tl + 1) ||
                             (!hd && tl == dd->ipath_hdrqlast)) {
-                                dd->ipath_lastrcvhdrqtails[i] = tl;
-                                pd->port_hdrqfull++;
                                 if (i == 0)
                                         chkerrpkts = 1;
+                                dd->ipath_lastrcvhdrqtails[i] = tl;
+                                pd->port_hdrqfull++;
                         }
                 }
         }
@@ -724,7 +724,12 @@ set:
                          dd->ipath_sendctrl);
 }
 
-static void handle_rcv(struct ipath_devdata *dd, u32 istat)
+/*
+ * Handle receive interrupts for user ports; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll
+ */
+static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 {
         u64 portr;
         int i;
@@ -734,22 +739,17 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
                  infinipath_i_rcvavail_mask)
                 | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
                    infinipath_i_rcvurg_mask);
-        for (i = 0; i < dd->ipath_cfgports; i++) {
+        for (i = 1; i < dd->ipath_cfgports; i++) {
                 struct ipath_portdata *pd = dd->ipath_pd[i];
-                if (portr & (1 << i) && pd &&
-                    pd->port_cnt) {
-                        if (i == 0)
-                                ipath_kreceive(dd);
-                        else if (test_bit(IPATH_PORT_WAITING_RCV,
-                                          &pd->port_flag)) {
-                                int rcbit;
-                                clear_bit(IPATH_PORT_WAITING_RCV,
-                                          &pd->port_flag);
-                                rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
-                                clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
-                                wake_up_interruptible(&pd->port_wait);
-                                rcvdint = 1;
-                        }
+                if (portr & (1 << i) && pd && pd->port_cnt &&
+                    test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+                        int rcbit;
+                        clear_bit(IPATH_PORT_WAITING_RCV,
+                                  &pd->port_flag);
+                        rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
+                        clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
+                        wake_up_interruptible(&pd->port_wait);
+                        rcvdint = 1;
                 }
         }
         if (rcvdint) {
@@ -767,14 +767,17 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
         struct ipath_devdata *dd = data;
         u32 istat;
         ipath_err_t estat = 0;
-        static unsigned unexpected = 0;
         irqreturn_t ret;
+        u32 p0bits;
+        static unsigned unexpected = 0;
+        static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
+                (1U<<INFINIPATH_I_RCVURG_SHIFT);
+
+        ipath_stats.sps_ints++;
 
-        if(!(dd->ipath_flags & IPATH_PRESENT)) {
-                /* this is mostly so we don't try to touch the chip while
-                 * it is being reset */
+        if (!(dd->ipath_flags & IPATH_PRESENT)) {
                 /*
-                 * This return value is perhaps odd, but we do not want the
+                 * This return value is not great, but we do not want the
                  * interrupt core code to remove our interrupt handler
                  * because we don't appear to be handling an interrupt
                  * during a chip reset.
@@ -782,6 +785,50 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                 return IRQ_HANDLED;
         }
 
+        /*
+         * this needs to be flags&initted, not statusp, so we keep
+         * taking interrupts even after link goes down, etc.
+         * Also, we *must* clear the interrupt at some point, or we won't
+         * take it again, which can be real bad for errors, etc...
+         */
+
+        if (!(dd->ipath_flags & IPATH_INITTED)) {
+                ipath_bad_intr(dd, &unexpected);
+                ret = IRQ_NONE;
+                goto bail;
+        }
+
+        /*
+         * We try to avoid reading the interrupt status register, since
+         * that's a PIO read, and stalls the processor for up to about
+         * ~0.25 usec. The idea is that if we processed a port0 packet,
+         * we blindly clear the port 0 receive interrupt bits, and nothing
+         * else, then return. If other interrupts are pending, the chip
+         * will re-interrupt us as soon as we write the intclear register.
+         * We then won't process any more kernel packets (if not the 2nd
+         * time, then the 3rd or 4th) and we'll then handle the other
+         * interrupts. We clear the interrupts first so that we don't
+         * lose intr for later packets that arrive while we are processing.
+         */
+        if (dd->ipath_port0head !=
+            (u32)le64_to_cpu(*dd->ipath_hdrqtailptr)) {
+                u32 oldhead = dd->ipath_port0head;
+                if (dd->ipath_flags & IPATH_GPIO_INTR) {
+                        ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+                                         (u64) (1 << 2));
+                        p0bits = port0rbits | INFINIPATH_I_GPIO;
+                }
+                else
+                        p0bits = port0rbits;
+                ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, p0bits);
+                ipath_kreceive(dd);
+                if (oldhead != dd->ipath_port0head) {
+                        ipath_stats.sps_fastrcvint++;
+                        goto done;
+                }
+                istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+        }
+
         istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
         if (unlikely(!istat)) {
                 ipath_stats.sps_nullintr++;
@@ -795,31 +842,17 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                 goto bail;
         }
 
-        ipath_stats.sps_ints++;
-
-        /*
-         * this needs to be flags&initted, not statusp, so we keep
-         * taking interrupts even after link goes down, etc.
-         * Also, we *must* clear the interrupt at some point, or we won't
-         * take it again, which can be real bad for errors, etc...
-         */
-
-        if (!(dd->ipath_flags & IPATH_INITTED)) {
-                ipath_bad_intr(dd, &unexpected);
-                ret = IRQ_NONE;
-                goto bail;
-        }
         if (unexpected)
                 unexpected = 0;
 
-        ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
-
-        if (istat & ~infinipath_i_bitsextant)
+        if (unlikely(istat & ~infinipath_i_bitsextant))
                 ipath_dev_err(dd,
                               "interrupt with unknown interrupts %x set\n",
                               istat & (u32) ~ infinipath_i_bitsextant);
+        else
+                ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
 
-        if (istat & INFINIPATH_I_ERROR) {
+        if (unlikely(istat & INFINIPATH_I_ERROR)) {
                 ipath_stats.sps_errints++;
                 estat = ipath_read_kreg64(dd,
                                           dd->ipath_kregs->kr_errorstatus);
@@ -837,7 +870,14 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                 handle_errors(dd, estat);
         }
 
+        p0bits = port0rbits;
         if (istat & INFINIPATH_I_GPIO) {
+                /*
+                 * Packets are available in the port 0 rcv queue.
+                 * Eventually this needs to be generalized to check
+                 * IPATH_GPIO_INTR, and the specific GPIO bit, if
+                 * GPIO interrupts are used for anything else.
+                 */
                 if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
                         u32 gpiostatus;
                         gpiostatus = ipath_read_kreg32(
@@ -851,14 +891,7 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                         /* Clear GPIO status bit 2 */
                         ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
                                          (u64) (1 << 2));
-
-                        /*
-                         * Packets are available in the port 0 rcv queue.
-                         * Eventually this needs to be generalized to check
-                         * IPATH_GPIO_INTR, and the specific GPIO bit, if
-                         * GPIO interrupts are used for anything else.
-                         */
-                        ipath_kreceive(dd);
+                        p0bits |= INFINIPATH_I_GPIO;
                 }
         }
 
@@ -871,6 +904,25 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
          */
         ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
+        /*
+         * we check for both transition from empty to non-empty, and urgent
+         * packets (those with the interrupt bit set in the header), and
+         * if enabled, the GPIO bit 2 interrupt used for port0 on some
+         * HT-400 boards.
+         * Do this before checking for pio buffers available, since
+         * receives can overflow; piobuf waiters can afford a few
+         * extra cycles, since they were waiting anyway.
+         */
+        if (istat & p0bits) {
+                ipath_kreceive(dd);
+                istat &= ~port0rbits;
+        }
+        if (istat & ((infinipath_i_rcvavail_mask <<
+                      INFINIPATH_I_RCVAVAIL_SHIFT)
+                     | (infinipath_i_rcvurg_mask <<
+                        INFINIPATH_I_RCVURG_SHIFT)))
+                handle_urcv(dd, istat);
+
         if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
                 clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
                 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
@@ -882,17 +934,7 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                 handle_layer_pioavail(dd);
         }
 
-        /*
-         * we check for both transition from empty to non-empty, and urgent
-         * packets (those with the interrupt bit set in the header)
-         */
-
-        if (istat & ((infinipath_i_rcvavail_mask <<
-                      INFINIPATH_I_RCVAVAIL_SHIFT)
-                     | (infinipath_i_rcvurg_mask <<
-                        INFINIPATH_I_RCVURG_SHIFT)))
-                handle_rcv(dd, istat);
-
+done:
         ret = IRQ_HANDLED;
 
 bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index 2651701ce1b5..70351b7e35c0 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -186,7 +186,6 @@ static void ipath_qcheck(struct ipath_devdata *dd)
                            dd->ipath_port0head,
                            (unsigned long long)
                            ipath_stats.sps_port0pkts);
-                ipath_kreceive(dd);
         }
         dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
 }