author		Vladislav Zolotarov <vladz@broadcom.com>	2009-11-16 01:05:58 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-17 07:08:48 -0500
commit		54b9ddaa68414fad72ab2e1042be067c902441a6 (patch)
tree		2adf18084aab8d2a013d31c8a2ac708f2e3ddec8 /drivers/net/bnx2x_main.c
parent		208f2037ae4a2f23fe5f232d25f4030b3a35c3ed (diff)
bnx2x: Handle Rx and Tx together in NAPI

Move the Tx and Rx DPC work into the NAPI handler:
- Saves status blocks.
- Moves the Tx work from the hard IRQ into NAPI.
Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	367
1 file changed, 151 insertions(+), 216 deletions(-)
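In outline, the patch collapses the old split (Rx serviced in NAPI, Tx completions serviced in the hard IRQ handler) into a single NAPI poller that drains Tx completions first, then Rx against the budget, and only completes NAPI and re-enables the interrupt once both directions are idle. The sketch below shows that pattern in isolation; it is a simplified illustration, and every example_* type and helper is a hypothetical stand-in rather than a bnx2x symbol — the driver's real version is bnx2x_poll() in the diff below.

/*
 * Simplified sketch of the combined Rx+Tx NAPI pattern (illustrative
 * only; the example_* names are hypothetical, not bnx2x symbols).
 */
#include <linux/netdevice.h>

struct example_queue {
	struct napi_struct napi;
	/* ring pointers and status-block indices would live here */
};

/* stand-ins for the driver's own work checks and ring processing */
extern bool example_has_tx_work(struct example_queue *q);
extern bool example_has_rx_work(struct example_queue *q);
extern void example_tx_int(struct example_queue *q);
extern int example_rx_int(struct example_queue *q, int budget);
extern void example_reenable_irq(struct example_queue *q);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_queue *q =
		container_of(napi, struct example_queue, napi);
	int work_done = 0;

	while (1) {
		/* Tx completions are cheap and not counted against budget */
		if (example_has_tx_work(q))
			example_tx_int(q);

		if (example_has_rx_work(q)) {
			work_done += example_rx_int(q, budget - work_done);
			/* budget consumed: stay scheduled, repoll later */
			if (work_done >= budget)
				break;
		}

		/* complete only when neither direction has pending work */
		if (!example_has_rx_work(q) && !example_has_tx_work(q)) {
			napi_complete(napi);
			example_reenable_irq(q);
			break;
		}
	}
	return work_done;
}

Counting only Rx against the budget mirrors what the patched bnx2x_poll() does: Tx cleanup is bounded by the ring size, while the budget throttles Rx. The double check before completing (with an rmb() in the real driver) closes the race with a status-block update that lands after the last work check.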
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index e2cf686d1118..bdecd42d2b29 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,7 +57,7 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-4"
+#define DRV_MODULE_VERSION	"1.52.1-5"
 #define DRV_MODULE_RELDATE	"2009/11/09"
 #define BNX2X_BC_VER		0x040200
 
@@ -91,15 +91,10 @@ module_param(multi_mode, int, 0);
 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
 			     "(0 Disable; 1 Enable (default))");
 
-static int num_rx_queues;
-module_param(num_rx_queues, int, 0);
-MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
-				" (default is half number of CPUs)");
-
-static int num_tx_queues;
-module_param(num_tx_queues, int, 0);
-MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
-				" (default is half number of CPUs)");
+static int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+			     " (default is as a number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -558,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
@@ -575,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
@@ -590,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Rings */
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -624,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -792,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 	barrier();
 }
 
-static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	struct host_status_block *fpsb = fp->status_blk;
-	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
-		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-		rc |= 1;
-	}
-	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
-		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-		rc |= 2;
-	}
-	return rc;
+	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
 }
 
 static u16 bnx2x_ack_int(struct bnx2x *bp)
@@ -846,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
 
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
 	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
 	   idx, tx_buf, skb);
 
@@ -890,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* release skb */
 	WARN_ON(!skb);
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
 
@@ -920,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 	return (s16)(fp->bp->tx_ring_size) - used;
 }
 
-static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	return hw_cons != fp->tx_pkt_cons;
+}
+
+static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x *bp = fp->bp;
 	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-	int done = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
-		return;
+		return -1;
 #endif
 
-	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;
 
@@ -953,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		 */
 		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
 		sw_cons++;
-		done++;
 	}
 
 	fp->tx_pkt_cons = sw_cons;
@@ -975,6 +973,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_tx_wake_queue(txq);
 	}
+	return 0;
 }
 
 #ifdef BCM_CNIC
@@ -1561,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		} else {
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
+			prefetch(skb);
+			prefetch((u8 *)skb + 256);
 			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
 			pad = cqe->fast_path_cqe.placement_offset;
 
@@ -1742,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
 #endif
-	/* Handle Rx or Tx according to MSI-X vector */
-	if (fp->is_rx_queue) {
-		prefetch(fp->rx_cons_sb);
-		prefetch(&fp->status_blk->u_status_block.status_block_index);
-
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-	} else {
-		prefetch(fp->tx_cons_sb);
-		prefetch(&fp->status_blk->c_status_block.status_block_index);
-
-		bnx2x_update_fpsb_idx(fp);
-		rmb();
-		bnx2x_tx_int(fp);
 
-		/* Re-enable interrupts */
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
-	}
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
 	return IRQ_HANDLED;
 }
@@ -1797,31 +1784,14 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 
 		mask = 0x2 << fp->sb_id;
 		if (status & mask) {
-			/* Handle Rx or Tx according to SB id */
-			if (fp->is_rx_queue) {
-				prefetch(fp->rx_cons_sb);
-				prefetch(&fp->status_blk->u_status_block.
-							status_block_index);
-
-				napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-			} else {
-				prefetch(fp->tx_cons_sb);
-				prefetch(&fp->status_blk->c_status_block.
-							status_block_index);
-
-				bnx2x_update_fpsb_idx(fp);
-				rmb();
-				bnx2x_tx_int(fp);
-
-				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-					     le16_to_cpu(fp->fp_u_idx),
-					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-					     le16_to_cpu(fp->fp_c_idx),
-					     IGU_INT_ENABLE, 1);
-			}
+			/* Handle Rx and Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			prefetch(&fp->status_blk->u_status_block.
+						status_block_index);
+			prefetch(fp->tx_cons_sb);
+			prefetch(&fp->status_blk->c_status_block.
+						status_block_index);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
@@ -4027,7 +3997,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 	estats->no_buff_discard_hi = 0;
 	estats->no_buff_discard_lo = 0;
 
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		int cl_id = fp->cl_id;
 		struct tstorm_per_client_stats *tclient =
@@ -4244,7 +4214,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
 	nstats->rx_dropped = estats->mac_discard;
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		nstats->rx_dropped +=
 			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
 
@@ -4298,7 +4268,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
 	estats->rx_err_discard_pkt = 0;
 	estats->rx_skb_alloc_failed = 0;
 	estats->hw_csum_err = 0;
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
 
 		estats->driver_xoff += qstats->driver_xoff;
@@ -4329,7 +4299,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	if (bp->msglevel & NETIF_MSG_TIMER) {
 		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
+		struct bnx2x_fastpath *fp0_tx = bp->fp;
 		struct tstorm_per_client_stats *old_tclient =
 							&bp->fp->old_tclient;
 		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4984,7 +4954,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 
-		for_each_rx_queue(bp, j) {
+		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
 
 			for (i = 0; i < max_agg_queues; i++) {
@@ -5007,16 +4977,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
 		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
 
-		/* Mark queue as Rx */
-		fp->is_rx_queue = 1;
-
 		/* "next page" elements initialization */
 		/* SGE ring */
 		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5122,7 +5089,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 {
 	int i, j;
 
-	for_each_tx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5148,10 +5115,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
 		fp->tx_pkt = 0;
 	}
-
-	/* clean tx statistics */
-	for_each_rx_queue(bp, i)
-		bnx2x_fp(bp, i, tx_pkt) = 0;
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5180,7 +5143,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i) {
+	/* Rx */
+	for_each_queue(bp, i) {
 		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		u8 cl_id = fp->cl_id;
@@ -5232,10 +5196,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
 					ETH_CONNECTION_TYPE);
 	}
 
-	for_each_tx_queue(bp, i) {
+	/* Tx */
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		struct eth_context *context =
-			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
+			bnx2x_sp(bp, context[i].eth);
 
 		context->cstorm_st_context.sb_index_number =
 						C_SB_ETH_TX_CQ_INDEX;
@@ -5263,7 +5228,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % bp->num_rx_queues));
+			bp->fp->cl_id + (i % bp->num_queues));
 }
 
 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5507,7 +5472,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
 			  SGE_PAGE_SIZE * PAGES_PER_SGE),
 		    (u32)0xffff);
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5542,7 +5507,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		rx_pause.cqe_thr_high = 350;
 		rx_pause.sge_thr_high = 0;
 
-		for_each_rx_queue(bp, i) {
+		for_each_queue(bp, i) {
 			struct bnx2x_fastpath *fp = &bp->fp[i];
 
 			if (!fp->disable_tpa) {
@@ -5637,9 +5602,6 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 #else
 		fp->sb_id = fp->cl_id;
 #endif
-		/* Suitable Rx and Tx SBs are served by the same client */
-		if (i >= bp->num_rx_queues)
-			fp->cl_id -= bp->num_rx_queues;
 		DP(NETIF_MSG_IFUP,
 		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
 		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -6749,7 +6711,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 			       sizeof(struct host_status_block));
 	}
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6769,7 +6731,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath tx rings: tx_buf tx_desc */
 		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6831,7 +6793,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 				sizeof(struct host_status_block));
 	}
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6853,7 +6815,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath tx rings: tx_buf tx_desc */
 		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6909,7 +6871,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		u16 bd_cons = fp->tx_bd_cons;
@@ -6927,7 +6889,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
 	int i, j;
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		for (i = 0; i < NUM_RX_BD; i++) {
@@ -7042,12 +7004,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 #endif
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		if (i < bp->num_rx_queues)
-			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
-		else
-			sprintf(fp->name, "%s-tx-%d",
-				bp->dev->name, i - bp->num_rx_queues);
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 bp->dev->name, i);
 
 		rc = request_irq(bp->msix_table[i + offset].vector,
 				 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -7106,7 +7064,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -7114,7 +7072,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -7410,88 +7368,60 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
 
 static int bnx2x_poll(struct napi_struct *napi, int budget);
 
-static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
-				    int *num_tx_queues_out)
+static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
 {
-	int _num_rx_queues = 0, _num_tx_queues = 0;
 
 	switch (bp->multi_mode) {
 	case ETH_RSS_MODE_DISABLED:
-		_num_rx_queues = 1;
-		_num_tx_queues = 1;
+		bp->num_queues = 1;
 		break;
 
 	case ETH_RSS_MODE_REGULAR:
-		if (num_rx_queues)
-			_num_rx_queues = min_t(u32, num_rx_queues,
+		if (num_queues)
+			bp->num_queues = min_t(u32, num_queues,
 					       BNX2X_MAX_QUEUES(bp));
-		else
-			_num_rx_queues = min_t(u32, num_online_cpus(),
-					       BNX2X_MAX_QUEUES(bp));
-
-		if (num_tx_queues)
-			_num_tx_queues = min_t(u32, num_tx_queues,
-					       BNX2X_MAX_QUEUES(bp));
 		else
-			_num_tx_queues = min_t(u32, num_online_cpus(),
+			bp->num_queues = min_t(u32, num_online_cpus(),
 					       BNX2X_MAX_QUEUES(bp));
-
-		/* There must be not more Tx queues than Rx queues */
-		if (_num_tx_queues > _num_rx_queues) {
-			BNX2X_ERR("number of tx queues (%d) > "
-				  "number of rx queues (%d)"
-				  " defaulting to %d\n",
-				  _num_tx_queues, _num_rx_queues,
-				  _num_rx_queues);
-			_num_tx_queues = _num_rx_queues;
-		}
 		break;
 
 
 	default:
-		_num_rx_queues = 1;
-		_num_tx_queues = 1;
+		bp->num_queues = 1;
 		break;
 	}
-
-	*num_rx_queues_out = _num_rx_queues;
-	*num_tx_queues_out = _num_tx_queues;
 }
 
-static int bnx2x_set_int_mode(struct bnx2x *bp)
+static int bnx2x_set_num_queues(struct bnx2x *bp)
 {
 	int rc = 0;
 
 	switch (int_mode) {
 	case INT_MODE_INTx:
 	case INT_MODE_MSI:
-		bp->num_rx_queues = 1;
-		bp->num_tx_queues = 1;
+		bp->num_queues = 1;
 		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
 		break;
 
 	case INT_MODE_MSIX:
 	default:
-		/* Set interrupt mode according to bp->multi_mode value */
-		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
-					&bp->num_tx_queues);
+		/* Set number of queues according to bp->multi_mode value */
+		bnx2x_set_num_queues_msix(bp);
 
-		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
-		   bp->num_rx_queues, bp->num_tx_queues);
+		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+		   bp->num_queues);
 
 		/* if we can't use MSI-X we only need one fp,
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
 		rc = bnx2x_enable_msix(bp);
-		if (rc) {
+		if (rc)
 			/* failed to enable MSI-X */
-			bp->num_rx_queues = 1;
-			bp->num_tx_queues = 1;
-		}
+			bp->num_queues = 1;
 		break;
 	}
-	bp->dev->real_num_tx_queues = bp->num_tx_queues;
+	bp->dev->real_num_tx_queues = bp->num_queues;
 	return rc;
 }
 
@@ -7513,16 +7443,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
-	rc = bnx2x_set_int_mode(bp);
+	rc = bnx2x_set_num_queues(bp);
 
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 			((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, 128);
 
@@ -7536,7 +7466,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		}
 	} else {
 		/* Fall to INTx if failed to enable MSI-X due to lack of
-		   memory (in bnx2x_set_int_mode()) */
+		   memory (in bnx2x_set_num_queues()) */
 		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
 			bnx2x_enable_msi(bp);
 		bnx2x_ack_int(bp);
@@ -7730,14 +7660,14 @@ load_error3:
 	bp->port.pmf = 0;
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error2:
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 load_error1:
 	bnx2x_napi_disable(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -7928,7 +7858,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bnx2x_free_irq(bp);
 
 	/* Wait until tx fastpath tasks complete */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		cnt = 1000;
@@ -8071,9 +8001,9 @@ unload_error:
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -10269,7 +10199,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	struct sk_buff *skb;
 	unsigned char *packet;
 	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
-	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
+	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
 	u16 pkt_prod, bd_prod;
@@ -10346,7 +10276,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	fp_tx->tx_db.data.prod += 2;
 	barrier();
-	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
+	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
 
 	mmiowb();
 
@@ -10725,7 +10655,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 	switch(stringset) {
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
-			num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
+			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
 			if (!IS_E1HMF_MODE_STAT(bp))
 				num_stats += BNX2X_NUM_STATS;
 		} else {
@@ -10756,7 +10686,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
 			k = 0;
-			for_each_rx_queue(bp, i) {
+			for_each_queue(bp, i) {
 				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
 						bnx2x_q_stats_arr[j].string, i);
@@ -10793,7 +10723,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	if (is_multi(bp)) {
 		k = 0;
-		for_each_rx_queue(bp, i) {
+		for_each_queue(bp, i) {
 			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10989,54 +10919,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 
 static int bnx2x_poll(struct napi_struct *napi, int budget)
 {
+	int work_done = 0;
 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
 						 napi);
 	struct bnx2x *bp = fp->bp;
-	int work_done = 0;
 
+	while (1) {
 #ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		goto poll_panic;
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
 #endif
 
-	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
-	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
-
-	bnx2x_update_fpsb_idx(fp);
-
-	if (bnx2x_has_rx_work(fp)) {
-		work_done = bnx2x_rx_int(fp, budget);
+		if (bnx2x_has_tx_work(fp))
+			bnx2x_tx_int(fp);
 
-		/* must not complete if we consumed full budget */
-		if (work_done >= budget)
-			goto poll_again;
-	}
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
 
-	/* bnx2x_has_rx_work() reads the status block, thus we need to
-	 * ensure that status block indices have been actually read
-	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
-	 * so that we won't write the "newer" value of the status block to IGU
-	 * (if there was a DMA right after bnx2x_has_rx_work and
-	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
-	 * may be postponed to right before bnx2x_ack_sb). In this case
-	 * there will never be another interrupt until there is another update
-	 * of the status block, while there is still unhandled work.
-	 */
-	rmb();
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
 
-	if (!bnx2x_has_rx_work(fp)) {
-#ifdef BNX2X_STOP_ON_ERROR
-poll_panic:
-#endif
-		napi_complete(napi);
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+			bnx2x_update_fpsb_idx(fp);
+		/* bnx2x_has_rx_work() reads the status block, thus we need
+		 * to ensure that status block indices have been actually read
+		 * (bnx2x_update_fpsb_idx) prior to this check
+		 * (bnx2x_has_rx_work) so that we won't write the "newer"
+		 * value of the status block to IGU (if there was a DMA right
+		 * after bnx2x_has_rx_work and if there is no rmb, the memory
+		 * reading (bnx2x_update_fpsb_idx) may be postponed to right
+		 * before bnx2x_ack_sb). In this case there will never be
+		 * another interrupt until there is another update of the
+		 * status block, while there is still unhandled work.
+		 */
+			rmb();
 
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+					     le16_to_cpu(fp->fp_c_idx),
+					     IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_u_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
 	}
 
-poll_again:
 	return work_done;
 }
 
@@ -11221,7 +11157,7 @@ exit_lbl:
 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	struct bnx2x_fastpath *fp, *fp_stat;
+	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
@@ -11243,11 +11179,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, fp_index);
 
-	fp = &bp->fp[fp_index + bp->num_rx_queues];
-	fp_stat = &bp->fp[fp_index];
+	fp = &bp->fp[fp_index];
 
 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-		fp_stat->eth_q_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
@@ -11473,7 +11408,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	fp->tx_db.data.prod += nbd;
 	barrier();
-	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
+	DOORBELL(bp, fp->index, fp->tx_db.raw);
 
 	mmiowb();
 
@@ -11484,11 +11419,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
 		   if we put Tx into XOFF state. */
 		smp_mb();
-		fp_stat->eth_q_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 			netif_tx_wake_queue(txq);
 	}
-	fp_stat->tx_pkt++;
+	fp->tx_pkt++;
 
 	return NETDEV_TX_OK;
 }
@@ -12376,9 +12311,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 