diff options
author    Eilon Greenstein <eilong@broadcom.com>  2009-08-13 01:53:28 -0400
committer David S. Miller <davem@davemloft.net>   2009-08-13 01:53:28 -0400
commit    ca00392cb8f5227c67ff52c656d91a764d022ab9 (patch)
tree      007d82074e49d25d1ee6bfb484392032d463be91 /drivers/net/bnx2x_main.c
parent    6200f09036ee6f12822a9133dba7ed011b179c69 (diff)
bnx2x: Using the new FW
The new FW improves the packets-per-second rate. It required extensive changes
in the FW, which in turn imply many changes in the driver to support it. It is
now also possible for the driver to use a separate MSI-X vector for Rx and Tx -
this also adds some complexity to the change.
All things said - after this patch, practically all performance metrics show
improvement.
Though Vladislav Zolotarov has not signed off on this patch, he did most of the
work and deserves credit for it.
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r-- | drivers/net/bnx2x_main.c | 888
1 file changed, 481 insertions, 407 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 665ed36a0d48..762f37a7d038 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -10,7 +10,7 @@ | |||
10 | * Written by: Eliezer Tamir | 10 | * Written by: Eliezer Tamir |
11 | * Based on code from Michael Chan's bnx2 driver | 11 | * Based on code from Michael Chan's bnx2 driver |
12 | * UDP CSUM errata workaround by Arik Gendelman | 12 | * UDP CSUM errata workaround by Arik Gendelman |
13 | * Slowpath rework by Vladislav Zolotarov | 13 | * Slowpath and fastpath rework by Vladislav Zolotarov |
14 | * Statistics and Link management by Yitchak Gertner | 14 | * Statistics and Link management by Yitchak Gertner |
15 | * | 15 | * |
16 | */ | 16 | */ |
@@ -80,7 +80,18 @@ MODULE_VERSION(DRV_MODULE_VERSION); | |||
80 | 80 | ||
81 | static int multi_mode = 1; | 81 | static int multi_mode = 1; |
82 | module_param(multi_mode, int, 0); | 82 | module_param(multi_mode, int, 0); |
83 | MODULE_PARM_DESC(multi_mode, " Use per-CPU queues"); | 83 | MODULE_PARM_DESC(multi_mode, " Multi queue mode " |
84 | "(0 Disable; 1 Enable (default))"); | ||
85 | |||
86 | static int num_rx_queues; | ||
87 | module_param(num_rx_queues, int, 0); | ||
88 | MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" | ||
89 | " (default is half number of CPUs)"); | ||
90 | |||
91 | static int num_tx_queues; | ||
92 | module_param(num_tx_queues, int, 0); | ||
93 | MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1" | ||
94 | " (default is half number of CPUs)"); | ||
84 | 95 | ||
85 | static int disable_tpa; | 96 | static int disable_tpa; |
86 | module_param(disable_tpa, int, 0); | 97 | module_param(disable_tpa, int, 0); |
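Both new parameters default to half the number of online CPUs when left at zero. A minimal sketch of how such a default could be derived; the helper name and the exact clamping policy are illustrative assumptions, not code from this patch:

    #include <linux/cpumask.h>

    /* Illustrative helper, not from this patch: default to half the
     * online CPUs, clamped to [1, hw_max]. */
    static int default_queue_count(int hw_max)
    {
    	int n = num_online_cpus() / 2;

    	return n < 1 ? 1 : (n > hw_max ? hw_max : n);
    }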
@@ -542,16 +553,15 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
542 | /* Tx */ | 553 | /* Tx */ |
543 | for_each_tx_queue(bp, i) { | 554 | for_each_tx_queue(bp, i) { |
544 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 555 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
545 | struct eth_tx_db_data *hw_prods = fp->hw_tx_prods; | ||
546 | 556 | ||
547 | BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" | 557 | BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" |
548 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", | 558 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", |
549 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 559 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
550 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); | 560 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
551 | BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)" | 561 | BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)" |
552 | " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx), | 562 | " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx), |
553 | fp->status_blk->c_status_block.status_block_index, | 563 | fp->status_blk->c_status_block.status_block_index, |
554 | hw_prods->packets_prod, hw_prods->bds_prod); | 564 | fp->tx_db.data.prod); |
555 | } | 565 | } |
556 | 566 | ||
557 | /* Rings */ | 567 | /* Rings */ |
@@ -790,16 +800,6 @@ static u16 bnx2x_ack_int(struct bnx2x *bp) | |||
790 | * fast path service functions | 800 | * fast path service functions |
791 | */ | 801 | */ |
792 | 802 | ||
793 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) | ||
794 | { | ||
795 | u16 tx_cons_sb; | ||
796 | |||
797 | /* Tell compiler that status block fields can change */ | ||
798 | barrier(); | ||
799 | tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb); | ||
800 | return (fp->tx_pkt_cons != tx_cons_sb); | ||
801 | } | ||
802 | |||
803 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) | 803 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) |
804 | { | 804 | { |
805 | /* Tell compiler that consumer and producer can change */ | 805 | /* Tell compiler that consumer and producer can change */ |
@@ -814,7 +814,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
814 | u16 idx) | 814 | u16 idx) |
815 | { | 815 | { |
816 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; | 816 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; |
817 | struct eth_tx_bd *tx_bd; | 817 | struct eth_tx_start_bd *tx_start_bd; |
818 | struct eth_tx_bd *tx_data_bd; | ||
818 | struct sk_buff *skb = tx_buf->skb; | 819 | struct sk_buff *skb = tx_buf->skb; |
819 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | 820 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
820 | int nbd; | 821 | int nbd; |
@@ -824,51 +825,46 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
824 | 825 | ||
825 | /* unmap first bd */ | 826 | /* unmap first bd */ |
826 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | 827 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); |
827 | tx_bd = &fp->tx_desc_ring[bd_idx]; | 828 | tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; |
828 | pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd), | 829 | pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd), |
829 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); | 830 | BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); |
830 | 831 | ||
831 | nbd = le16_to_cpu(tx_bd->nbd) - 1; | 832 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
832 | new_cons = nbd + tx_buf->first_bd; | ||
833 | #ifdef BNX2X_STOP_ON_ERROR | 833 | #ifdef BNX2X_STOP_ON_ERROR |
834 | if (nbd > (MAX_SKB_FRAGS + 2)) { | 834 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { |
835 | BNX2X_ERR("BAD nbd!\n"); | 835 | BNX2X_ERR("BAD nbd!\n"); |
836 | bnx2x_panic(); | 836 | bnx2x_panic(); |
837 | } | 837 | } |
838 | #endif | 838 | #endif |
839 | new_cons = nbd + tx_buf->first_bd; | ||
839 | 840 | ||
840 | /* Skip a parse bd and the TSO split header bd | 841 | /* Get the next bd */ |
841 | since they have no mapping */ | 842 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
842 | if (nbd) | ||
843 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
844 | 843 | ||
845 | if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM | | 844 | /* Skip a parse bd... */ |
846 | ETH_TX_BD_FLAGS_TCP_CSUM | | 845 | --nbd; |
847 | ETH_TX_BD_FLAGS_SW_LSO)) { | 846 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
848 | if (--nbd) | 847 | |
849 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 848 | /* ...and the TSO split header bd since they have no mapping */ |
850 | tx_bd = &fp->tx_desc_ring[bd_idx]; | 849 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { |
851 | /* is this a TSO split header bd? */ | 850 | --nbd; |
852 | if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) { | 851 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
853 | if (--nbd) | ||
854 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
855 | } | ||
856 | } | 852 | } |
857 | 853 | ||
858 | /* now free frags */ | 854 | /* now free frags */ |
859 | while (nbd > 0) { | 855 | while (nbd > 0) { |
860 | 856 | ||
861 | DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); | 857 | DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); |
862 | tx_bd = &fp->tx_desc_ring[bd_idx]; | 858 | tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; |
863 | pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd), | 859 | pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd), |
864 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); | 860 | BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE); |
865 | if (--nbd) | 861 | if (--nbd) |
866 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 862 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
867 | } | 863 | } |
868 | 864 | ||
869 | /* release skb */ | 865 | /* release skb */ |
870 | WARN_ON(!skb); | 866 | WARN_ON(!skb); |
871 | dev_kfree_skb(skb); | 867 | dev_kfree_skb_any(skb); |
872 | tx_buf->first_bd = 0; | 868 | tx_buf->first_bd = 0; |
873 | tx_buf->skb = NULL; | 869 | tx_buf->skb = NULL; |
874 | 870 | ||
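The rewritten free path implies a fixed per-packet BD layout: a start BD, a parse BD, one data BD per fragment, and an extra BD when TSO splits the header out. A sketch of that accounting, inferred from the unmap/skip sequence above (helper name is illustrative):

    #include <linux/skbuff.h>

    /* Illustrative only: BD count implied by the free path above --
     * start BD + parse BD + one data BD per fragment, plus one more
     * when the TSO header is split out. */
    static int tx_bds_for_skb(const struct sk_buff *skb, bool tso_split)
    {
    	return 2 + skb_shinfo(skb)->nr_frags + (tso_split ? 1 : 0);
    }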
@@ -910,7 +906,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
910 | return; | 906 | return; |
911 | #endif | 907 | #endif |
912 | 908 | ||
913 | txq = netdev_get_tx_queue(bp->dev, fp->index); | 909 | txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues); |
914 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | 910 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); |
915 | sw_cons = fp->tx_pkt_cons; | 911 | sw_cons = fp->tx_pkt_cons; |
916 | 912 | ||
@@ -940,8 +936,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
940 | /* TBD need a thresh? */ | 936 | /* TBD need a thresh? */ |
941 | if (unlikely(netif_tx_queue_stopped(txq))) { | 937 | if (unlikely(netif_tx_queue_stopped(txq))) { |
942 | 938 | ||
943 | __netif_tx_lock(txq, smp_processor_id()); | ||
944 | |||
945 | /* Need to make the tx_bd_cons update visible to start_xmit() | 939 | /* Need to make the tx_bd_cons update visible to start_xmit() |
946 | * before checking for netif_tx_queue_stopped(). Without the | 940 | * before checking for netif_tx_queue_stopped(). Without the |
947 | * memory barrier, there is a small possibility that | 941 | * memory barrier, there is a small possibility that |
@@ -954,8 +948,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
954 | (bp->state == BNX2X_STATE_OPEN) && | 948 | (bp->state == BNX2X_STATE_OPEN) && |
955 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) | 949 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) |
956 | netif_tx_wake_queue(txq); | 950 | netif_tx_wake_queue(txq); |
957 | |||
958 | __netif_tx_unlock(txq); | ||
959 | } | 951 | } |
960 | } | 952 | } |
961 | 953 | ||
@@ -1023,6 +1015,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
1023 | break; | 1015 | break; |
1024 | 1016 | ||
1025 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): | 1017 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): |
1018 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED): | ||
1026 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); | 1019 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); |
1027 | break; | 1020 | break; |
1028 | 1021 | ||
@@ -1688,7 +1681,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1688 | { | 1681 | { |
1689 | struct bnx2x_fastpath *fp = fp_cookie; | 1682 | struct bnx2x_fastpath *fp = fp_cookie; |
1690 | struct bnx2x *bp = fp->bp; | 1683 | struct bnx2x *bp = fp->bp; |
1691 | int index = fp->index; | ||
1692 | 1684 | ||
1693 | /* Return here if interrupt is disabled */ | 1685 | /* Return here if interrupt is disabled */ |
1694 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 1686 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
@@ -1697,20 +1689,34 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1697 | } | 1689 | } |
1698 | 1690 | ||
1699 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", | 1691 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", |
1700 | index, fp->sb_id); | 1692 | fp->index, fp->sb_id); |
1701 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); | 1693 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); |
1702 | 1694 | ||
1703 | #ifdef BNX2X_STOP_ON_ERROR | 1695 | #ifdef BNX2X_STOP_ON_ERROR |
1704 | if (unlikely(bp->panic)) | 1696 | if (unlikely(bp->panic)) |
1705 | return IRQ_HANDLED; | 1697 | return IRQ_HANDLED; |
1706 | #endif | 1698 | #endif |
1699 | /* Handle Rx or Tx according to MSI-X vector */ | ||
1700 | if (fp->is_rx_queue) { | ||
1701 | prefetch(fp->rx_cons_sb); | ||
1702 | prefetch(&fp->status_blk->u_status_block.status_block_index); | ||
1707 | 1703 | ||
1708 | prefetch(fp->rx_cons_sb); | 1704 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); |
1709 | prefetch(fp->tx_cons_sb); | ||
1710 | prefetch(&fp->status_blk->c_status_block.status_block_index); | ||
1711 | prefetch(&fp->status_blk->u_status_block.status_block_index); | ||
1712 | 1705 | ||
1713 | napi_schedule(&bnx2x_fp(bp, index, napi)); | 1706 | } else { |
1707 | prefetch(fp->tx_cons_sb); | ||
1708 | prefetch(&fp->status_blk->c_status_block.status_block_index); | ||
1709 | |||
1710 | bnx2x_update_fpsb_idx(fp); | ||
1711 | rmb(); | ||
1712 | bnx2x_tx_int(fp); | ||
1713 | |||
1714 | /* Re-enable interrupts */ | ||
1715 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
1716 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | ||
1717 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
1718 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); | ||
1719 | } | ||
1714 | 1720 | ||
1715 | return IRQ_HANDLED; | 1721 | return IRQ_HANDLED; |
1716 | } | 1722 | } |
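With one MSI-X vector per queue, the handler no longer polls both directions: an Rx vector only schedules NAPI, while a Tx vector completes transmissions inline and then re-enables both IGU indices. Stripped to its skeleton (intr_sem and panic checks omitted), the dispatch above looks like:

    /* Skeleton of the per-vector dispatch; all helpers are the
     * driver's own, declared in bnx2x.h. */
    static irqreturn_t fp_vector_skeleton(int irq, void *fp_cookie)
    {
    	struct bnx2x_fastpath *fp = fp_cookie;
    	struct bnx2x *bp = fp->bp;

    	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    	if (fp->is_rx_queue) {
    		/* Rx vector: defer the work to NAPI */
    		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
    	} else {
    		/* Tx vector: complete transmissions inline... */
    		bnx2x_update_fpsb_idx(fp);
    		rmb();
    		bnx2x_tx_int(fp);
    		/* ...then re-enable both IGU indices */
    		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
    			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
    		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
    			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
    	}
    	return IRQ_HANDLED;
    }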
@@ -1720,6 +1726,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1720 | struct bnx2x *bp = netdev_priv(dev_instance); | 1726 | struct bnx2x *bp = netdev_priv(dev_instance); |
1721 | u16 status = bnx2x_ack_int(bp); | 1727 | u16 status = bnx2x_ack_int(bp); |
1722 | u16 mask; | 1728 | u16 mask; |
1729 | int i; | ||
1723 | 1730 | ||
1724 | /* Return here if interrupt is shared and it's not for us */ | 1731 | /* Return here if interrupt is shared and it's not for us */ |
1725 | if (unlikely(status == 0)) { | 1732 | if (unlikely(status == 0)) { |
@@ -1739,18 +1746,38 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1739 | return IRQ_HANDLED; | 1746 | return IRQ_HANDLED; |
1740 | #endif | 1747 | #endif |
1741 | 1748 | ||
1742 | mask = 0x2 << bp->fp[0].sb_id; | 1749 | for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { |
1743 | if (status & mask) { | 1750 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1744 | struct bnx2x_fastpath *fp = &bp->fp[0]; | ||
1745 | 1751 | ||
1746 | prefetch(fp->rx_cons_sb); | 1752 | mask = 0x2 << fp->sb_id; |
1747 | prefetch(fp->tx_cons_sb); | 1753 | if (status & mask) { |
1748 | prefetch(&fp->status_blk->c_status_block.status_block_index); | 1754 | /* Handle Rx or Tx according to SB id */ |
1749 | prefetch(&fp->status_blk->u_status_block.status_block_index); | 1755 | if (fp->is_rx_queue) { |
1756 | prefetch(fp->rx_cons_sb); | ||
1757 | prefetch(&fp->status_blk->u_status_block. | ||
1758 | status_block_index); | ||
1750 | 1759 | ||
1751 | napi_schedule(&bnx2x_fp(bp, 0, napi)); | 1760 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); |
1752 | 1761 | ||
1753 | status &= ~mask; | 1762 | } else { |
1763 | prefetch(fp->tx_cons_sb); | ||
1764 | prefetch(&fp->status_blk->c_status_block. | ||
1765 | status_block_index); | ||
1766 | |||
1767 | bnx2x_update_fpsb_idx(fp); | ||
1768 | rmb(); | ||
1769 | bnx2x_tx_int(fp); | ||
1770 | |||
1771 | /* Re-enable interrupts */ | ||
1772 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
1773 | le16_to_cpu(fp->fp_u_idx), | ||
1774 | IGU_INT_NOP, 1); | ||
1775 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
1776 | le16_to_cpu(fp->fp_c_idx), | ||
1777 | IGU_INT_ENABLE, 1); | ||
1778 | } | ||
1779 | status &= ~mask; | ||
1780 | } | ||
1754 | } | 1781 | } |
1755 | 1782 | ||
1756 | 1783 | ||
@@ -2298,7 +2325,7 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2298 | pause_enabled = 1; | 2325 | pause_enabled = 1; |
2299 | 2326 | ||
2300 | REG_WR(bp, BAR_USTRORM_INTMEM + | 2327 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2301 | USTORM_PAUSE_ENABLED_OFFSET(port), | 2328 | USTORM_ETH_PAUSE_ENABLED_OFFSET(port), |
2302 | pause_enabled); | 2329 | pause_enabled); |
2303 | } | 2330 | } |
2304 | 2331 | ||
@@ -3756,7 +3783,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
3756 | estats->no_buff_discard_hi = 0; | 3783 | estats->no_buff_discard_hi = 0; |
3757 | estats->no_buff_discard_lo = 0; | 3784 | estats->no_buff_discard_lo = 0; |
3758 | 3785 | ||
3759 | for_each_queue(bp, i) { | 3786 | for_each_rx_queue(bp, i) { |
3760 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3787 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3761 | int cl_id = fp->cl_id; | 3788 | int cl_id = fp->cl_id; |
3762 | struct tstorm_per_client_stats *tclient = | 3789 | struct tstorm_per_client_stats *tclient = |
@@ -3795,11 +3822,24 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
3795 | } | 3822 | } |
3796 | 3823 | ||
3797 | qstats->total_bytes_received_hi = | 3824 | qstats->total_bytes_received_hi = |
3798 | qstats->valid_bytes_received_hi = | 3825 | le32_to_cpu(tclient->rcv_broadcast_bytes.hi); |
3799 | le32_to_cpu(tclient->total_rcv_bytes.hi); | ||
3800 | qstats->total_bytes_received_lo = | 3826 | qstats->total_bytes_received_lo = |
3827 | le32_to_cpu(tclient->rcv_broadcast_bytes.lo); | ||
3828 | |||
3829 | ADD_64(qstats->total_bytes_received_hi, | ||
3830 | le32_to_cpu(tclient->rcv_multicast_bytes.hi), | ||
3831 | qstats->total_bytes_received_lo, | ||
3832 | le32_to_cpu(tclient->rcv_multicast_bytes.lo)); | ||
3833 | |||
3834 | ADD_64(qstats->total_bytes_received_hi, | ||
3835 | le32_to_cpu(tclient->rcv_unicast_bytes.hi), | ||
3836 | qstats->total_bytes_received_lo, | ||
3837 | le32_to_cpu(tclient->rcv_unicast_bytes.lo)); | ||
3838 | |||
3839 | qstats->valid_bytes_received_hi = | ||
3840 | qstats->total_bytes_received_hi; | ||
3801 | qstats->valid_bytes_received_lo = | 3841 | qstats->valid_bytes_received_lo = |
3802 | le32_to_cpu(tclient->total_rcv_bytes.lo); | 3842 | qstats->total_bytes_received_lo; |
3803 | 3843 | ||
3804 | qstats->error_bytes_received_hi = | 3844 | qstats->error_bytes_received_hi = |
3805 | le32_to_cpu(tclient->rcv_error_bytes.hi); | 3845 | le32_to_cpu(tclient->rcv_error_bytes.hi); |
@@ -3832,9 +3872,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
3832 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); | 3872 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); |
3833 | 3873 | ||
3834 | qstats->total_bytes_transmitted_hi = | 3874 | qstats->total_bytes_transmitted_hi = |
3835 | le32_to_cpu(xclient->total_sent_bytes.hi); | 3875 | le32_to_cpu(xclient->unicast_bytes_sent.hi); |
3836 | qstats->total_bytes_transmitted_lo = | 3876 | qstats->total_bytes_transmitted_lo = |
3837 | le32_to_cpu(xclient->total_sent_bytes.lo); | 3877 | le32_to_cpu(xclient->unicast_bytes_sent.lo); |
3878 | |||
3879 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
3880 | le32_to_cpu(xclient->multicast_bytes_sent.hi), | ||
3881 | qstats->total_bytes_transmitted_lo, | ||
3882 | le32_to_cpu(xclient->multicast_bytes_sent.lo)); | ||
3883 | |||
3884 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
3885 | le32_to_cpu(xclient->broadcast_bytes_sent.hi), | ||
3886 | qstats->total_bytes_transmitted_lo, | ||
3887 | le32_to_cpu(xclient->broadcast_bytes_sent.lo)); | ||
3838 | 3888 | ||
3839 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, | 3889 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, |
3840 | total_unicast_packets_transmitted); | 3890 | total_unicast_packets_transmitted); |
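The storm byte counters arrive as split 32-bit halves, so the per-class sums above are folded in with ADD_64, which must propagate the carry out of the low word. A minimal sketch of that idiom (the macro name differs from the driver's):

    /* Carry-propagating 64-bit add over split u32 halves, in the
     * style of the driver's ADD_64: add the low words, then carry
     * into the high word if the low word wrapped. */
    #define ADD_64_SKETCH(s_hi, a_hi, s_lo, a_lo)			\
    	do {							\
    		u32 __lo = (s_lo) + (a_lo);			\
    		(s_hi) += (a_hi) + (__lo < (s_lo) ? 1 : 0);	\
    		(s_lo) = __lo;					\
    	} while (0)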
@@ -3950,7 +4000,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
3950 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); | 4000 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); |
3951 | 4001 | ||
3952 | nstats->rx_dropped = estats->mac_discard; | 4002 | nstats->rx_dropped = estats->mac_discard; |
3953 | for_each_queue(bp, i) | 4003 | for_each_rx_queue(bp, i) |
3954 | nstats->rx_dropped += | 4004 | nstats->rx_dropped += |
3955 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); | 4005 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); |
3956 | 4006 | ||
@@ -4004,7 +4054,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) | |||
4004 | estats->rx_err_discard_pkt = 0; | 4054 | estats->rx_err_discard_pkt = 0; |
4005 | estats->rx_skb_alloc_failed = 0; | 4055 | estats->rx_skb_alloc_failed = 0; |
4006 | estats->hw_csum_err = 0; | 4056 | estats->hw_csum_err = 0; |
4007 | for_each_queue(bp, i) { | 4057 | for_each_rx_queue(bp, i) { |
4008 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; | 4058 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; |
4009 | 4059 | ||
4010 | estats->driver_xoff += qstats->driver_xoff; | 4060 | estats->driver_xoff += qstats->driver_xoff; |
@@ -4034,6 +4084,8 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
4034 | bnx2x_drv_stats_update(bp); | 4084 | bnx2x_drv_stats_update(bp); |
4035 | 4085 | ||
4036 | if (bp->msglevel & NETIF_MSG_TIMER) { | 4086 | if (bp->msglevel & NETIF_MSG_TIMER) { |
4087 | struct bnx2x_fastpath *fp0_rx = bp->fp; | ||
4088 | struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]); | ||
4037 | struct tstorm_per_client_stats *old_tclient = | 4089 | struct tstorm_per_client_stats *old_tclient = |
4038 | &bp->fp->old_tclient; | 4090 | &bp->fp->old_tclient; |
4039 | struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; | 4091 | struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; |
@@ -4044,13 +4096,13 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
4044 | printk(KERN_DEBUG "%s:\n", bp->dev->name); | 4096 | printk(KERN_DEBUG "%s:\n", bp->dev->name); |
4045 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" | 4097 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" |
4046 | " tx pkt (%lx)\n", | 4098 | " tx pkt (%lx)\n", |
4047 | bnx2x_tx_avail(bp->fp), | 4099 | bnx2x_tx_avail(fp0_tx), |
4048 | le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets); | 4100 | le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets); |
4049 | printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)" | 4101 | printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)" |
4050 | " rx pkt (%lx)\n", | 4102 | " rx pkt (%lx)\n", |
4051 | (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) - | 4103 | (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) - |
4052 | bp->fp->rx_comp_cons), | 4104 | fp0_rx->rx_comp_cons), |
4053 | le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); | 4105 | le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets); |
4054 | printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u " | 4106 | printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u " |
4055 | "brb truncate %u\n", | 4107 | "brb truncate %u\n", |
4056 | (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"), | 4108 | (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"), |
@@ -4263,12 +4315,13 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) | |||
4263 | { | 4315 | { |
4264 | int port = BP_PORT(bp); | 4316 | int port = BP_PORT(bp); |
4265 | 4317 | ||
4266 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR + | 4318 | /* "CSTORM" */ |
4267 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | 4319 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + |
4268 | sizeof(struct ustorm_status_block)/4); | 4320 | CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0, |
4269 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR + | 4321 | CSTORM_SB_STATUS_BLOCK_U_SIZE / 4); |
4270 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | 4322 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + |
4271 | sizeof(struct cstorm_status_block)/4); | 4323 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0, |
4324 | CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); | ||
4272 | } | 4325 | } |
4273 | 4326 | ||
4274 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | 4327 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, |
@@ -4284,17 +4337,17 @@ static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | |||
4284 | u_status_block); | 4337 | u_status_block); |
4285 | sb->u_status_block.status_block_id = sb_id; | 4338 | sb->u_status_block.status_block_id = sb_id; |
4286 | 4339 | ||
4287 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4340 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4288 | USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); | 4341 | CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); |
4289 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4342 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4290 | ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), | 4343 | ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4), |
4291 | U64_HI(section)); | 4344 | U64_HI(section)); |
4292 | REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF + | 4345 | REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF + |
4293 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func); | 4346 | CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func); |
4294 | 4347 | ||
4295 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) | 4348 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) |
4296 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4349 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4297 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); | 4350 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1); |
4298 | 4351 | ||
4299 | /* CSTORM */ | 4352 | /* CSTORM */ |
4300 | section = ((u64)mapping) + offsetof(struct host_status_block, | 4353 | section = ((u64)mapping) + offsetof(struct host_status_block, |
@@ -4302,16 +4355,16 @@ static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | |||
4302 | sb->c_status_block.status_block_id = sb_id; | 4355 | sb->c_status_block.status_block_id = sb_id; |
4303 | 4356 | ||
4304 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4357 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4305 | CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); | 4358 | CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section)); |
4306 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4359 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4307 | ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), | 4360 | ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4), |
4308 | U64_HI(section)); | 4361 | U64_HI(section)); |
4309 | REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + | 4362 | REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + |
4310 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func); | 4363 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func); |
4311 | 4364 | ||
4312 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) | 4365 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) |
4313 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4366 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4314 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); | 4367 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); |
4315 | 4368 | ||
4316 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 4369 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4317 | } | 4370 | } |
@@ -4320,16 +4373,16 @@ static void bnx2x_zero_def_sb(struct bnx2x *bp) | |||
4320 | { | 4373 | { |
4321 | int func = BP_FUNC(bp); | 4374 | int func = BP_FUNC(bp); |
4322 | 4375 | ||
4323 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR + | 4376 | bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + |
4324 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | 4377 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, |
4325 | sizeof(struct tstorm_def_status_block)/4); | 4378 | sizeof(struct tstorm_def_status_block)/4); |
4326 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR + | 4379 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + |
4327 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | 4380 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0, |
4328 | sizeof(struct ustorm_def_status_block)/4); | 4381 | sizeof(struct cstorm_def_status_block_u)/4); |
4329 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR + | 4382 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + |
4330 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | 4383 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0, |
4331 | sizeof(struct cstorm_def_status_block)/4); | 4384 | sizeof(struct cstorm_def_status_block_c)/4); |
4332 | bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR + | 4385 | bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY + |
4333 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | 4386 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, |
4334 | sizeof(struct xstorm_def_status_block)/4); | 4387 | sizeof(struct xstorm_def_status_block)/4); |
4335 | } | 4388 | } |
@@ -4381,17 +4434,17 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4381 | u_def_status_block); | 4434 | u_def_status_block); |
4382 | def_sb->u_def_status_block.status_block_id = sb_id; | 4435 | def_sb->u_def_status_block.status_block_id = sb_id; |
4383 | 4436 | ||
4384 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4437 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4385 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4438 | CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section)); |
4386 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4439 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4387 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4440 | ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), |
4388 | U64_HI(section)); | 4441 | U64_HI(section)); |
4389 | REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + | 4442 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF + |
4390 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4443 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func); |
4391 | 4444 | ||
4392 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) | 4445 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) |
4393 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4446 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4394 | USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); | 4447 | CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1); |
4395 | 4448 | ||
4396 | /* CSTORM */ | 4449 | /* CSTORM */ |
4397 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 4450 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
@@ -4399,16 +4452,16 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4399 | def_sb->c_def_status_block.status_block_id = sb_id; | 4452 | def_sb->c_def_status_block.status_block_id = sb_id; |
4400 | 4453 | ||
4401 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4454 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4402 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4455 | CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section)); |
4403 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4456 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4404 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4457 | ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), |
4405 | U64_HI(section)); | 4458 | U64_HI(section)); |
4406 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + | 4459 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + |
4407 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4460 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func); |
4408 | 4461 | ||
4409 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) | 4462 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) |
4410 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4463 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4411 | CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); | 4464 | CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1); |
4412 | 4465 | ||
4413 | /* TSTORM */ | 4466 | /* TSTORM */ |
4414 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 4467 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
@@ -4459,23 +4512,23 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) | |||
4459 | int sb_id = bp->fp[i].sb_id; | 4512 | int sb_id = bp->fp[i].sb_id; |
4460 | 4513 | ||
4461 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ | 4514 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ |
4462 | REG_WR8(bp, BAR_USTRORM_INTMEM + | 4515 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
4463 | USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, | 4516 | CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, |
4464 | U_SB_ETH_RX_CQ_INDEX), | 4517 | U_SB_ETH_RX_CQ_INDEX), |
4465 | bp->rx_ticks/12); | 4518 | bp->rx_ticks/12); |
4466 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4519 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4467 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, | 4520 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, |
4468 | U_SB_ETH_RX_CQ_INDEX), | 4521 | U_SB_ETH_RX_CQ_INDEX), |
4469 | (bp->rx_ticks/12) ? 0 : 1); | 4522 | (bp->rx_ticks/12) ? 0 : 1); |
4470 | 4523 | ||
4471 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | 4524 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ |
4472 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 4525 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
4473 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, | 4526 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, |
4474 | C_SB_ETH_TX_CQ_INDEX), | 4527 | C_SB_ETH_TX_CQ_INDEX), |
4475 | bp->tx_ticks/12); | 4528 | bp->tx_ticks/12); |
4476 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4529 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4477 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, | 4530 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, |
4478 | C_SB_ETH_TX_CQ_INDEX), | 4531 | C_SB_ETH_TX_CQ_INDEX), |
4479 | (bp->tx_ticks/12) ? 0 : 1); | 4532 | (bp->tx_ticks/12) ? 0 : 1); |
4480 | } | 4533 | } |
4481 | } | 4534 | } |
@@ -4548,6 +4601,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4548 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | 4601 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; |
4549 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; | 4602 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; |
4550 | 4603 | ||
4604 | /* Mark queue as Rx */ | ||
4605 | fp->is_rx_queue = 1; | ||
4606 | |||
4551 | /* "next page" elements initialization */ | 4607 | /* "next page" elements initialization */ |
4552 | /* SGE ring */ | 4608 | /* SGE ring */ |
4553 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | 4609 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { |
@@ -4657,17 +4713,21 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
4657 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 4713 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4658 | 4714 | ||
4659 | for (i = 1; i <= NUM_TX_RINGS; i++) { | 4715 | for (i = 1; i <= NUM_TX_RINGS; i++) { |
4660 | struct eth_tx_bd *tx_bd = | 4716 | struct eth_tx_next_bd *tx_next_bd = |
4661 | &fp->tx_desc_ring[TX_DESC_CNT * i - 1]; | 4717 | &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; |
4662 | 4718 | ||
4663 | tx_bd->addr_hi = | 4719 | tx_next_bd->addr_hi = |
4664 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | 4720 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + |
4665 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 4721 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
4666 | tx_bd->addr_lo = | 4722 | tx_next_bd->addr_lo = |
4667 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | 4723 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + |
4668 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 4724 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
4669 | } | 4725 | } |
4670 | 4726 | ||
4727 | fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; | ||
4728 | fp->tx_db.data.zero_fill1 = 0; | ||
4729 | fp->tx_db.data.prod = 0; | ||
4730 | |||
4671 | fp->tx_pkt_prod = 0; | 4731 | fp->tx_pkt_prod = 0; |
4672 | fp->tx_pkt_cons = 0; | 4732 | fp->tx_pkt_cons = 0; |
4673 | fp->tx_bd_prod = 0; | 4733 | fp->tx_bd_prod = 0; |
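The producers that used to live in eth_tx_db_data next to the status block are replaced by this per-queue doorbell structure. A hedged sketch of how the transmit path might publish a new producer through it; the DOORBELL() accessor and the raw u32 view of the union are assumptions about surrounding driver code not shown in this hunk:

    /* Sketch: publish a new Tx producer via the doorbell initialized
     * above. Assumes tx_db is a union with a raw u32 view and that
     * DOORBELL() writes it to the queue's doorbell register. */
    static void tx_doorbell_update(struct bnx2x *bp,
    			       struct bnx2x_fastpath *fp, u16 bd_prod)
    {
    	fp->tx_db.data.prod = bd_prod;
    	wmb();		/* BDs must be visible before the doorbell */
    	DOORBELL(bp, fp->index, fp->tx_db.raw);
    }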
@@ -4703,16 +4763,15 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4703 | { | 4763 | { |
4704 | int i; | 4764 | int i; |
4705 | 4765 | ||
4706 | for_each_queue(bp, i) { | 4766 | for_each_rx_queue(bp, i) { |
4707 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); | 4767 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); |
4708 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 4768 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
4709 | u8 cl_id = fp->cl_id; | 4769 | u8 cl_id = fp->cl_id; |
4710 | u8 sb_id = fp->sb_id; | ||
4711 | 4770 | ||
4712 | context->ustorm_st_context.common.sb_index_numbers = | 4771 | context->ustorm_st_context.common.sb_index_numbers = |
4713 | BNX2X_RX_SB_INDEX_NUM; | 4772 | BNX2X_RX_SB_INDEX_NUM; |
4714 | context->ustorm_st_context.common.clientId = cl_id; | 4773 | context->ustorm_st_context.common.clientId = cl_id; |
4715 | context->ustorm_st_context.common.status_block_id = sb_id; | 4774 | context->ustorm_st_context.common.status_block_id = fp->sb_id; |
4716 | context->ustorm_st_context.common.flags = | 4775 | context->ustorm_st_context.common.flags = |
4717 | (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | | 4776 | (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | |
4718 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); | 4777 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); |
@@ -4728,8 +4787,7 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4728 | U64_LO(fp->rx_desc_mapping); | 4787 | U64_LO(fp->rx_desc_mapping); |
4729 | if (!fp->disable_tpa) { | 4788 | if (!fp->disable_tpa) { |
4730 | context->ustorm_st_context.common.flags |= | 4789 | context->ustorm_st_context.common.flags |= |
4731 | (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA | | 4790 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; |
4732 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING); | ||
4733 | context->ustorm_st_context.common.sge_buff_size = | 4791 | context->ustorm_st_context.common.sge_buff_size = |
4734 | (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE, | 4792 | (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE, |
4735 | (u32)0xffff); | 4793 | (u32)0xffff); |
@@ -4737,6 +4795,13 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4737 | U64_HI(fp->rx_sge_mapping); | 4795 | U64_HI(fp->rx_sge_mapping); |
4738 | context->ustorm_st_context.common.sge_page_base_lo = | 4796 | context->ustorm_st_context.common.sge_page_base_lo = |
4739 | U64_LO(fp->rx_sge_mapping); | 4797 | U64_LO(fp->rx_sge_mapping); |
4798 | |||
4799 | context->ustorm_st_context.common.max_sges_for_packet = | ||
4800 | SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; | ||
4801 | context->ustorm_st_context.common.max_sges_for_packet = | ||
4802 | ((context->ustorm_st_context.common. | ||
4803 | max_sges_for_packet + PAGES_PER_SGE - 1) & | ||
4804 | (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; | ||
4740 | } | 4805 | } |
4741 | 4806 | ||
4742 | context->ustorm_ag_context.cdu_usage = | 4807 | context->ustorm_ag_context.cdu_usage = |
@@ -4744,24 +4809,27 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4744 | CDU_REGION_NUMBER_UCM_AG, | 4809 | CDU_REGION_NUMBER_UCM_AG, |
4745 | ETH_CONNECTION_TYPE); | 4810 | ETH_CONNECTION_TYPE); |
4746 | 4811 | ||
4812 | context->xstorm_ag_context.cdu_reserved = | ||
4813 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | ||
4814 | CDU_REGION_NUMBER_XCM_AG, | ||
4815 | ETH_CONNECTION_TYPE); | ||
4816 | } | ||
4817 | |||
4818 | for_each_tx_queue(bp, i) { | ||
4819 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
4820 | struct eth_context *context = | ||
4821 | bnx2x_sp(bp, context[i - bp->num_rx_queues].eth); | ||
4822 | |||
4823 | context->cstorm_st_context.sb_index_number = | ||
4824 | C_SB_ETH_TX_CQ_INDEX; | ||
4825 | context->cstorm_st_context.status_block_id = fp->sb_id; | ||
4826 | |||
4747 | context->xstorm_st_context.tx_bd_page_base_hi = | 4827 | context->xstorm_st_context.tx_bd_page_base_hi = |
4748 | U64_HI(fp->tx_desc_mapping); | 4828 | U64_HI(fp->tx_desc_mapping); |
4749 | context->xstorm_st_context.tx_bd_page_base_lo = | 4829 | context->xstorm_st_context.tx_bd_page_base_lo = |
4750 | U64_LO(fp->tx_desc_mapping); | 4830 | U64_LO(fp->tx_desc_mapping); |
4751 | context->xstorm_st_context.db_data_addr_hi = | 4831 | context->xstorm_st_context.statistics_data = (fp->cl_id | |
4752 | U64_HI(fp->tx_prods_mapping); | ||
4753 | context->xstorm_st_context.db_data_addr_lo = | ||
4754 | U64_LO(fp->tx_prods_mapping); | ||
4755 | context->xstorm_st_context.statistics_data = (cl_id | | ||
4756 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); | 4832 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); |
4757 | context->cstorm_st_context.sb_index_number = | ||
4758 | C_SB_ETH_TX_CQ_INDEX; | ||
4759 | context->cstorm_st_context.status_block_id = sb_id; | ||
4760 | |||
4761 | context->xstorm_ag_context.cdu_reserved = | ||
4762 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | ||
4763 | CDU_REGION_NUMBER_XCM_AG, | ||
4764 | ETH_CONNECTION_TYPE); | ||
4765 | } | 4833 | } |
4766 | } | 4834 | } |
4767 | 4835 | ||
@@ -4799,18 +4867,6 @@ static void bnx2x_set_client_config(struct bnx2x *bp) | |||
4799 | } | 4867 | } |
4800 | #endif | 4868 | #endif |
4801 | 4869 | ||
4802 | if (bp->flags & TPA_ENABLE_FLAG) { | ||
4803 | tstorm_client.max_sges_for_packet = | ||
4804 | SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT; | ||
4805 | tstorm_client.max_sges_for_packet = | ||
4806 | ((tstorm_client.max_sges_for_packet + | ||
4807 | PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> | ||
4808 | PAGES_PER_SGE_SHIFT; | ||
4809 | |||
4810 | tstorm_client.config_flags |= | ||
4811 | TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING; | ||
4812 | } | ||
4813 | |||
4814 | for_each_queue(bp, i) { | 4870 | for_each_queue(bp, i) { |
4815 | tstorm_client.statistics_counter_id = bp->fp[i].cl_id; | 4871 | tstorm_client.statistics_counter_id = bp->fp[i].cl_id; |
4816 | 4872 | ||
@@ -4893,17 +4949,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) | |||
4893 | { | 4949 | { |
4894 | int i; | 4950 | int i; |
4895 | 4951 | ||
4896 | if (bp->flags & TPA_ENABLE_FLAG) { | ||
4897 | struct tstorm_eth_tpa_exist tpa = {0}; | ||
4898 | |||
4899 | tpa.tpa_exist = 1; | ||
4900 | |||
4901 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET, | ||
4902 | ((u32 *)&tpa)[0]); | ||
4903 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4, | ||
4904 | ((u32 *)&tpa)[1]); | ||
4905 | } | ||
4906 | |||
4907 | /* Zero this manually as its initialization is | 4952 | /* Zero this manually as its initialization is |
4908 | currently missing in the initTool */ | 4953 | currently missing in the initTool */ |
4909 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) | 4954 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) |
@@ -4915,8 +4960,10 @@ static void bnx2x_init_internal_port(struct bnx2x *bp) | |||
4915 | { | 4960 | { |
4916 | int port = BP_PORT(bp); | 4961 | int port = BP_PORT(bp); |
4917 | 4962 | ||
4918 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | 4963 | REG_WR(bp, |
4919 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | 4964 | BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR); |
4965 | REG_WR(bp, | ||
4966 | BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR); | ||
4920 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | 4967 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); |
4921 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | 4968 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); |
4922 | } | 4969 | } |
@@ -4976,6 +5023,12 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
4976 | tstorm_config.config_flags = MULTI_FLAGS(bp); | 5023 | tstorm_config.config_flags = MULTI_FLAGS(bp); |
4977 | tstorm_config.rss_result_mask = MULTI_MASK; | 5024 | tstorm_config.rss_result_mask = MULTI_MASK; |
4978 | } | 5025 | } |
5026 | |||
5027 | /* Enable TPA if needed */ | ||
5028 | if (bp->flags & TPA_ENABLE_FLAG) | ||
5029 | tstorm_config.config_flags |= | ||
5030 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; | ||
5031 | |||
4979 | if (IS_E1HMF(bp)) | 5032 | if (IS_E1HMF(bp)) |
4980 | tstorm_config.config_flags |= | 5033 | tstorm_config.config_flags |= |
4981 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; | 5034 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; |
@@ -5087,6 +5140,14 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
5087 | USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, | 5140 | USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, |
5088 | U64_HI(fp->rx_comp_mapping)); | 5141 | U64_HI(fp->rx_comp_mapping)); |
5089 | 5142 | ||
5143 | /* Next page */ | ||
5144 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
5145 | USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id), | ||
5146 | U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE)); | ||
5147 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
5148 | USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, | ||
5149 | U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); | ||
5150 | |||
5090 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 5151 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
5091 | USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), | 5152 | USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), |
5092 | max_agg_size); | 5153 | max_agg_size); |
@@ -5197,6 +5258,9 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
5197 | fp->index = i; | 5258 | fp->index = i; |
5198 | fp->cl_id = BP_L_ID(bp) + i; | 5259 | fp->cl_id = BP_L_ID(bp) + i; |
5199 | fp->sb_id = fp->cl_id; | 5260 | fp->sb_id = fp->cl_id; |
5261 | /* Suitable Rx and Tx SBs are served by the same client */ | ||
5262 | if (i >= bp->num_rx_queues) | ||
5263 | fp->cl_id -= bp->num_rx_queues; | ||
5200 | DP(NETIF_MSG_IFUP, | 5264 | DP(NETIF_MSG_IFUP, |
5201 | "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", | 5265 | "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", |
5202 | i, bp, fp->status_blk, fp->cl_id, fp->sb_id); | 5266 | i, bp, fp->status_blk, fp->cl_id, fp->sb_id); |
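Because an Rx SB and its companion Tx SB are served by the same client, a Tx fastpath at index i >= num_rx_queues folds back onto the client of Rx queue i - num_rx_queues. An illustrative restatement of the assignment above:

    /* Illustrative only: the cl_id mapping set up in this hunk. */
    static u8 fp_client_id(struct bnx2x *bp, int i)
    {
    	if (i >= bp->num_rx_queues)	/* Tx queue: reuse its Rx pair */
    		i -= bp->num_rx_queues;
    	return BP_L_ID(bp) + i;
    }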
@@ -5729,10 +5793,10 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5729 | bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); | 5793 | bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); |
5730 | bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); | 5794 | bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); |
5731 | 5795 | ||
5732 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); | 5796 | bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); |
5733 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); | 5797 | bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); |
5734 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); | 5798 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); |
5735 | bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); | 5799 | bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); |
5736 | 5800 | ||
5737 | bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); | 5801 | bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); |
5738 | bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); | 5802 | bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); |
@@ -5765,11 +5829,6 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5765 | bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); | 5829 | bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); |
5766 | val = (4 << 24) + (0 << 12) + 1024; | 5830 | val = (4 << 24) + (0 << 12) + 1024; |
5767 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | 5831 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); |
5768 | if (CHIP_IS_E1(bp)) { | ||
5769 | /* !!! fix pxp client crdit until excel update */ | ||
5770 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264); | ||
5771 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0); | ||
5772 | } | ||
5773 | 5832 | ||
5774 | bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); | 5833 | bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); |
5775 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); | 5834 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); |
@@ -5782,19 +5841,14 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5782 | bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); | 5841 | bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); |
5783 | bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); | 5842 | bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); |
5784 | 5843 | ||
5785 | /* PXPCS COMMON comes here */ | ||
5786 | bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); | 5844 | bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); |
5787 | /* Reset PCIE errors for debug */ | 5845 | /* Reset PCIE errors for debug */ |
5788 | REG_WR(bp, 0x2814, 0xffffffff); | 5846 | REG_WR(bp, 0x2814, 0xffffffff); |
5789 | REG_WR(bp, 0x3820, 0xffffffff); | 5847 | REG_WR(bp, 0x3820, 0xffffffff); |
5790 | 5848 | ||
5791 | /* EMAC0 COMMON comes here */ | ||
5792 | bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); | 5849 | bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); |
5793 | /* EMAC1 COMMON comes here */ | ||
5794 | bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); | 5850 | bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); |
5795 | /* DBU COMMON comes here */ | ||
5796 | bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); | 5851 | bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); |
5797 | /* DBG COMMON comes here */ | ||
5798 | bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); | 5852 | bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); |
5799 | 5853 | ||
5800 | bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); | 5854 | bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); |
@@ -5875,10 +5929,12 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5875 | 5929 | ||
5876 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | 5930 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
5877 | 5931 | ||
5878 | /* Port PXP comes here */ | ||
5879 | bnx2x_init_block(bp, PXP_BLOCK, init_stage); | 5932 | bnx2x_init_block(bp, PXP_BLOCK, init_stage); |
5880 | /* Port PXP2 comes here */ | ||
5881 | bnx2x_init_block(bp, PXP2_BLOCK, init_stage); | 5933 | bnx2x_init_block(bp, PXP2_BLOCK, init_stage); |
5934 | |||
5935 | bnx2x_init_block(bp, TCM_BLOCK, init_stage); | ||
5936 | bnx2x_init_block(bp, UCM_BLOCK, init_stage); | ||
5937 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); | ||
5882 | #ifdef BCM_ISCSI | 5938 | #ifdef BCM_ISCSI |
5883 | /* Port0 1 | 5939 | /* Port0 1 |
5884 | * Port1 385 */ | 5940 | * Port1 385 */ |
@@ -5904,17 +5960,14 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5904 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | 5960 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); |
5905 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | 5961 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); |
5906 | #endif | 5962 | #endif |
5907 | /* Port CMs come here */ | ||
5908 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); | 5963 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); |
5909 | 5964 | ||
5910 | /* Port QM comes here */ | ||
5911 | #ifdef BCM_ISCSI | 5965 | #ifdef BCM_ISCSI |
5912 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); | 5966 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); |
5913 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); | 5967 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); |
5914 | 5968 | ||
5915 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); | 5969 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); |
5916 | #endif | 5970 | #endif |
5917 | /* Port DQ comes here */ | ||
5918 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); | 5971 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); |
5919 | 5972 | ||
5920 | bnx2x_init_block(bp, BRB1_BLOCK, init_stage); | 5973 | bnx2x_init_block(bp, BRB1_BLOCK, init_stage); |
@@ -5941,15 +5994,11 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5941 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); | 5994 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); |
5942 | 5995 | ||
5943 | 5996 | ||
5944 | /* Port PRS comes here */ | ||
5945 | bnx2x_init_block(bp, PRS_BLOCK, init_stage); | 5997 | bnx2x_init_block(bp, PRS_BLOCK, init_stage); |
5946 | /* Port TSDM comes here */ | 5998 | |
5947 | bnx2x_init_block(bp, TSDM_BLOCK, init_stage); | 5999 | bnx2x_init_block(bp, TSDM_BLOCK, init_stage); |
5948 | /* Port CSDM comes here */ | ||
5949 | bnx2x_init_block(bp, CSDM_BLOCK, init_stage); | 6000 | bnx2x_init_block(bp, CSDM_BLOCK, init_stage); |
5950 | /* Port USDM comes here */ | ||
5951 | bnx2x_init_block(bp, USDM_BLOCK, init_stage); | 6001 | bnx2x_init_block(bp, USDM_BLOCK, init_stage); |
5952 | /* Port XSDM comes here */ | ||
5953 | bnx2x_init_block(bp, XSDM_BLOCK, init_stage); | 6002 | bnx2x_init_block(bp, XSDM_BLOCK, init_stage); |
5954 | 6003 | ||
5955 | bnx2x_init_block(bp, TSEM_BLOCK, init_stage); | 6004 | bnx2x_init_block(bp, TSEM_BLOCK, init_stage); |
@@ -5957,9 +6006,7 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5957 | bnx2x_init_block(bp, CSEM_BLOCK, init_stage); | 6006 | bnx2x_init_block(bp, CSEM_BLOCK, init_stage); |
5958 | bnx2x_init_block(bp, XSEM_BLOCK, init_stage); | 6007 | bnx2x_init_block(bp, XSEM_BLOCK, init_stage); |
5959 | 6008 | ||
5960 | /* Port UPB comes here */ | ||
5961 | bnx2x_init_block(bp, UPB_BLOCK, init_stage); | 6009 | bnx2x_init_block(bp, UPB_BLOCK, init_stage); |
5962 | /* Port XPB comes here */ | ||
5963 | bnx2x_init_block(bp, XPB_BLOCK, init_stage); | 6010 | bnx2x_init_block(bp, XPB_BLOCK, init_stage); |
5964 | 6011 | ||
5965 | bnx2x_init_block(bp, PBF_BLOCK, init_stage); | 6012 | bnx2x_init_block(bp, PBF_BLOCK, init_stage); |
@@ -5989,11 +6036,8 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5989 | REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2); | 6036 | REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2); |
5990 | 6037 | ||
5991 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10); | 6038 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10); |
5992 | /* Port SRCH comes here */ | ||
5993 | #endif | 6039 | #endif |
5994 | /* Port CDU comes here */ | ||
5995 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); | 6040 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); |
5996 | /* Port CFC comes here */ | ||
5997 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); | 6041 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); |
5998 | 6042 | ||
5999 | if (CHIP_IS_E1(bp)) { | 6043 | if (CHIP_IS_E1(bp)) { |
@@ -6010,15 +6054,10 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
6010 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, | 6054 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, |
6011 | (IS_E1HMF(bp) ? 0xF7 : 0x7)); | 6055 | (IS_E1HMF(bp) ? 0xF7 : 0x7)); |
6012 | 6056 | ||
6013 | /* Port PXPCS comes here */ | ||
6014 | bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); | 6057 | bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); |
6015 | /* Port EMAC0 comes here */ | ||
6016 | bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); | 6058 | bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); |
6017 | /* Port EMAC1 comes here */ | ||
6018 | bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); | 6059 | bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); |
6019 | /* Port DBU comes here */ | ||
6020 | bnx2x_init_block(bp, DBU_BLOCK, init_stage); | 6060 | bnx2x_init_block(bp, DBU_BLOCK, init_stage); |
6021 | /* Port DBG comes here */ | ||
6022 | bnx2x_init_block(bp, DBG_BLOCK, init_stage); | 6061 | bnx2x_init_block(bp, DBG_BLOCK, init_stage); |
6023 | 6062 | ||
6024 | bnx2x_init_block(bp, NIG_BLOCK, init_stage); | 6063 | bnx2x_init_block(bp, NIG_BLOCK, init_stage); |
@@ -6040,9 +6079,7 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
6040 | } | 6079 | } |
6041 | } | 6080 | } |
6042 | 6081 | ||
6043 | /* Port MCP comes here */ | ||
6044 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); | 6082 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); |
6045 | /* Port DMAE comes here */ | ||
6046 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); | 6083 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); |
6047 | 6084 | ||
6048 | switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { | 6085 | switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { |
@@ -6302,8 +6339,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
6302 | /* status blocks */ | 6339 | /* status blocks */ |
6303 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), | 6340 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), |
6304 | bnx2x_fp(bp, i, status_blk_mapping), | 6341 | bnx2x_fp(bp, i, status_blk_mapping), |
6305 | sizeof(struct host_status_block) + | 6342 | sizeof(struct host_status_block)); |
6306 | sizeof(struct eth_tx_db_data)); | ||
6307 | } | 6343 | } |
6308 | /* Rx */ | 6344 | /* Rx */ |
6309 | for_each_rx_queue(bp, i) { | 6345 | for_each_rx_queue(bp, i) { |
@@ -6332,7 +6368,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
6332 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); | 6368 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); |
6333 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring), | 6369 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring), |
6334 | bnx2x_fp(bp, i, tx_desc_mapping), | 6370 | bnx2x_fp(bp, i, tx_desc_mapping), |
6335 | sizeof(struct eth_tx_bd) * NUM_TX_BD); | 6371 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
6336 | } | 6372 | } |
6337 | /* end of fastpath */ | 6373 | /* end of fastpath */ |
6338 | 6374 | ||
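sizeof(union eth_tx_bd_types) replaces sizeof(struct eth_tx_bd) because one ring slot can now hold any of several BD flavors. From the accesses in this patch (.start_bd, .reg_bd, .next_bd), the union contains at least these views; this is a sketch inferred from the diff, not the verbatim header:

    /* Inferred shape of the new Tx ring element. */
    union eth_tx_bd_types {
    	struct eth_tx_start_bd start_bd; /* first BD of a packet */
    	struct eth_tx_bd reg_bd;	 /* plain data BD */
    	struct eth_tx_next_bd next_bd;	 /* link to the next BD page */
    };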
@@ -6383,8 +6419,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
6383 | /* status blocks */ | 6419 | /* status blocks */ |
6384 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), | 6420 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), |
6385 | &bnx2x_fp(bp, i, status_blk_mapping), | 6421 | &bnx2x_fp(bp, i, status_blk_mapping), |
6386 | sizeof(struct host_status_block) + | 6422 | sizeof(struct host_status_block)); |
6387 | sizeof(struct eth_tx_db_data)); | ||
6388 | } | 6423 | } |
6389 | /* Rx */ | 6424 | /* Rx */ |
6390 | for_each_rx_queue(bp, i) { | 6425 | for_each_rx_queue(bp, i) { |
@@ -6411,19 +6446,12 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
6411 | /* Tx */ | 6446 | /* Tx */ |
6412 | for_each_tx_queue(bp, i) { | 6447 | for_each_tx_queue(bp, i) { |
6413 | 6448 | ||
6414 | bnx2x_fp(bp, i, hw_tx_prods) = | ||
6415 | (void *)(bnx2x_fp(bp, i, status_blk) + 1); | ||
6416 | |||
6417 | bnx2x_fp(bp, i, tx_prods_mapping) = | ||
6418 | bnx2x_fp(bp, i, status_blk_mapping) + | ||
6419 | sizeof(struct host_status_block); | ||
6420 | |||
6421 | /* fastpath tx rings: tx_buf tx_desc */ | 6449 | /* fastpath tx rings: tx_buf tx_desc */ |
6422 | BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), | 6450 | BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), |
6423 | sizeof(struct sw_tx_bd) * NUM_TX_BD); | 6451 | sizeof(struct sw_tx_bd) * NUM_TX_BD); |
6424 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring), | 6452 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring), |
6425 | &bnx2x_fp(bp, i, tx_desc_mapping), | 6453 | &bnx2x_fp(bp, i, tx_desc_mapping), |
6426 | sizeof(struct eth_tx_bd) * NUM_TX_BD); | 6454 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
6427 | } | 6455 | } |
6428 | /* end of fastpath */ | 6456 | /* end of fastpath */ |
6429 | 6457 | ||
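Both the free and alloc paths shrink by sizeof(struct eth_tx_db_data): with the new FW the Tx producer no longer lives in host memory behind the status block but is pushed through a doorbell. A small sketch of the per-queue DMA size change, with stand-in struct sizes (the real layouts are defined in bnx2x_hsi.h):

    #include <stdio.h>

    /* Stand-in sizes; the real structs are defined in bnx2x_hsi.h. */
    struct host_status_block { char raw[128]; };
    struct eth_tx_db_data    { char raw[8];   };

    int main(void)
    {
        unsigned long old_sz = sizeof(struct host_status_block) +
                               sizeof(struct eth_tx_db_data);
        unsigned long new_sz = sizeof(struct host_status_block);

        printf("per-queue status block DMA: %lu -> %lu bytes\n",
               old_sz, new_sz);
        return 0;
    }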
@@ -6600,7 +6628,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
6600 | for_each_queue(bp, i) { | 6628 | for_each_queue(bp, i) { |
6601 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6629 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6602 | 6630 | ||
6603 | sprintf(fp->name, "%s.fp%d", bp->dev->name, i); | 6631 | if (i < bp->num_rx_queues) |
6632 | sprintf(fp->name, "%s-rx-%d", bp->dev->name, i); | ||
6633 | else | ||
6634 | sprintf(fp->name, "%s-tx-%d", | ||
6635 | bp->dev->name, i - bp->num_rx_queues); | ||
6636 | |||
6604 | rc = request_irq(bp->msix_table[i + offset].vector, | 6637 | rc = request_irq(bp->msix_table[i + offset].vector, |
6605 | bnx2x_msix_fp_int, 0, fp->name, fp); | 6638 | bnx2x_msix_fp_int, 0, fp->name, fp); |
6606 | if (rc) { | 6639 | if (rc) { |
@@ -6613,16 +6646,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
6613 | } | 6646 | } |
6614 | 6647 | ||
6615 | i = BNX2X_NUM_QUEUES(bp); | 6648 | i = BNX2X_NUM_QUEUES(bp); |
6616 | if (is_multi(bp)) | 6649 | printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d" |
6617 | printk(KERN_INFO PFX | 6650 | " ... fp[%d] %d\n", |
6618 | "%s: using MSI-X IRQs: sp %d fp %d - %d\n", | 6651 | bp->dev->name, bp->msix_table[0].vector, |
6619 | bp->dev->name, bp->msix_table[0].vector, | 6652 | 0, bp->msix_table[offset].vector, |
6620 | bp->msix_table[offset].vector, | 6653 | i - 1, bp->msix_table[offset + i - 1].vector); |
6621 | bp->msix_table[offset + i - 1].vector); | ||
6622 | else | ||
6623 | printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n", | ||
6624 | bp->dev->name, bp->msix_table[0].vector, | ||
6625 | bp->msix_table[offset + i - 1].vector); | ||
6626 | 6654 | ||
6627 | return 0; | 6655 | return 0; |
6628 | } | 6656 | } |
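Since Rx fastpaths occupy fp[0..num_rx_queues-1] and the Tx fastpaths follow them in the same array, the IRQ names now encode the queue role. A standalone sketch of the naming rule, assuming a device named eth0 with two queues of each kind:

    #include <stdio.h>

    int main(void)
    {
        const char *dev = "eth0";
        int num_rx = 2, num_tx = 2;
        char name[32];

        for (int i = 0; i < num_rx + num_tx; i++) {
            if (i < num_rx)
                snprintf(name, sizeof(name), "%s-rx-%d", dev, i);
            else
                snprintf(name, sizeof(name), "%s-tx-%d", dev, i - num_rx);
            printf("fp[%d] -> %s\n", i, name);
        }
        return 0;
    }

/proc/interrupts would then show eth0-rx-0, eth0-rx-1, eth0-tx-0 and eth0-tx-1 next to the slowpath vector.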
@@ -6730,7 +6758,8 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | |||
6730 | config->config_table[0].target_table_entry.flags = 0; | 6758 | config->config_table[0].target_table_entry.flags = 0; |
6731 | else | 6759 | else |
6732 | CAM_INVALIDATE(config->config_table[0]); | 6760 | CAM_INVALIDATE(config->config_table[0]); |
6733 | config->config_table[0].target_table_entry.client_id = 0; | 6761 | config->config_table[0].target_table_entry.clients_bit_vector = |
6762 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
6734 | config->config_table[0].target_table_entry.vlan_id = 0; | 6763 | config->config_table[0].target_table_entry.vlan_id = 0; |
6735 | 6764 | ||
6736 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", | 6765 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", |
@@ -6749,7 +6778,8 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | |||
6749 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | 6778 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; |
6750 | else | 6779 | else |
6751 | CAM_INVALIDATE(config->config_table[1]); | 6780 | CAM_INVALIDATE(config->config_table[1]); |
6752 | config->config_table[1].target_table_entry.client_id = 0; | 6781 | config->config_table[1].target_table_entry.clients_bit_vector = |
6782 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
6753 | config->config_table[1].target_table_entry.vlan_id = 0; | 6783 | config->config_table[1].target_table_entry.vlan_id = 0; |
6754 | 6784 | ||
6755 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 6785 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
@@ -6762,11 +6792,6 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) | |||
6762 | struct mac_configuration_cmd_e1h *config = | 6792 | struct mac_configuration_cmd_e1h *config = |
6763 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | 6793 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); |
6764 | 6794 | ||
6765 | if (set && (bp->state != BNX2X_STATE_OPEN)) { | ||
6766 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | ||
6767 | return; | ||
6768 | } | ||
6769 | |||
6770 | /* CAM allocation for E1H | 6795 | /* CAM allocation for E1H |
6771 | * unicasts: by func number | 6796 | * unicasts: by func number |
6772 | * multicast: 20+FUNC*20, 20 each | 6797 | * multicast: 20+FUNC*20, 20 each |
@@ -6783,7 +6808,8 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) | |||
6783 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | 6808 | swab16(*(u16 *)&bp->dev->dev_addr[2]); |
6784 | config->config_table[0].lsb_mac_addr = | 6809 | config->config_table[0].lsb_mac_addr = |
6785 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 6810 | swab16(*(u16 *)&bp->dev->dev_addr[4]); |
6786 | config->config_table[0].client_id = BP_L_ID(bp); | 6811 | config->config_table[0].clients_bit_vector = |
6812 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
6787 | config->config_table[0].vlan_id = 0; | 6813 | config->config_table[0].vlan_id = 0; |
6788 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | 6814 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); |
6789 | if (set) | 6815 | if (set) |
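client_id is replaced by clients_bit_vector in both the E1 and E1H CAM entries: the entry now carries a mask of client IDs, so one MAC entry could in principle steer to several clients, although the patch only sets the leading client's bit. A sketch of the encoding (cpu_to_le32 is modeled as the identity, i.e. a little-endian host is assumed):

    #include <inttypes.h>
    #include <stdio.h>

    /* Userspace stand-in for cpu_to_le32(); little-endian host assumed. */
    static uint32_t cpu_to_le32(uint32_t v) { return v; }

    int main(void)
    {
        int bp_l_id = 0;                    /* leading client ID */
        uint32_t vec = cpu_to_le32(1u << bp_l_id);

        vec |= cpu_to_le32(1u << 3);        /* hypothetical second client
                                             * sharing the same CAM entry */
        printf("clients_bit_vector = 0x%08" PRIx32 "\n", vec);
        return 0;
    }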
@@ -6880,49 +6906,94 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
6880 | 6906 | ||
6881 | static int bnx2x_poll(struct napi_struct *napi, int budget); | 6907 | static int bnx2x_poll(struct napi_struct *napi, int budget); |
6882 | 6908 | ||
6883 | static void bnx2x_set_int_mode(struct bnx2x *bp) | 6909 | static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, |
6910 | int *num_tx_queues_out) | ||
6911 | { | ||
6912 | int _num_rx_queues = 0, _num_tx_queues = 0; | ||
6913 | |||
6914 | switch (bp->multi_mode) { | ||
6915 | case ETH_RSS_MODE_DISABLED: | ||
6916 | _num_rx_queues = 1; | ||
6917 | _num_tx_queues = 1; | ||
6918 | break; | ||
6919 | |||
6920 | case ETH_RSS_MODE_REGULAR: | ||
6921 | if (num_rx_queues) | ||
6922 | _num_rx_queues = min_t(u32, num_rx_queues, | ||
6923 | BNX2X_MAX_QUEUES(bp)); | ||
6924 | else | ||
6925 | _num_rx_queues = min_t(u32, num_online_cpus(), | ||
6926 | BNX2X_MAX_QUEUES(bp)); | ||
6927 | |||
6928 | if (num_tx_queues) | ||
6929 | _num_tx_queues = min_t(u32, num_tx_queues, | ||
6930 | BNX2X_MAX_QUEUES(bp)); | ||
6931 | else | ||
6932 | _num_tx_queues = min_t(u32, num_online_cpus(), | ||
6933 | BNX2X_MAX_QUEUES(bp)); | ||
6934 | |||
6935 | /* There must not be more Tx queues than Rx queues */ | ||
6936 | if (_num_tx_queues > _num_rx_queues) { | ||
6937 | BNX2X_ERR("number of tx queues (%d) > " | ||
6938 | "number of rx queues (%d)" | ||
6939 | ", defaulting to %d\n", | ||
6940 | _num_tx_queues, _num_rx_queues, | ||
6941 | _num_rx_queues); | ||
6942 | _num_tx_queues = _num_rx_queues; | ||
6943 | } | ||
6944 | break; | ||
6945 | |||
6946 | |||
6947 | default: | ||
6948 | _num_rx_queues = 1; | ||
6949 | _num_tx_queues = 1; | ||
6950 | break; | ||
6951 | } | ||
6952 | |||
6953 | *num_rx_queues_out = _num_rx_queues; | ||
6954 | *num_tx_queues_out = _num_tx_queues; | ||
6955 | } | ||
6956 | |||
6957 | static int bnx2x_set_int_mode(struct bnx2x *bp) | ||
6884 | { | 6958 | { |
6885 | int num_queues; | 6959 | int rc = 0; |
6886 | 6960 | ||
6887 | switch (int_mode) { | 6961 | switch (int_mode) { |
6888 | case INT_MODE_INTx: | 6962 | case INT_MODE_INTx: |
6889 | case INT_MODE_MSI: | 6963 | case INT_MODE_MSI: |
6890 | num_queues = 1; | 6964 | bp->num_rx_queues = 1; |
6891 | bp->num_rx_queues = num_queues; | 6965 | bp->num_tx_queues = 1; |
6892 | bp->num_tx_queues = num_queues; | 6966 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); |
6893 | DP(NETIF_MSG_IFUP, | ||
6894 | "set number of queues to %d\n", num_queues); | ||
6895 | break; | 6967 | break; |
6896 | 6968 | ||
6897 | case INT_MODE_MSIX: | 6969 | case INT_MODE_MSIX: |
6898 | default: | 6970 | default: |
6899 | if (bp->multi_mode == ETH_RSS_MODE_REGULAR) | 6971 | /* Set interrupt mode according to bp->multi_mode value */ |
6900 | num_queues = min_t(u32, num_online_cpus(), | 6972 | bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, |
6901 | BNX2X_MAX_QUEUES(bp)); | 6973 | &bp->num_tx_queues); |
6902 | else | 6974 | |
6903 | num_queues = 1; | 6975 | DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", |
6904 | bp->num_rx_queues = num_queues; | ||
6905 | bp->num_tx_queues = num_queues; | ||
6906 | DP(NETIF_MSG_IFUP, "set number of rx queues to %d" | ||
6907 | " number of tx queues to %d\n", | ||
6908 | bp->num_rx_queues, bp->num_tx_queues); | 6976 | bp->num_rx_queues, bp->num_tx_queues); |
6977 | |||
6909 | /* if we can't use MSI-X we only need one fp, | 6978 | /* if we can't use MSI-X we only need one fp, |
6910 | * so try to enable MSI-X with the requested number of fp's | 6979 | * so try to enable MSI-X with the requested number of fp's |
6911 | * and fallback to MSI or legacy INTx with one fp | 6980 | * and fallback to MSI or legacy INTx with one fp |
6912 | */ | 6981 | */ |
6913 | if (bnx2x_enable_msix(bp)) { | 6982 | rc = bnx2x_enable_msix(bp); |
6983 | if (rc) { | ||
6914 | /* failed to enable MSI-X */ | 6984 | /* failed to enable MSI-X */ |
6915 | num_queues = 1; | ||
6916 | bp->num_rx_queues = num_queues; | ||
6917 | bp->num_tx_queues = num_queues; | ||
6918 | if (bp->multi_mode) | 6985 | if (bp->multi_mode) |
6919 | BNX2X_ERR("Multi requested but failed to " | 6986 | BNX2X_ERR("Multi requested but failed to " |
6920 | "enable MSI-X set number of " | 6987 | "enable MSI-X (rx %d tx %d), " |
6921 | "queues to %d\n", num_queues); | 6988 | "set number of queues to 1\n", |
6989 | bp->num_rx_queues, bp->num_tx_queues); | ||
6990 | bp->num_rx_queues = 1; | ||
6991 | bp->num_tx_queues = 1; | ||
6922 | } | 6992 | } |
6923 | break; | 6993 | break; |
6924 | } | 6994 | } |
6925 | bp->dev->real_num_tx_queues = bp->num_tx_queues; | 6995 | bp->dev->real_num_tx_queues = bp->num_tx_queues; |
6996 | return rc; | ||
6926 | } | 6997 | } |
6927 | 6998 | ||
6928 | static void bnx2x_set_rx_mode(struct net_device *dev); | 6999 | static void bnx2x_set_rx_mode(struct net_device *dev); |
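bnx2x_set_int_mode_msix() boils down to a clamp-and-default policy: each count comes from its module parameter when given, otherwise from the number of online CPUs, is capped at the hardware maximum, and Tx is finally forced down to Rx. The same policy in a standalone model (MAX_QUEUES is an arbitrary stand-in for BNX2X_MAX_QUEUES(bp)):

    #include <stdio.h>

    #define MAX_QUEUES 16       /* stand-in for BNX2X_MAX_QUEUES(bp) */

    static int min_int(int a, int b) { return a < b ? a : b; }

    static void pick_queues(int param_rx, int param_tx, int online_cpus,
                            int *rx, int *tx)
    {
        *rx = min_int(param_rx ? param_rx : online_cpus, MAX_QUEUES);
        *tx = min_int(param_tx ? param_tx : online_cpus, MAX_QUEUES);
        if (*tx > *rx)          /* never more Tx than Rx queues */
            *tx = *rx;
    }

    int main(void)
    {
        int rx, tx;

        pick_queues(0, 12, 8, &rx, &tx);
        printf("rx %d tx %d\n", rx, tx);    /* -> rx 8 tx 8 (Tx clamped) */
        return 0;
    }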
@@ -6931,16 +7002,16 @@ static void bnx2x_set_rx_mode(struct net_device *dev); | |||
6931 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 7002 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
6932 | { | 7003 | { |
6933 | u32 load_code; | 7004 | u32 load_code; |
6934 | int i, rc = 0; | 7005 | int i, rc; |
7006 | |||
6935 | #ifdef BNX2X_STOP_ON_ERROR | 7007 | #ifdef BNX2X_STOP_ON_ERROR |
6936 | DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode); | ||
6937 | if (unlikely(bp->panic)) | 7008 | if (unlikely(bp->panic)) |
6938 | return -EPERM; | 7009 | return -EPERM; |
6939 | #endif | 7010 | #endif |
6940 | 7011 | ||
6941 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 7012 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
6942 | 7013 | ||
6943 | bnx2x_set_int_mode(bp); | 7014 | rc = bnx2x_set_int_mode(bp); |
6944 | 7015 | ||
6945 | if (bnx2x_alloc_mem(bp)) | 7016 | if (bnx2x_alloc_mem(bp)) |
6946 | return -ENOMEM; | 7017 | return -ENOMEM; |
@@ -6953,17 +7024,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6953 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 7024 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
6954 | bnx2x_poll, 128); | 7025 | bnx2x_poll, 128); |
6955 | 7026 | ||
6956 | #ifdef BNX2X_STOP_ON_ERROR | ||
6957 | for_each_rx_queue(bp, i) { | ||
6958 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
6959 | |||
6960 | fp->poll_no_work = 0; | ||
6961 | fp->poll_calls = 0; | ||
6962 | fp->poll_max_calls = 0; | ||
6963 | fp->poll_complete = 0; | ||
6964 | fp->poll_exit = 0; | ||
6965 | } | ||
6966 | #endif | ||
6967 | bnx2x_napi_enable(bp); | 7027 | bnx2x_napi_enable(bp); |
6968 | 7028 | ||
6969 | if (bp->flags & USING_MSIX_FLAG) { | 7029 | if (bp->flags & USING_MSIX_FLAG) { |
@@ -6973,6 +7033,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6973 | goto load_error1; | 7033 | goto load_error1; |
6974 | } | 7034 | } |
6975 | } else { | 7035 | } else { |
7036 | /* Fall back to INTx if MSI-X could not be enabled due to | ||
7037 | lack of memory (in bnx2x_set_int_mode()) */ | ||
6976 | if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) | 7038 | if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) |
6977 | bnx2x_enable_msi(bp); | 7039 | bnx2x_enable_msi(bp); |
6978 | bnx2x_ack_int(bp); | 7040 | bnx2x_ack_int(bp); |
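Interrupt setup now degrades in a fixed order: MSI-X with the full queue set, then MSI with a single queue, then legacy INTx, with the twist that an -ENOMEM from the MSI-X path skips MSI entirely. A compressed model of that decision, where msix_rc stands for the status that bnx2x_set_int_mode() propagates out of bnx2x_enable_msix():

    #include <errno.h>
    #include <stdio.h>

    enum int_mode { MODE_INTX, MODE_MSI, MODE_MSIX };

    static enum int_mode choose_mode(int requested, int msix_rc)
    {
        if (requested == MODE_MSIX && msix_rc == 0)
            return MODE_MSIX;               /* all vectors granted */
        if (msix_rc != -ENOMEM && requested != MODE_INTX)
            return MODE_MSI;                /* one queue, one vector */
        return MODE_INTX;                   /* last resort */
    }

    int main(void)
    {
        printf("%d\n", choose_mode(MODE_MSIX, 0));       /* 2: MSI-X */
        printf("%d\n", choose_mode(MODE_MSIX, -EBUSY));  /* 1: MSI   */
        printf("%d\n", choose_mode(MODE_MSIX, -ENOMEM)); /* 0: INTx  */
        return 0;
    }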
@@ -7065,17 +7127,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7065 | bp->state = BNX2X_STATE_DISABLED; | 7127 | bp->state = BNX2X_STATE_DISABLED; |
7066 | } | 7128 | } |
7067 | 7129 | ||
7068 | if (bp->state == BNX2X_STATE_OPEN) | 7130 | if (bp->state == BNX2X_STATE_OPEN) { |
7069 | for_each_nondefault_queue(bp, i) { | 7131 | for_each_nondefault_queue(bp, i) { |
7070 | rc = bnx2x_setup_multi(bp, i); | 7132 | rc = bnx2x_setup_multi(bp, i); |
7071 | if (rc) | 7133 | if (rc) |
7072 | goto load_error3; | 7134 | goto load_error3; |
7073 | } | 7135 | } |
7074 | 7136 | ||
7075 | if (CHIP_IS_E1(bp)) | 7137 | if (CHIP_IS_E1(bp)) |
7076 | bnx2x_set_mac_addr_e1(bp, 1); | 7138 | bnx2x_set_mac_addr_e1(bp, 1); |
7077 | else | 7139 | else |
7078 | bnx2x_set_mac_addr_e1h(bp, 1); | 7140 | bnx2x_set_mac_addr_e1h(bp, 1); |
7141 | } | ||
7079 | 7142 | ||
7080 | if (bp->port.pmf) | 7143 | if (bp->port.pmf) |
7081 | bnx2x_initial_phy_init(bp, load_mode); | 7144 | bnx2x_initial_phy_init(bp, load_mode); |
@@ -7083,14 +7146,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7083 | /* Start fast path */ | 7146 | /* Start fast path */ |
7084 | switch (load_mode) { | 7147 | switch (load_mode) { |
7085 | case LOAD_NORMAL: | 7148 | case LOAD_NORMAL: |
7086 | /* Tx queue should be only reenabled */ | 7149 | if (bp->state == BNX2X_STATE_OPEN) { |
7087 | netif_tx_wake_all_queues(bp->dev); | 7150 | /* Tx queue should be only reenabled */ |
7151 | netif_tx_wake_all_queues(bp->dev); | ||
7152 | } | ||
7088 | /* Initialize the receive filter. */ | 7153 | /* Initialize the receive filter. */ |
7089 | bnx2x_set_rx_mode(bp->dev); | 7154 | bnx2x_set_rx_mode(bp->dev); |
7090 | break; | 7155 | break; |
7091 | 7156 | ||
7092 | case LOAD_OPEN: | 7157 | case LOAD_OPEN: |
7093 | netif_tx_start_all_queues(bp->dev); | 7158 | netif_tx_start_all_queues(bp->dev); |
7159 | if (bp->state != BNX2X_STATE_OPEN) | ||
7160 | netif_tx_disable(bp->dev); | ||
7094 | /* Initialize the receive filter. */ | 7161 | /* Initialize the receive filter. */ |
7095 | bnx2x_set_rx_mode(bp->dev); | 7162 | bnx2x_set_rx_mode(bp->dev); |
7096 | break; | 7163 | break; |
@@ -9184,18 +9251,19 @@ static int bnx2x_get_coalesce(struct net_device *dev, | |||
9184 | return 0; | 9251 | return 0; |
9185 | } | 9252 | } |
9186 | 9253 | ||
9254 | #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximum coalescing timeout in us */ | ||
9187 | static int bnx2x_set_coalesce(struct net_device *dev, | 9255 | static int bnx2x_set_coalesce(struct net_device *dev, |
9188 | struct ethtool_coalesce *coal) | 9256 | struct ethtool_coalesce *coal) |
9189 | { | 9257 | { |
9190 | struct bnx2x *bp = netdev_priv(dev); | 9258 | struct bnx2x *bp = netdev_priv(dev); |
9191 | 9259 | ||
9192 | bp->rx_ticks = (u16) coal->rx_coalesce_usecs; | 9260 | bp->rx_ticks = (u16) coal->rx_coalesce_usecs; |
9193 | if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) | 9261 | if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT) |
9194 | bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; | 9262 | bp->rx_ticks = BNX2X_MAX_COALES_TOUT; |
9195 | 9263 | ||
9196 | bp->tx_ticks = (u16) coal->tx_coalesce_usecs; | 9264 | bp->tx_ticks = (u16) coal->tx_coalesce_usecs; |
9197 | if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) | 9265 | if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT) |
9198 | bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; | 9266 | bp->tx_ticks = BNX2X_MAX_COALES_TOUT; |
9199 | 9267 | ||
9200 | if (netif_running(dev)) | 9268 | if (netif_running(dev)) |
9201 | bnx2x_update_coalesce(bp); | 9269 | bnx2x_update_coalesce(bp); |
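Both tick values are now clamped against BNX2X_MAX_COALES_TOUT, i.e. 0xf0*12 = 2880 us, after the u16 cast. A sketch of the resulting behaviour:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_COALES_TOUT (0xf0 * 12)     /* 2880 us */

    static uint16_t clamp_ticks(unsigned int usecs)
    {
        uint16_t t = (uint16_t)usecs;       /* same cast the driver applies */
        return t > MAX_COALES_TOUT ? MAX_COALES_TOUT : t;
    }

    int main(void)
    {
        printf("%u\n", clamp_ticks(300));      /* 300: below the cap */
        printf("%u\n", clamp_ticks(100000));   /* 34464 after the cast,
                                                * clamped to 2880 */
        return 0;
    }

So a request such as ethtool -C eth0 rx-usecs 5000 would end up stored as 2880.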
@@ -9554,12 +9622,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
9554 | unsigned int pkt_size, num_pkts, i; | 9622 | unsigned int pkt_size, num_pkts, i; |
9555 | struct sk_buff *skb; | 9623 | struct sk_buff *skb; |
9556 | unsigned char *packet; | 9624 | unsigned char *packet; |
9557 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 9625 | struct bnx2x_fastpath *fp_rx = &bp->fp[0]; |
9626 | struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; | ||
9558 | u16 tx_start_idx, tx_idx; | 9627 | u16 tx_start_idx, tx_idx; |
9559 | u16 rx_start_idx, rx_idx; | 9628 | u16 rx_start_idx, rx_idx; |
9560 | u16 pkt_prod; | 9629 | u16 pkt_prod, bd_prod; |
9561 | struct sw_tx_bd *tx_buf; | 9630 | struct sw_tx_bd *tx_buf; |
9562 | struct eth_tx_bd *tx_bd; | 9631 | struct eth_tx_start_bd *tx_start_bd; |
9632 | struct eth_tx_parse_bd *pbd = NULL; | ||
9563 | dma_addr_t mapping; | 9633 | dma_addr_t mapping; |
9564 | union eth_rx_cqe *cqe; | 9634 | union eth_rx_cqe *cqe; |
9565 | u8 cqe_fp_flags; | 9635 | u8 cqe_fp_flags; |
@@ -9591,57 +9661,64 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
9591 | } | 9661 | } |
9592 | packet = skb_put(skb, pkt_size); | 9662 | packet = skb_put(skb, pkt_size); |
9593 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); | 9663 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); |
9594 | memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN)); | 9664 | memset(packet + ETH_ALEN, 0, ETH_ALEN); |
9665 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | ||
9595 | for (i = ETH_HLEN; i < pkt_size; i++) | 9666 | for (i = ETH_HLEN; i < pkt_size; i++) |
9596 | packet[i] = (unsigned char) (i & 0xff); | 9667 | packet[i] = (unsigned char) (i & 0xff); |
9597 | 9668 | ||
9598 | /* send the loopback packet */ | 9669 | /* send the loopback packet */ |
9599 | num_pkts = 0; | 9670 | num_pkts = 0; |
9600 | tx_start_idx = le16_to_cpu(*fp->tx_cons_sb); | 9671 | tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); |
9601 | rx_start_idx = le16_to_cpu(*fp->rx_cons_sb); | 9672 | rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); |
9602 | 9673 | ||
9603 | pkt_prod = fp->tx_pkt_prod++; | 9674 | pkt_prod = fp_tx->tx_pkt_prod++; |
9604 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | 9675 | tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; |
9605 | tx_buf->first_bd = fp->tx_bd_prod; | 9676 | tx_buf->first_bd = fp_tx->tx_bd_prod; |
9606 | tx_buf->skb = skb; | 9677 | tx_buf->skb = skb; |
9678 | tx_buf->flags = 0; | ||
9607 | 9679 | ||
9608 | tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)]; | 9680 | bd_prod = TX_BD(fp_tx->tx_bd_prod); |
9681 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; | ||
9609 | mapping = pci_map_single(bp->pdev, skb->data, | 9682 | mapping = pci_map_single(bp->pdev, skb->data, |
9610 | skb_headlen(skb), PCI_DMA_TODEVICE); | 9683 | skb_headlen(skb), PCI_DMA_TODEVICE); |
9611 | tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 9684 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
9612 | tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 9685 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
9613 | tx_bd->nbd = cpu_to_le16(1); | 9686 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ |
9614 | tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | 9687 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
9615 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 9688 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); |
9616 | tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD | | 9689 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
9617 | ETH_TX_BD_FLAGS_END_BD); | 9690 | tx_start_bd->general_data = ((UNICAST_ADDRESS << |
9618 | tx_bd->general_data = ((UNICAST_ADDRESS << | 9691 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); |
9619 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); | 9692 | |
9693 | /* turn on parsing and get a BD */ | ||
9694 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
9695 | pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; | ||
9696 | |||
9697 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | ||
9620 | 9698 | ||
9621 | wmb(); | 9699 | wmb(); |
9622 | 9700 | ||
9623 | le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1); | 9701 | fp_tx->tx_db.data.prod += 2; |
9624 | mb(); /* FW restriction: must not reorder writing nbd and packets */ | 9702 | barrier(); |
9625 | le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1); | 9703 | DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); |
9626 | DOORBELL(bp, fp->index, 0); | ||
9627 | 9704 | ||
9628 | mmiowb(); | 9705 | mmiowb(); |
9629 | 9706 | ||
9630 | num_pkts++; | 9707 | num_pkts++; |
9631 | fp->tx_bd_prod++; | 9708 | fp_tx->tx_bd_prod += 2; /* start + pbd */ |
9632 | bp->dev->trans_start = jiffies; | 9709 | bp->dev->trans_start = jiffies; |
9633 | 9710 | ||
9634 | udelay(100); | 9711 | udelay(100); |
9635 | 9712 | ||
9636 | tx_idx = le16_to_cpu(*fp->tx_cons_sb); | 9713 | tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); |
9637 | if (tx_idx != tx_start_idx + num_pkts) | 9714 | if (tx_idx != tx_start_idx + num_pkts) |
9638 | goto test_loopback_exit; | 9715 | goto test_loopback_exit; |
9639 | 9716 | ||
9640 | rx_idx = le16_to_cpu(*fp->rx_cons_sb); | 9717 | rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); |
9641 | if (rx_idx != rx_start_idx + num_pkts) | 9718 | if (rx_idx != rx_start_idx + num_pkts) |
9642 | goto test_loopback_exit; | 9719 | goto test_loopback_exit; |
9643 | 9720 | ||
9644 | cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)]; | 9721 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; |
9645 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | 9722 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; |
9646 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | 9723 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) |
9647 | goto test_loopback_rx_exit; | 9724 | goto test_loopback_rx_exit; |
@@ -9650,7 +9727,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
9650 | if (len != pkt_size) | 9727 | if (len != pkt_size) |
9651 | goto test_loopback_rx_exit; | 9728 | goto test_loopback_rx_exit; |
9652 | 9729 | ||
9653 | rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)]; | 9730 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; |
9654 | skb = rx_buf->skb; | 9731 | skb = rx_buf->skb; |
9655 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); | 9732 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); |
9656 | for (i = ETH_HLEN; i < pkt_size; i++) | 9733 | for (i = ETH_HLEN; i < pkt_size; i++) |
@@ -9661,14 +9738,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
9661 | 9738 | ||
9662 | test_loopback_rx_exit: | 9739 | test_loopback_rx_exit: |
9663 | 9740 | ||
9664 | fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons); | 9741 | fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); |
9665 | fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod); | 9742 | fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); |
9666 | fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons); | 9743 | fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); |
9667 | fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod); | 9744 | fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); |
9668 | 9745 | ||
9669 | /* Update producers */ | 9746 | /* Update producers */ |
9670 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, | 9747 | bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, |
9671 | fp->rx_sge_prod); | 9748 | fp_rx->rx_sge_prod); |
9672 | 9749 | ||
9673 | test_loopback_exit: | 9750 | test_loopback_exit: |
9674 | bp->link_params.loopback_mode = LOOPBACK_NONE; | 9751 | bp->link_params.loopback_mode = LOOPBACK_NONE; |
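The loopback test now queues two BDs per packet, a start BD plus an (empty) parse BD, and rings the doorbell by bumping the producer inside tx_db and writing out the raw value; the doorbell index is fp->index - bp->num_rx_queues because Tx fastpaths sit after the Rx ones. A userspace model of the bookkeeping, with an assumed tx_db layout (16-bit producer in the low half of a 32-bit raw word, little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed doorbell shape; the real union is defined by the driver. */
    union tx_db {
        struct { uint16_t prod; uint16_t params; } data;
        uint32_t raw;
    };

    static void doorbell(int txq, uint32_t raw)
    {
        /* stands in for the MMIO doorbell write */
        printf("DOORBELL txq %d raw 0x%08x\n", txq, (unsigned)raw);
    }

    int main(void)
    {
        union tx_db db = { .data = { 0, 0 } };
        int fp_index = 2, num_rx_queues = 2;    /* first Tx fastpath */

        db.data.prod += 2;                      /* start BD + parse BD */
        doorbell(fp_index - num_rx_queues, db.raw);
        return 0;
    }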
@@ -10001,7 +10078,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
10001 | case ETH_SS_STATS: | 10078 | case ETH_SS_STATS: |
10002 | if (is_multi(bp)) { | 10079 | if (is_multi(bp)) { |
10003 | k = 0; | 10080 | k = 0; |
10004 | for_each_queue(bp, i) { | 10081 | for_each_rx_queue(bp, i) { |
10005 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | 10082 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) |
10006 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, | 10083 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, |
10007 | bnx2x_q_stats_arr[j].string, i); | 10084 | bnx2x_q_stats_arr[j].string, i); |
@@ -10035,7 +10112,7 @@ static int bnx2x_get_stats_count(struct net_device *dev) | |||
10035 | int i, num_stats; | 10112 | int i, num_stats; |
10036 | 10113 | ||
10037 | if (is_multi(bp)) { | 10114 | if (is_multi(bp)) { |
10038 | num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp); | 10115 | num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues; |
10039 | if (!IS_E1HMF_MODE_STAT(bp)) | 10116 | if (!IS_E1HMF_MODE_STAT(bp)) |
10040 | num_stats += BNX2X_NUM_STATS; | 10117 | num_stats += BNX2X_NUM_STATS; |
10041 | } else { | 10118 | } else { |
@@ -10060,7 +10137,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
10060 | 10137 | ||
10061 | if (is_multi(bp)) { | 10138 | if (is_multi(bp)) { |
10062 | k = 0; | 10139 | k = 0; |
10063 | for_each_queue(bp, i) { | 10140 | for_each_rx_queue(bp, i) { |
10064 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | 10141 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; |
10065 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | 10142 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { |
10066 | if (bnx2x_q_stats_arr[j].size == 0) { | 10143 | if (bnx2x_q_stats_arr[j].size == 0) { |
@@ -10273,15 +10350,11 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
10273 | goto poll_panic; | 10350 | goto poll_panic; |
10274 | #endif | 10351 | #endif |
10275 | 10352 | ||
10276 | prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb); | ||
10277 | prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); | 10353 | prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); |
10278 | prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); | 10354 | prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); |
10279 | 10355 | ||
10280 | bnx2x_update_fpsb_idx(fp); | 10356 | bnx2x_update_fpsb_idx(fp); |
10281 | 10357 | ||
10282 | if (bnx2x_has_tx_work(fp)) | ||
10283 | bnx2x_tx_int(fp); | ||
10284 | |||
10285 | if (bnx2x_has_rx_work(fp)) { | 10358 | if (bnx2x_has_rx_work(fp)) { |
10286 | work_done = bnx2x_rx_int(fp, budget); | 10359 | work_done = bnx2x_rx_int(fp, budget); |
10287 | 10360 | ||
@@ -10290,11 +10363,11 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
10290 | goto poll_again; | 10363 | goto poll_again; |
10291 | } | 10364 | } |
10292 | 10365 | ||
10293 | /* BNX2X_HAS_WORK() reads the status block, thus we need to | 10366 | /* bnx2x_has_rx_work() reads the status block, thus we need to |
10294 | * ensure that status block indices have been actually read | 10367 | * ensure that status block indices have been actually read |
10295 | * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK) | 10368 | * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) |
10296 | * so that we won't write the "newer" value of the status block to IGU | 10369 | * so that we won't write the "newer" value of the status block to IGU |
10297 | * (if there was a DMA right after BNX2X_HAS_WORK and | 10370 | * (if there was a DMA right after bnx2x_has_rx_work and |
10298 | * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx) | 10371 | * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx) |
10299 | * may be postponed to right before bnx2x_ack_sb). In this case | 10372 | * may be postponed to right before bnx2x_ack_sb). In this case |
10300 | * there will never be another interrupt until there is another update | 10373 | * there will never be another interrupt until there is another update |
@@ -10302,7 +10375,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
10302 | */ | 10375 | */ |
10303 | rmb(); | 10376 | rmb(); |
10304 | 10377 | ||
10305 | if (!BNX2X_HAS_WORK(fp)) { | 10378 | if (!bnx2x_has_rx_work(fp)) { |
10306 | #ifdef BNX2X_STOP_ON_ERROR | 10379 | #ifdef BNX2X_STOP_ON_ERROR |
10307 | poll_panic: | 10380 | poll_panic: |
10308 | #endif | 10381 | #endif |
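The comment's argument, condensed: the status-block indices must be read before the emptiness re-check, or the driver can ack an index it never actually looked at and the queue then sleeps until an interrupt that will never come. A toy model of the required ordering (the fence stands in for rmb(), and has_rx_work() is reduced to a bare index compare):

    #include <stdio.h>

    static volatile unsigned short sb_index;    /* written by device DMA */
    static unsigned short cached_index;         /* driver's private copy */

    static void update_fpsb_idx(void) { cached_index = sb_index; }
    static int  has_rx_work(void)     { return cached_index != sb_index; }

    int main(void)
    {
        update_fpsb_idx();                       /* 1: read SB indices    */
        __atomic_thread_fence(__ATOMIC_ACQUIRE); /* stands in for rmb()   */
        if (!has_rx_work())                      /* 2: re-check, then ack */
            printf("bnx2x_ack_sb(%u)\n", cached_index);
        return 0;
    }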
@@ -10327,10 +10400,11 @@ poll_again: | |||
10327 | */ | 10400 | */ |
10328 | static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | 10401 | static noinline u16 bnx2x_tx_split(struct bnx2x *bp, |
10329 | struct bnx2x_fastpath *fp, | 10402 | struct bnx2x_fastpath *fp, |
10330 | struct eth_tx_bd **tx_bd, u16 hlen, | 10403 | struct sw_tx_bd *tx_buf, |
10404 | struct eth_tx_start_bd **tx_bd, u16 hlen, | ||
10331 | u16 bd_prod, int nbd) | 10405 | u16 bd_prod, int nbd) |
10332 | { | 10406 | { |
10333 | struct eth_tx_bd *h_tx_bd = *tx_bd; | 10407 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; |
10334 | struct eth_tx_bd *d_tx_bd; | 10408 | struct eth_tx_bd *d_tx_bd; |
10335 | dma_addr_t mapping; | 10409 | dma_addr_t mapping; |
10336 | int old_len = le16_to_cpu(h_tx_bd->nbytes); | 10410 | int old_len = le16_to_cpu(h_tx_bd->nbytes); |
@@ -10346,7 +10420,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | |||
10346 | /* now get a new data BD | 10420 | /* now get a new data BD |
10347 | * (after the pbd) and fill it */ | 10421 | * (after the pbd) and fill it */ |
10348 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 10422 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
10349 | d_tx_bd = &fp->tx_desc_ring[bd_prod]; | 10423 | d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
10350 | 10424 | ||
10351 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), | 10425 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), |
10352 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; | 10426 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; |
@@ -10354,17 +10428,16 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | |||
10354 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 10428 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
10355 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 10429 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
10356 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); | 10430 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); |
10357 | d_tx_bd->vlan = 0; | 10431 | |
10358 | /* this marks the BD as one that has no individual mapping | 10432 | /* this marks the BD as one that has no individual mapping */ |
10359 | * the FW ignores this flag in a BD not marked start | 10433 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; |
10360 | */ | 10434 | |
10361 | d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO; | ||
10362 | DP(NETIF_MSG_TX_QUEUED, | 10435 | DP(NETIF_MSG_TX_QUEUED, |
10363 | "TSO split data size is %d (%x:%x)\n", | 10436 | "TSO split data size is %d (%x:%x)\n", |
10364 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); | 10437 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); |
10365 | 10438 | ||
10366 | /* update tx_bd for marking the last BD flag */ | 10439 | /* update tx_bd */ |
10367 | *tx_bd = d_tx_bd; | 10440 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; |
10368 | 10441 | ||
10369 | return bd_prod; | 10442 | return bd_prod; |
10370 | } | 10443 | } |
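bnx2x_tx_split() leaves only the headers on the first BD and hangs the rest of the linear data on a fresh regular BD mapped at head + hlen; the split is now recorded in the sw_tx_bd flags (BNX2X_TSO_SPLIT_BD) instead of a hardware BD flag. The size bookkeeping in isolation, with invented lengths:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t old_len = 1400;    /* bytes mapped by the head BD */
        uint16_t hlen    = 66;      /* Ethernet + IP + TCP headers */

        uint16_t head_nbytes = hlen;            /* head BD keeps the headers */
        uint16_t data_nbytes = old_len - hlen;  /* new BD takes the payload,
                                                 * mapped at head + hlen */

        printf("head %u + data %u = %u bytes\n",
               head_nbytes, data_nbytes, head_nbytes + data_nbytes);
        return 0;
    }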
@@ -10499,18 +10572,19 @@ exit_lbl: | |||
10499 | static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | 10572 | static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) |
10500 | { | 10573 | { |
10501 | struct bnx2x *bp = netdev_priv(dev); | 10574 | struct bnx2x *bp = netdev_priv(dev); |
10502 | struct bnx2x_fastpath *fp; | 10575 | struct bnx2x_fastpath *fp, *fp_stat; |
10503 | struct netdev_queue *txq; | 10576 | struct netdev_queue *txq; |
10504 | struct sw_tx_bd *tx_buf; | 10577 | struct sw_tx_bd *tx_buf; |
10505 | struct eth_tx_bd *tx_bd; | 10578 | struct eth_tx_start_bd *tx_start_bd; |
10579 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | ||
10506 | struct eth_tx_parse_bd *pbd = NULL; | 10580 | struct eth_tx_parse_bd *pbd = NULL; |
10507 | u16 pkt_prod, bd_prod; | 10581 | u16 pkt_prod, bd_prod; |
10508 | int nbd, fp_index; | 10582 | int nbd, fp_index; |
10509 | dma_addr_t mapping; | 10583 | dma_addr_t mapping; |
10510 | u32 xmit_type = bnx2x_xmit_type(bp, skb); | 10584 | u32 xmit_type = bnx2x_xmit_type(bp, skb); |
10511 | int vlan_off = (bp->e1hov ? 4 : 0); | ||
10512 | int i; | 10585 | int i; |
10513 | u8 hlen = 0; | 10586 | u8 hlen = 0; |
10587 | __le16 pkt_size = 0; | ||
10514 | 10588 | ||
10515 | #ifdef BNX2X_STOP_ON_ERROR | 10589 | #ifdef BNX2X_STOP_ON_ERROR |
10516 | if (unlikely(bp->panic)) | 10590 | if (unlikely(bp->panic)) |
@@ -10520,10 +10594,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10520 | fp_index = skb_get_queue_mapping(skb); | 10594 | fp_index = skb_get_queue_mapping(skb); |
10521 | txq = netdev_get_tx_queue(dev, fp_index); | 10595 | txq = netdev_get_tx_queue(dev, fp_index); |
10522 | 10596 | ||
10523 | fp = &bp->fp[fp_index]; | 10597 | fp = &bp->fp[fp_index + bp->num_rx_queues]; |
10598 | fp_stat = &bp->fp[fp_index]; | ||
10524 | 10599 | ||
10525 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { | 10600 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { |
10526 | fp->eth_q_stats.driver_xoff++, | 10601 | fp_stat->eth_q_stats.driver_xoff++; |
10527 | netif_tx_stop_queue(txq); | 10602 | netif_tx_stop_queue(txq); |
10528 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | 10603 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); |
10529 | return NETDEV_TX_BUSY; | 10604 | return NETDEV_TX_BUSY; |
@@ -10552,7 +10627,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10552 | 10627 | ||
10553 | /* | 10628 | /* |
10554 | Please read carefully. First we use one BD which we mark as start, | 10629 | Please read carefully. First we use one BD which we mark as start, |
10555 | then for TSO or xsum we have a parsing info BD, | 10630 | then we have a parsing info BD (used for TSO or xsum), |
10556 | and only then we have the rest of the TSO BDs. | 10631 | and only then we have the rest of the TSO BDs. |
10557 | (don't forget to mark the last one as last, | 10632 | (don't forget to mark the last one as last, |
10558 | and to unmap only AFTER you write to the BD ...) | 10633 | and to unmap only AFTER you write to the BD ...) |
@@ -10564,42 +10639,40 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10564 | 10639 | ||
10565 | /* get a tx_buf and first BD */ | 10640 | /* get a tx_buf and first BD */ |
10566 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | 10641 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; |
10567 | tx_bd = &fp->tx_desc_ring[bd_prod]; | 10642 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; |
10568 | 10643 | ||
10569 | tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | 10644 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
10570 | tx_bd->general_data = (UNICAST_ADDRESS << | 10645 | tx_start_bd->general_data = (UNICAST_ADDRESS << |
10571 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); | 10646 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); |
10572 | /* header nbd */ | 10647 | /* header nbd */ |
10573 | tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT); | 10648 | tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); |
10574 | 10649 | ||
10575 | /* remember the first BD of the packet */ | 10650 | /* remember the first BD of the packet */ |
10576 | tx_buf->first_bd = fp->tx_bd_prod; | 10651 | tx_buf->first_bd = fp->tx_bd_prod; |
10577 | tx_buf->skb = skb; | 10652 | tx_buf->skb = skb; |
10653 | tx_buf->flags = 0; | ||
10578 | 10654 | ||
10579 | DP(NETIF_MSG_TX_QUEUED, | 10655 | DP(NETIF_MSG_TX_QUEUED, |
10580 | "sending pkt %u @%p next_idx %u bd %u @%p\n", | 10656 | "sending pkt %u @%p next_idx %u bd %u @%p\n", |
10581 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd); | 10657 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); |
10582 | 10658 | ||
10583 | #ifdef BCM_VLAN | 10659 | #ifdef BCM_VLAN |
10584 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && | 10660 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && |
10585 | (bp->flags & HW_VLAN_TX_FLAG)) { | 10661 | (bp->flags & HW_VLAN_TX_FLAG)) { |
10586 | tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | 10662 | tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); |
10587 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; | 10663 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; |
10588 | vlan_off += 4; | ||
10589 | } else | 10664 | } else |
10590 | #endif | 10665 | #endif |
10591 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 10666 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); |
10592 | 10667 | ||
10593 | if (xmit_type) { | 10668 | /* turn on parsing and get a BD */ |
10594 | /* turn on parsing and get a BD */ | 10669 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
10595 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 10670 | pbd = &fp->tx_desc_ring[bd_prod].parse_bd; |
10596 | pbd = (void *)&fp->tx_desc_ring[bd_prod]; | ||
10597 | 10671 | ||
10598 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | 10672 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); |
10599 | } | ||
10600 | 10673 | ||
10601 | if (xmit_type & XMIT_CSUM) { | 10674 | if (xmit_type & XMIT_CSUM) { |
10602 | hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2; | 10675 | hlen = (skb_network_header(skb) - skb->data) / 2; |
10603 | 10676 | ||
10604 | /* for now NS flag is not used in Linux */ | 10677 | /* for now NS flag is not used in Linux */ |
10605 | pbd->global_data = | 10678 | pbd->global_data = |
@@ -10612,15 +10685,16 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10612 | hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; | 10685 | hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; |
10613 | 10686 | ||
10614 | pbd->total_hlen = cpu_to_le16(hlen); | 10687 | pbd->total_hlen = cpu_to_le16(hlen); |
10615 | hlen = hlen*2 - vlan_off; | 10688 | hlen = hlen*2; |
10616 | 10689 | ||
10617 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM; | 10690 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; |
10618 | 10691 | ||
10619 | if (xmit_type & XMIT_CSUM_V4) | 10692 | if (xmit_type & XMIT_CSUM_V4) |
10620 | tx_bd->bd_flags.as_bitfield |= | 10693 | tx_start_bd->bd_flags.as_bitfield |= |
10621 | ETH_TX_BD_FLAGS_IP_CSUM; | 10694 | ETH_TX_BD_FLAGS_IP_CSUM; |
10622 | else | 10695 | else |
10623 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; | 10696 | tx_start_bd->bd_flags.as_bitfield |= |
10697 | ETH_TX_BD_FLAGS_IPV6; | ||
10624 | 10698 | ||
10625 | if (xmit_type & XMIT_CSUM_TCP) { | 10699 | if (xmit_type & XMIT_CSUM_TCP) { |
10626 | pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); | 10700 | pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); |
@@ -10628,13 +10702,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10628 | } else { | 10702 | } else { |
10629 | s8 fix = SKB_CS_OFF(skb); /* signed! */ | 10703 | s8 fix = SKB_CS_OFF(skb); /* signed! */ |
10630 | 10704 | ||
10631 | pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG; | 10705 | pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; |
10632 | pbd->cs_offset = fix / 2; | ||
10633 | 10706 | ||
10634 | DP(NETIF_MSG_TX_QUEUED, | 10707 | DP(NETIF_MSG_TX_QUEUED, |
10635 | "hlen %d offset %d fix %d csum before fix %x\n", | 10708 | "hlen %d fix %d csum before fix %x\n", |
10636 | le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix, | 10709 | le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); |
10637 | SKB_CS(skb)); | ||
10638 | 10710 | ||
10639 | /* HW bug: fixup the CSUM */ | 10711 | /* HW bug: fixup the CSUM */ |
10640 | pbd->tcp_pseudo_csum = | 10712 | pbd->tcp_pseudo_csum = |
@@ -10649,17 +10721,18 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10649 | mapping = pci_map_single(bp->pdev, skb->data, | 10721 | mapping = pci_map_single(bp->pdev, skb->data, |
10650 | skb_headlen(skb), PCI_DMA_TODEVICE); | 10722 | skb_headlen(skb), PCI_DMA_TODEVICE); |
10651 | 10723 | ||
10652 | tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 10724 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
10653 | tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 10725 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
10654 | nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2); | 10726 | nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ |
10655 | tx_bd->nbd = cpu_to_le16(nbd); | 10727 | tx_start_bd->nbd = cpu_to_le16(nbd); |
10656 | tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | 10728 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
10729 | pkt_size = tx_start_bd->nbytes; | ||
10657 | 10730 | ||
10658 | DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" | 10731 | DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" |
10659 | " nbytes %d flags %x vlan %x\n", | 10732 | " nbytes %d flags %x vlan %x\n", |
10660 | tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd), | 10733 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, |
10661 | le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield, | 10734 | le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), |
10662 | le16_to_cpu(tx_bd->vlan)); | 10735 | tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); |
10663 | 10736 | ||
10664 | if (xmit_type & XMIT_GSO) { | 10737 | if (xmit_type & XMIT_GSO) { |
10665 | 10738 | ||
@@ -10668,11 +10741,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10668 | skb->len, hlen, skb_headlen(skb), | 10741 | skb->len, hlen, skb_headlen(skb), |
10669 | skb_shinfo(skb)->gso_size); | 10742 | skb_shinfo(skb)->gso_size); |
10670 | 10743 | ||
10671 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; | 10744 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; |
10672 | 10745 | ||
10673 | if (unlikely(skb_headlen(skb) > hlen)) | 10746 | if (unlikely(skb_headlen(skb) > hlen)) |
10674 | bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen, | 10747 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, |
10675 | bd_prod, ++nbd); | 10748 | hlen, bd_prod, ++nbd); |
10676 | 10749 | ||
10677 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 10750 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
10678 | pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); | 10751 | pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); |
@@ -10693,33 +10766,31 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10693 | 10766 | ||
10694 | pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; | 10767 | pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; |
10695 | } | 10768 | } |
10769 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; | ||
10696 | 10770 | ||
10697 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 10771 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
10698 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 10772 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
10699 | 10773 | ||
10700 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 10774 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
10701 | tx_bd = &fp->tx_desc_ring[bd_prod]; | 10775 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
10776 | if (total_pkt_bd == NULL) | ||
10777 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
10702 | 10778 | ||
10703 | mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, | 10779 | mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, |
10704 | frag->size, PCI_DMA_TODEVICE); | 10780 | frag->size, PCI_DMA_TODEVICE); |
10705 | 10781 | ||
10706 | tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 10782 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
10707 | tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 10783 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
10708 | tx_bd->nbytes = cpu_to_le16(frag->size); | 10784 | tx_data_bd->nbytes = cpu_to_le16(frag->size); |
10709 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 10785 | le16_add_cpu(&pkt_size, frag->size); |
10710 | tx_bd->bd_flags.as_bitfield = 0; | ||
10711 | 10786 | ||
10712 | DP(NETIF_MSG_TX_QUEUED, | 10787 | DP(NETIF_MSG_TX_QUEUED, |
10713 | "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n", | 10788 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", |
10714 | i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, | 10789 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, |
10715 | le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield); | 10790 | le16_to_cpu(tx_data_bd->nbytes)); |
10716 | } | 10791 | } |
10717 | 10792 | ||
10718 | /* now at last mark the BD as the last BD */ | 10793 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); |
10719 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD; | ||
10720 | |||
10721 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n", | ||
10722 | tx_bd, tx_bd->bd_flags.as_bitfield); | ||
10723 | 10794 | ||
10724 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 10795 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
10725 | 10796 | ||
@@ -10729,6 +10800,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10729 | if (TX_BD_POFF(bd_prod) < nbd) | 10800 | if (TX_BD_POFF(bd_prod) < nbd) |
10730 | nbd++; | 10801 | nbd++; |
10731 | 10802 | ||
10803 | if (total_pkt_bd != NULL) | ||
10804 | total_pkt_bd->total_pkt_bytes = pkt_size; | ||
10805 | |||
10732 | if (pbd) | 10806 | if (pbd) |
10733 | DP(NETIF_MSG_TX_QUEUED, | 10807 | DP(NETIF_MSG_TX_QUEUED, |
10734 | "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" | 10808 | "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" |
@@ -10748,25 +10822,24 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10748 | */ | 10822 | */ |
10749 | wmb(); | 10823 | wmb(); |
10750 | 10824 | ||
10751 | le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd); | 10825 | fp->tx_db.data.prod += nbd; |
10752 | mb(); /* FW restriction: must not reorder writing nbd and packets */ | 10826 | barrier(); |
10753 | le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1); | 10827 | DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); |
10754 | DOORBELL(bp, fp->index, 0); | ||
10755 | 10828 | ||
10756 | mmiowb(); | 10829 | mmiowb(); |
10757 | 10830 | ||
10758 | fp->tx_bd_prod += nbd; | 10831 | fp->tx_bd_prod += nbd; |
10759 | 10832 | ||
10760 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { | 10833 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { |
10834 | netif_tx_stop_queue(txq); | ||
10761 | /* We want bnx2x_tx_int to "see" the updated tx_bd_prod | 10835 | /* We want bnx2x_tx_int to "see" the updated tx_bd_prod |
10762 | if we put Tx into XOFF state. */ | 10836 | if we put Tx into XOFF state. */ |
10763 | smp_mb(); | 10837 | smp_mb(); |
10764 | netif_tx_stop_queue(txq); | 10838 | fp_stat->eth_q_stats.driver_xoff++; |
10765 | fp->eth_q_stats.driver_xoff++; | ||
10766 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) | 10839 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) |
10767 | netif_tx_wake_queue(txq); | 10840 | netif_tx_wake_queue(txq); |
10768 | } | 10841 | } |
10769 | fp->tx_pkt++; | 10842 | fp_stat->tx_pkt++; |
10770 | 10843 | ||
10771 | return NETDEV_TX_OK; | 10844 | return NETDEV_TX_OK; |
10772 | } | 10845 | } |
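The BD count per packet is now fixed at start BD + parse BD + one per fragment, plus one more when the chain crosses a BD-page boundary, and the first regular data BD additionally records total_pkt_bytes for the whole packet. A model of the nbd arithmetic, where BDS_PER_PAGE and the page-crossing rule are assumptions read off the TX_BD_POFF() check:

    #include <stdio.h>

    #define BDS_PER_PAGE 255                /* illustrative ring-page size */
    #define TX_BD_POFF(p) ((p) % BDS_PER_PAGE)

    int main(void)
    {
        int nr_frags = 3;
        int nbd = nr_frags + 2;             /* start BD + parse BD + frags */
        int bd_prod = 2;                    /* producer wrapped just past a
                                             * page boundary */

        /* If the producer sits closer to the page start than the BD count,
         * the chain crossed a page and the link BD must be counted too. */
        if (TX_BD_POFF(bd_prod) < nbd)
            nbd++;

        printf("nbd = %d\n", nbd);          /* -> 6 */
        return 0;
    }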
@@ -10842,8 +10915,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev) | |||
10842 | cpu_to_le16(port); | 10915 | cpu_to_le16(port); |
10843 | config->config_table[i]. | 10916 | config->config_table[i]. |
10844 | target_table_entry.flags = 0; | 10917 | target_table_entry.flags = 0; |
10845 | config->config_table[i]. | 10918 | config->config_table[i].target_table_entry. |
10846 | target_table_entry.client_id = 0; | 10919 | clients_bit_vector = |
10920 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
10847 | config->config_table[i]. | 10921 | config->config_table[i]. |
10848 | target_table_entry.vlan_id = 0; | 10922 | target_table_entry.vlan_id = 0; |
10849 | 10923 | ||