diff options
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r-- | drivers/net/bnx2x_main.c | 1443 |
1 files changed, 968 insertions, 475 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 20f0ed956df2..77ba13520d87 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/prefetch.h> | 49 | #include <linux/prefetch.h> |
50 | #include <linux/zlib.h> | 50 | #include <linux/zlib.h> |
51 | #include <linux/io.h> | 51 | #include <linux/io.h> |
52 | #include <linux/stringify.h> | ||
52 | 53 | ||
53 | 54 | ||
54 | #include "bnx2x.h" | 55 | #include "bnx2x.h" |
@@ -56,15 +57,20 @@ | |||
56 | #include "bnx2x_init_ops.h" | 57 | #include "bnx2x_init_ops.h" |
57 | #include "bnx2x_dump.h" | 58 | #include "bnx2x_dump.h" |
58 | 59 | ||
59 | #define DRV_MODULE_VERSION "1.52.1" | 60 | #define DRV_MODULE_VERSION "1.52.1-5" |
60 | #define DRV_MODULE_RELDATE "2009/08/12" | 61 | #define DRV_MODULE_RELDATE "2009/11/09" |
61 | #define BNX2X_BC_VER 0x040200 | 62 | #define BNX2X_BC_VER 0x040200 |
62 | 63 | ||
63 | #include <linux/firmware.h> | 64 | #include <linux/firmware.h> |
64 | #include "bnx2x_fw_file_hdr.h" | 65 | #include "bnx2x_fw_file_hdr.h" |
65 | /* FW files */ | 66 | /* FW files */ |
66 | #define FW_FILE_PREFIX_E1 "bnx2x-e1-" | 67 | #define FW_FILE_VERSION \ |
67 | #define FW_FILE_PREFIX_E1H "bnx2x-e1h-" | 68 | __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ |
69 | __stringify(BCM_5710_FW_MINOR_VERSION) "." \ | ||
70 | __stringify(BCM_5710_FW_REVISION_VERSION) "." \ | ||
71 | __stringify(BCM_5710_FW_ENGINEERING_VERSION) | ||
72 | #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw" | ||
73 | #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw" | ||
68 | 74 | ||
69 | /* Time in jiffies before concluding the transmitter is hung */ | 75 | /* Time in jiffies before concluding the transmitter is hung */ |
70 | #define TX_TIMEOUT (5*HZ) | 76 | #define TX_TIMEOUT (5*HZ) |
@@ -77,21 +83,18 @@ MODULE_AUTHOR("Eliezer Tamir"); | |||
77 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); | 83 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); |
78 | MODULE_LICENSE("GPL"); | 84 | MODULE_LICENSE("GPL"); |
79 | MODULE_VERSION(DRV_MODULE_VERSION); | 85 | MODULE_VERSION(DRV_MODULE_VERSION); |
86 | MODULE_FIRMWARE(FW_FILE_NAME_E1); | ||
87 | MODULE_FIRMWARE(FW_FILE_NAME_E1H); | ||
80 | 88 | ||
81 | static int multi_mode = 1; | 89 | static int multi_mode = 1; |
82 | module_param(multi_mode, int, 0); | 90 | module_param(multi_mode, int, 0); |
83 | MODULE_PARM_DESC(multi_mode, " Multi queue mode " | 91 | MODULE_PARM_DESC(multi_mode, " Multi queue mode " |
84 | "(0 Disable; 1 Enable (default))"); | 92 | "(0 Disable; 1 Enable (default))"); |
85 | 93 | ||
86 | static int num_rx_queues; | 94 | static int num_queues; |
87 | module_param(num_rx_queues, int, 0); | 95 | module_param(num_queues, int, 0); |
88 | MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" | 96 | MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" |
89 | " (default is half number of CPUs)"); | 97 | " (default is as a number of CPUs)"); |
90 | |||
91 | static int num_tx_queues; | ||
92 | module_param(num_tx_queues, int, 0); | ||
93 | MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1" | ||
94 | " (default is half number of CPUs)"); | ||
95 | 98 | ||
96 | static int disable_tpa; | 99 | static int disable_tpa; |
97 | module_param(disable_tpa, int, 0); | 100 | module_param(disable_tpa, int, 0); |
@@ -550,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
550 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); | 553 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); |
551 | 554 | ||
552 | /* Rx */ | 555 | /* Rx */ |
553 | for_each_rx_queue(bp, i) { | 556 | for_each_queue(bp, i) { |
554 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 557 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
555 | 558 | ||
556 | BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" | 559 | BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" |
@@ -567,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
567 | } | 570 | } |
568 | 571 | ||
569 | /* Tx */ | 572 | /* Tx */ |
570 | for_each_tx_queue(bp, i) { | 573 | for_each_queue(bp, i) { |
571 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 574 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
572 | 575 | ||
573 | BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" | 576 | BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" |
@@ -582,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
582 | 585 | ||
583 | /* Rings */ | 586 | /* Rings */ |
584 | /* Rx */ | 587 | /* Rx */ |
585 | for_each_rx_queue(bp, i) { | 588 | for_each_queue(bp, i) { |
586 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 589 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
587 | 590 | ||
588 | start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); | 591 | start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); |
@@ -616,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
616 | } | 619 | } |
617 | 620 | ||
618 | /* Tx */ | 621 | /* Tx */ |
619 | for_each_tx_queue(bp, i) { | 622 | for_each_queue(bp, i) { |
620 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 623 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
621 | 624 | ||
622 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); | 625 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); |
@@ -742,6 +745,9 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
742 | if (msix) { | 745 | if (msix) { |
743 | synchronize_irq(bp->msix_table[0].vector); | 746 | synchronize_irq(bp->msix_table[0].vector); |
744 | offset = 1; | 747 | offset = 1; |
748 | #ifdef BCM_CNIC | ||
749 | offset++; | ||
750 | #endif | ||
745 | for_each_queue(bp, i) | 751 | for_each_queue(bp, i) |
746 | synchronize_irq(bp->msix_table[i + offset].vector); | 752 | synchronize_irq(bp->msix_table[i + offset].vector); |
747 | } else | 753 | } else |
@@ -781,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | |||
781 | barrier(); | 787 | barrier(); |
782 | } | 788 | } |
783 | 789 | ||
784 | static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | 790 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) |
785 | { | 791 | { |
786 | struct host_status_block *fpsb = fp->status_blk; | 792 | struct host_status_block *fpsb = fp->status_blk; |
787 | u16 rc = 0; | ||
788 | 793 | ||
789 | barrier(); /* status block is written to by the chip */ | 794 | barrier(); /* status block is written to by the chip */ |
790 | if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) { | 795 | fp->fp_c_idx = fpsb->c_status_block.status_block_index; |
791 | fp->fp_c_idx = fpsb->c_status_block.status_block_index; | 796 | fp->fp_u_idx = fpsb->u_status_block.status_block_index; |
792 | rc |= 1; | ||
793 | } | ||
794 | if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) { | ||
795 | fp->fp_u_idx = fpsb->u_status_block.status_block_index; | ||
796 | rc |= 2; | ||
797 | } | ||
798 | return rc; | ||
799 | } | 797 | } |
800 | 798 | ||
801 | static u16 bnx2x_ack_int(struct bnx2x *bp) | 799 | static u16 bnx2x_ack_int(struct bnx2x *bp) |
@@ -835,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
835 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | 833 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
836 | int nbd; | 834 | int nbd; |
837 | 835 | ||
836 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | ||
837 | prefetch(&skb->end); | ||
838 | |||
838 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | 839 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", |
839 | idx, tx_buf, skb); | 840 | idx, tx_buf, skb); |
840 | 841 | ||
@@ -879,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
879 | 880 | ||
880 | /* release skb */ | 881 | /* release skb */ |
881 | WARN_ON(!skb); | 882 | WARN_ON(!skb); |
882 | dev_kfree_skb_any(skb); | 883 | dev_kfree_skb(skb); |
883 | tx_buf->first_bd = 0; | 884 | tx_buf->first_bd = 0; |
884 | tx_buf->skb = NULL; | 885 | tx_buf->skb = NULL; |
885 | 886 | ||
@@ -909,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | |||
909 | return (s16)(fp->bp->tx_ring_size) - used; | 910 | return (s16)(fp->bp->tx_ring_size) - used; |
910 | } | 911 | } |
911 | 912 | ||
912 | static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | 913 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) |
914 | { | ||
915 | u16 hw_cons; | ||
916 | |||
917 | /* Tell compiler that status block fields can change */ | ||
918 | barrier(); | ||
919 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | ||
920 | return hw_cons != fp->tx_pkt_cons; | ||
921 | } | ||
922 | |||
923 | static int bnx2x_tx_int(struct bnx2x_fastpath *fp) | ||
913 | { | 924 | { |
914 | struct bnx2x *bp = fp->bp; | 925 | struct bnx2x *bp = fp->bp; |
915 | struct netdev_queue *txq; | 926 | struct netdev_queue *txq; |
916 | u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; | 927 | u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; |
917 | int done = 0; | ||
918 | 928 | ||
919 | #ifdef BNX2X_STOP_ON_ERROR | 929 | #ifdef BNX2X_STOP_ON_ERROR |
920 | if (unlikely(bp->panic)) | 930 | if (unlikely(bp->panic)) |
921 | return; | 931 | return -1; |
922 | #endif | 932 | #endif |
923 | 933 | ||
924 | txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues); | 934 | txq = netdev_get_tx_queue(bp->dev, fp->index); |
925 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | 935 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); |
926 | sw_cons = fp->tx_pkt_cons; | 936 | sw_cons = fp->tx_pkt_cons; |
927 | 937 | ||
@@ -942,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
942 | */ | 952 | */ |
943 | bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); | 953 | bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); |
944 | sw_cons++; | 954 | sw_cons++; |
945 | done++; | ||
946 | } | 955 | } |
947 | 956 | ||
948 | fp->tx_pkt_cons = sw_cons; | 957 | fp->tx_pkt_cons = sw_cons; |
@@ -964,8 +973,12 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
964 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) | 973 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) |
965 | netif_tx_wake_queue(txq); | 974 | netif_tx_wake_queue(txq); |
966 | } | 975 | } |
976 | return 0; | ||
967 | } | 977 | } |
968 | 978 | ||
979 | #ifdef BCM_CNIC | ||
980 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); | ||
981 | #endif | ||
969 | 982 | ||
970 | static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | 983 | static void bnx2x_sp_event(struct bnx2x_fastpath *fp, |
971 | union eth_rx_cqe *rr_cqe) | 984 | union eth_rx_cqe *rr_cqe) |
@@ -1022,16 +1035,24 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
1022 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; | 1035 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; |
1023 | break; | 1036 | break; |
1024 | 1037 | ||
1038 | #ifdef BCM_CNIC | ||
1039 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN): | ||
1040 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid); | ||
1041 | bnx2x_cnic_cfc_comp(bp, cid); | ||
1042 | break; | ||
1043 | #endif | ||
1025 | 1044 | ||
1026 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): | 1045 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): |
1027 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): | 1046 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): |
1028 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 1047 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); |
1029 | bp->set_mac_pending = 0; | 1048 | bp->set_mac_pending--; |
1049 | smp_wmb(); | ||
1030 | break; | 1050 | break; |
1031 | 1051 | ||
1032 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): | 1052 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): |
1033 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED): | ||
1034 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); | 1053 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); |
1054 | bp->set_mac_pending--; | ||
1055 | smp_wmb(); | ||
1035 | break; | 1056 | break; |
1036 | 1057 | ||
1037 | default: | 1058 | default: |
@@ -1539,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1539 | } else { | 1560 | } else { |
1540 | rx_buf = &fp->rx_buf_ring[bd_cons]; | 1561 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
1541 | skb = rx_buf->skb; | 1562 | skb = rx_buf->skb; |
1563 | prefetch(skb); | ||
1564 | prefetch((u8 *)skb + 256); | ||
1542 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | 1565 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); |
1543 | pad = cqe->fast_path_cqe.placement_offset; | 1566 | pad = cqe->fast_path_cqe.placement_offset; |
1544 | 1567 | ||
@@ -1720,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1720 | if (unlikely(bp->panic)) | 1743 | if (unlikely(bp->panic)) |
1721 | return IRQ_HANDLED; | 1744 | return IRQ_HANDLED; |
1722 | #endif | 1745 | #endif |
1723 | /* Handle Rx or Tx according to MSI-X vector */ | ||
1724 | if (fp->is_rx_queue) { | ||
1725 | prefetch(fp->rx_cons_sb); | ||
1726 | prefetch(&fp->status_blk->u_status_block.status_block_index); | ||
1727 | 1746 | ||
1728 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | 1747 | /* Handle Rx and Tx according to MSI-X vector */ |
1729 | 1748 | prefetch(fp->rx_cons_sb); | |
1730 | } else { | 1749 | prefetch(fp->tx_cons_sb); |
1731 | prefetch(fp->tx_cons_sb); | 1750 | prefetch(&fp->status_blk->u_status_block.status_block_index); |
1732 | prefetch(&fp->status_blk->c_status_block.status_block_index); | 1751 | prefetch(&fp->status_blk->c_status_block.status_block_index); |
1733 | 1752 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | |
1734 | bnx2x_update_fpsb_idx(fp); | ||
1735 | rmb(); | ||
1736 | bnx2x_tx_int(fp); | ||
1737 | |||
1738 | /* Re-enable interrupts */ | ||
1739 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
1740 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | ||
1741 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
1742 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); | ||
1743 | } | ||
1744 | 1753 | ||
1745 | return IRQ_HANDLED; | 1754 | return IRQ_HANDLED; |
1746 | } | 1755 | } |
@@ -1775,35 +1784,32 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1775 | 1784 | ||
1776 | mask = 0x2 << fp->sb_id; | 1785 | mask = 0x2 << fp->sb_id; |
1777 | if (status & mask) { | 1786 | if (status & mask) { |
1778 | /* Handle Rx or Tx according to SB id */ | 1787 | /* Handle Rx and Tx according to SB id */ |
1779 | if (fp->is_rx_queue) { | 1788 | prefetch(fp->rx_cons_sb); |
1780 | prefetch(fp->rx_cons_sb); | 1789 | prefetch(&fp->status_blk->u_status_block. |
1781 | prefetch(&fp->status_blk->u_status_block. | 1790 | status_block_index); |
1782 | status_block_index); | 1791 | prefetch(fp->tx_cons_sb); |
1783 | 1792 | prefetch(&fp->status_blk->c_status_block. | |
1784 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | 1793 | status_block_index); |
1785 | 1794 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | |
1786 | } else { | ||
1787 | prefetch(fp->tx_cons_sb); | ||
1788 | prefetch(&fp->status_blk->c_status_block. | ||
1789 | status_block_index); | ||
1790 | |||
1791 | bnx2x_update_fpsb_idx(fp); | ||
1792 | rmb(); | ||
1793 | bnx2x_tx_int(fp); | ||
1794 | |||
1795 | /* Re-enable interrupts */ | ||
1796 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
1797 | le16_to_cpu(fp->fp_u_idx), | ||
1798 | IGU_INT_NOP, 1); | ||
1799 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
1800 | le16_to_cpu(fp->fp_c_idx), | ||
1801 | IGU_INT_ENABLE, 1); | ||
1802 | } | ||
1803 | status &= ~mask; | 1795 | status &= ~mask; |
1804 | } | 1796 | } |
1805 | } | 1797 | } |
1806 | 1798 | ||
1799 | #ifdef BCM_CNIC | ||
1800 | mask = 0x2 << CNIC_SB_ID(bp); | ||
1801 | if (status & (mask | 0x1)) { | ||
1802 | struct cnic_ops *c_ops = NULL; | ||
1803 | |||
1804 | rcu_read_lock(); | ||
1805 | c_ops = rcu_dereference(bp->cnic_ops); | ||
1806 | if (c_ops) | ||
1807 | c_ops->cnic_handler(bp->cnic_data, NULL); | ||
1808 | rcu_read_unlock(); | ||
1809 | |||
1810 | status &= ~mask; | ||
1811 | } | ||
1812 | #endif | ||
1807 | 1813 | ||
1808 | if (unlikely(status & 0x1)) { | 1814 | if (unlikely(status & 0x1)) { |
1809 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | 1815 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); |
@@ -2128,18 +2134,30 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp) | |||
2128 | 2134 | ||
2129 | static void bnx2x_link_report(struct bnx2x *bp) | 2135 | static void bnx2x_link_report(struct bnx2x *bp) |
2130 | { | 2136 | { |
2131 | if (bp->state == BNX2X_STATE_DISABLED) { | 2137 | if (bp->flags & MF_FUNC_DIS) { |
2132 | netif_carrier_off(bp->dev); | 2138 | netif_carrier_off(bp->dev); |
2133 | printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); | 2139 | printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); |
2134 | return; | 2140 | return; |
2135 | } | 2141 | } |
2136 | 2142 | ||
2137 | if (bp->link_vars.link_up) { | 2143 | if (bp->link_vars.link_up) { |
2144 | u16 line_speed; | ||
2145 | |||
2138 | if (bp->state == BNX2X_STATE_OPEN) | 2146 | if (bp->state == BNX2X_STATE_OPEN) |
2139 | netif_carrier_on(bp->dev); | 2147 | netif_carrier_on(bp->dev); |
2140 | printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); | 2148 | printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); |
2141 | 2149 | ||
2142 | printk("%d Mbps ", bp->link_vars.line_speed); | 2150 | line_speed = bp->link_vars.line_speed; |
2151 | if (IS_E1HMF(bp)) { | ||
2152 | u16 vn_max_rate; | ||
2153 | |||
2154 | vn_max_rate = | ||
2155 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
2156 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
2157 | if (vn_max_rate < line_speed) | ||
2158 | line_speed = vn_max_rate; | ||
2159 | } | ||
2160 | printk("%d Mbps ", line_speed); | ||
2143 | 2161 | ||
2144 | if (bp->link_vars.duplex == DUPLEX_FULL) | 2162 | if (bp->link_vars.duplex == DUPLEX_FULL) |
2145 | printk("full duplex"); | 2163 | printk("full duplex"); |
@@ -2304,8 +2322,14 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2304 | } | 2322 | } |
2305 | 2323 | ||
2306 | /* ... only if all min rates are zeros - disable fairness */ | 2324 | /* ... only if all min rates are zeros - disable fairness */ |
2307 | if (all_zero) | 2325 | if (all_zero) { |
2308 | bp->vn_weight_sum = 0; | 2326 | bp->cmng.flags.cmng_enables &= |
2327 | ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | ||
2328 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" | ||
2329 | " fairness will be disabled\n"); | ||
2330 | } else | ||
2331 | bp->cmng.flags.cmng_enables |= | ||
2332 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | ||
2309 | } | 2333 | } |
2310 | 2334 | ||
2311 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) | 2335 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) |
@@ -2324,17 +2348,14 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) | |||
2324 | } else { | 2348 | } else { |
2325 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 2349 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
2326 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 2350 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
2327 | /* If fairness is enabled (not all min rates are zeroes) and | 2351 | /* If min rate is zero - set it to 1 */ |
2328 | if current min rate is zero - set it to 1. | 2352 | if (!vn_min_rate) |
2329 | This is a requirement of the algorithm. */ | ||
2330 | if (bp->vn_weight_sum && (vn_min_rate == 0)) | ||
2331 | vn_min_rate = DEF_MIN_RATE; | 2353 | vn_min_rate = DEF_MIN_RATE; |
2332 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 2354 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> |
2333 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 2355 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; |
2334 | } | 2356 | } |
2335 | |||
2336 | DP(NETIF_MSG_IFUP, | 2357 | DP(NETIF_MSG_IFUP, |
2337 | "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n", | 2358 | "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", |
2338 | func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); | 2359 | func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); |
2339 | 2360 | ||
2340 | memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); | 2361 | memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); |
@@ -2405,8 +2426,7 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2405 | memset(&(pstats->mac_stx[0]), 0, | 2426 | memset(&(pstats->mac_stx[0]), 0, |
2406 | sizeof(struct mac_stx)); | 2427 | sizeof(struct mac_stx)); |
2407 | } | 2428 | } |
2408 | if ((bp->state == BNX2X_STATE_OPEN) || | 2429 | if (bp->state == BNX2X_STATE_OPEN) |
2409 | (bp->state == BNX2X_STATE_DISABLED)) | ||
2410 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2430 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2411 | } | 2431 | } |
2412 | 2432 | ||
@@ -2449,9 +2469,7 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2449 | 2469 | ||
2450 | static void bnx2x__link_status_update(struct bnx2x *bp) | 2470 | static void bnx2x__link_status_update(struct bnx2x *bp) |
2451 | { | 2471 | { |
2452 | int func = BP_FUNC(bp); | 2472 | if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) |
2453 | |||
2454 | if (bp->state != BNX2X_STATE_OPEN) | ||
2455 | return; | 2473 | return; |
2456 | 2474 | ||
2457 | bnx2x_link_status_update(&bp->link_params, &bp->link_vars); | 2475 | bnx2x_link_status_update(&bp->link_params, &bp->link_vars); |
@@ -2461,7 +2479,6 @@ static void bnx2x__link_status_update(struct bnx2x *bp) | |||
2461 | else | 2479 | else |
2462 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 2480 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
2463 | 2481 | ||
2464 | bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
2465 | bnx2x_calc_vn_weight_sum(bp); | 2482 | bnx2x_calc_vn_weight_sum(bp); |
2466 | 2483 | ||
2467 | /* indicate link status */ | 2484 | /* indicate link status */ |
@@ -2501,6 +2518,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
2501 | u32 cnt = 1; | 2518 | u32 cnt = 1; |
2502 | u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; | 2519 | u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; |
2503 | 2520 | ||
2521 | mutex_lock(&bp->fw_mb_mutex); | ||
2504 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); | 2522 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); |
2505 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); | 2523 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); |
2506 | 2524 | ||
@@ -2510,8 +2528,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
2510 | 2528 | ||
2511 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); | 2529 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); |
2512 | 2530 | ||
2513 | /* Give the FW up to 2 second (200*10ms) */ | 2531 | /* Give the FW up to 5 second (500*10ms) */ |
2514 | } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200)); | 2532 | } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); |
2515 | 2533 | ||
2516 | DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", | 2534 | DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", |
2517 | cnt*delay, rc, seq); | 2535 | cnt*delay, rc, seq); |
@@ -2525,32 +2543,23 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
2525 | bnx2x_fw_dump(bp); | 2543 | bnx2x_fw_dump(bp); |
2526 | rc = 0; | 2544 | rc = 0; |
2527 | } | 2545 | } |
2546 | mutex_unlock(&bp->fw_mb_mutex); | ||
2528 | 2547 | ||
2529 | return rc; | 2548 | return rc; |
2530 | } | 2549 | } |
2531 | 2550 | ||
2532 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp); | 2551 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp); |
2533 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set); | 2552 | static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); |
2534 | static void bnx2x_set_rx_mode(struct net_device *dev); | 2553 | static void bnx2x_set_rx_mode(struct net_device *dev); |
2535 | 2554 | ||
2536 | static void bnx2x_e1h_disable(struct bnx2x *bp) | 2555 | static void bnx2x_e1h_disable(struct bnx2x *bp) |
2537 | { | 2556 | { |
2538 | int port = BP_PORT(bp); | 2557 | int port = BP_PORT(bp); |
2539 | int i; | ||
2540 | |||
2541 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
2542 | bnx2x_set_storm_rx_mode(bp); | ||
2543 | 2558 | ||
2544 | netif_tx_disable(bp->dev); | 2559 | netif_tx_disable(bp->dev); |
2545 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
2546 | 2560 | ||
2547 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | 2561 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
2548 | 2562 | ||
2549 | bnx2x_set_mac_addr_e1h(bp, 0); | ||
2550 | |||
2551 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
2552 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | ||
2553 | |||
2554 | netif_carrier_off(bp->dev); | 2563 | netif_carrier_off(bp->dev); |
2555 | } | 2564 | } |
2556 | 2565 | ||
@@ -2560,13 +2569,13 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) | |||
2560 | 2569 | ||
2561 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); | 2570 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); |
2562 | 2571 | ||
2563 | bnx2x_set_mac_addr_e1h(bp, 1); | ||
2564 | |||
2565 | /* Tx queue should be only reenabled */ | 2572 | /* Tx queue should be only reenabled */ |
2566 | netif_tx_wake_all_queues(bp->dev); | 2573 | netif_tx_wake_all_queues(bp->dev); |
2567 | 2574 | ||
2568 | /* Initialize the receive filter. */ | 2575 | /* |
2569 | bnx2x_set_rx_mode(bp->dev); | 2576 | * Should not call netif_carrier_on since it will be called if the link |
2577 | * is up when checking for link state | ||
2578 | */ | ||
2570 | } | 2579 | } |
2571 | 2580 | ||
2572 | static void bnx2x_update_min_max(struct bnx2x *bp) | 2581 | static void bnx2x_update_min_max(struct bnx2x *bp) |
@@ -2605,21 +2614,23 @@ static void bnx2x_update_min_max(struct bnx2x *bp) | |||
2605 | 2614 | ||
2606 | static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) | 2615 | static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) |
2607 | { | 2616 | { |
2608 | int func = BP_FUNC(bp); | ||
2609 | |||
2610 | DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); | 2617 | DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); |
2611 | bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
2612 | 2618 | ||
2613 | if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { | 2619 | if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { |
2614 | 2620 | ||
2621 | /* | ||
2622 | * This is the only place besides the function initialization | ||
2623 | * where the bp->flags can change so it is done without any | ||
2624 | * locks | ||
2625 | */ | ||
2615 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { | 2626 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { |
2616 | DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); | 2627 | DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); |
2617 | bp->state = BNX2X_STATE_DISABLED; | 2628 | bp->flags |= MF_FUNC_DIS; |
2618 | 2629 | ||
2619 | bnx2x_e1h_disable(bp); | 2630 | bnx2x_e1h_disable(bp); |
2620 | } else { | 2631 | } else { |
2621 | DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); | 2632 | DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); |
2622 | bp->state = BNX2X_STATE_OPEN; | 2633 | bp->flags &= ~MF_FUNC_DIS; |
2623 | 2634 | ||
2624 | bnx2x_e1h_enable(bp); | 2635 | bnx2x_e1h_enable(bp); |
2625 | } | 2636 | } |
@@ -2638,11 +2649,40 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) | |||
2638 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); | 2649 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); |
2639 | } | 2650 | } |
2640 | 2651 | ||
2652 | /* must be called under the spq lock */ | ||
2653 | static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) | ||
2654 | { | ||
2655 | struct eth_spe *next_spe = bp->spq_prod_bd; | ||
2656 | |||
2657 | if (bp->spq_prod_bd == bp->spq_last_bd) { | ||
2658 | bp->spq_prod_bd = bp->spq; | ||
2659 | bp->spq_prod_idx = 0; | ||
2660 | DP(NETIF_MSG_TIMER, "end of spq\n"); | ||
2661 | } else { | ||
2662 | bp->spq_prod_bd++; | ||
2663 | bp->spq_prod_idx++; | ||
2664 | } | ||
2665 | return next_spe; | ||
2666 | } | ||
2667 | |||
2668 | /* must be called under the spq lock */ | ||
2669 | static inline void bnx2x_sp_prod_update(struct bnx2x *bp) | ||
2670 | { | ||
2671 | int func = BP_FUNC(bp); | ||
2672 | |||
2673 | /* Make sure that BD data is updated before writing the producer */ | ||
2674 | wmb(); | ||
2675 | |||
2676 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), | ||
2677 | bp->spq_prod_idx); | ||
2678 | mmiowb(); | ||
2679 | } | ||
2680 | |||
2641 | /* the slow path queue is odd since completions arrive on the fastpath ring */ | 2681 | /* the slow path queue is odd since completions arrive on the fastpath ring */ |
2642 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 2682 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
2643 | u32 data_hi, u32 data_lo, int common) | 2683 | u32 data_hi, u32 data_lo, int common) |
2644 | { | 2684 | { |
2645 | int func = BP_FUNC(bp); | 2685 | struct eth_spe *spe; |
2646 | 2686 | ||
2647 | DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, | 2687 | DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, |
2648 | "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", | 2688 | "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", |
@@ -2664,38 +2704,23 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2664 | return -EBUSY; | 2704 | return -EBUSY; |
2665 | } | 2705 | } |
2666 | 2706 | ||
2707 | spe = bnx2x_sp_get_next(bp); | ||
2708 | |||
2667 | /* CID needs port number to be encoded int it */ | 2709 | /* CID needs port number to be encoded int it */ |
2668 | bp->spq_prod_bd->hdr.conn_and_cmd_data = | 2710 | spe->hdr.conn_and_cmd_data = |
2669 | cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | | 2711 | cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | |
2670 | HW_CID(bp, cid))); | 2712 | HW_CID(bp, cid))); |
2671 | bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); | 2713 | spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); |
2672 | if (common) | 2714 | if (common) |
2673 | bp->spq_prod_bd->hdr.type |= | 2715 | spe->hdr.type |= |
2674 | cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); | 2716 | cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); |
2675 | 2717 | ||
2676 | bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi); | 2718 | spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); |
2677 | bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo); | 2719 | spe->data.mac_config_addr.lo = cpu_to_le32(data_lo); |
2678 | 2720 | ||
2679 | bp->spq_left--; | 2721 | bp->spq_left--; |
2680 | 2722 | ||
2681 | if (bp->spq_prod_bd == bp->spq_last_bd) { | 2723 | bnx2x_sp_prod_update(bp); |
2682 | bp->spq_prod_bd = bp->spq; | ||
2683 | bp->spq_prod_idx = 0; | ||
2684 | DP(NETIF_MSG_TIMER, "end of spq\n"); | ||
2685 | |||
2686 | } else { | ||
2687 | bp->spq_prod_bd++; | ||
2688 | bp->spq_prod_idx++; | ||
2689 | } | ||
2690 | |||
2691 | /* Make sure that BD data is updated before writing the producer */ | ||
2692 | wmb(); | ||
2693 | |||
2694 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), | ||
2695 | bp->spq_prod_idx); | ||
2696 | |||
2697 | mmiowb(); | ||
2698 | |||
2699 | spin_unlock_bh(&bp->spq_lock); | 2724 | spin_unlock_bh(&bp->spq_lock); |
2700 | return 0; | 2725 | return 0; |
2701 | } | 2726 | } |
@@ -3024,6 +3049,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3024 | int func = BP_FUNC(bp); | 3049 | int func = BP_FUNC(bp); |
3025 | 3050 | ||
3026 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | 3051 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
3052 | bp->mf_config = SHMEM_RD(bp, | ||
3053 | mf_cfg.func_mf_config[func].config); | ||
3027 | val = SHMEM_RD(bp, func_mb[func].drv_status); | 3054 | val = SHMEM_RD(bp, func_mb[func].drv_status); |
3028 | if (val & DRV_STATUS_DCC_EVENT_MASK) | 3055 | if (val & DRV_STATUS_DCC_EVENT_MASK) |
3029 | bnx2x_dcc_event(bp, | 3056 | bnx2x_dcc_event(bp, |
@@ -3227,6 +3254,17 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3227 | return IRQ_HANDLED; | 3254 | return IRQ_HANDLED; |
3228 | #endif | 3255 | #endif |
3229 | 3256 | ||
3257 | #ifdef BCM_CNIC | ||
3258 | { | ||
3259 | struct cnic_ops *c_ops; | ||
3260 | |||
3261 | rcu_read_lock(); | ||
3262 | c_ops = rcu_dereference(bp->cnic_ops); | ||
3263 | if (c_ops) | ||
3264 | c_ops->cnic_handler(bp->cnic_data, NULL); | ||
3265 | rcu_read_unlock(); | ||
3266 | } | ||
3267 | #endif | ||
3230 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | 3268 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); |
3231 | 3269 | ||
3232 | return IRQ_HANDLED; | 3270 | return IRQ_HANDLED; |
@@ -3958,7 +3996,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
3958 | estats->no_buff_discard_hi = 0; | 3996 | estats->no_buff_discard_hi = 0; |
3959 | estats->no_buff_discard_lo = 0; | 3997 | estats->no_buff_discard_lo = 0; |
3960 | 3998 | ||
3961 | for_each_rx_queue(bp, i) { | 3999 | for_each_queue(bp, i) { |
3962 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 4000 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3963 | int cl_id = fp->cl_id; | 4001 | int cl_id = fp->cl_id; |
3964 | struct tstorm_per_client_stats *tclient = | 4002 | struct tstorm_per_client_stats *tclient = |
@@ -4175,7 +4213,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
4175 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); | 4213 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); |
4176 | 4214 | ||
4177 | nstats->rx_dropped = estats->mac_discard; | 4215 | nstats->rx_dropped = estats->mac_discard; |
4178 | for_each_rx_queue(bp, i) | 4216 | for_each_queue(bp, i) |
4179 | nstats->rx_dropped += | 4217 | nstats->rx_dropped += |
4180 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); | 4218 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); |
4181 | 4219 | ||
@@ -4229,7 +4267,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) | |||
4229 | estats->rx_err_discard_pkt = 0; | 4267 | estats->rx_err_discard_pkt = 0; |
4230 | estats->rx_skb_alloc_failed = 0; | 4268 | estats->rx_skb_alloc_failed = 0; |
4231 | estats->hw_csum_err = 0; | 4269 | estats->hw_csum_err = 0; |
4232 | for_each_rx_queue(bp, i) { | 4270 | for_each_queue(bp, i) { |
4233 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; | 4271 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; |
4234 | 4272 | ||
4235 | estats->driver_xoff += qstats->driver_xoff; | 4273 | estats->driver_xoff += qstats->driver_xoff; |
@@ -4260,7 +4298,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
4260 | 4298 | ||
4261 | if (bp->msglevel & NETIF_MSG_TIMER) { | 4299 | if (bp->msglevel & NETIF_MSG_TIMER) { |
4262 | struct bnx2x_fastpath *fp0_rx = bp->fp; | 4300 | struct bnx2x_fastpath *fp0_rx = bp->fp; |
4263 | struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]); | 4301 | struct bnx2x_fastpath *fp0_tx = bp->fp; |
4264 | struct tstorm_per_client_stats *old_tclient = | 4302 | struct tstorm_per_client_stats *old_tclient = |
4265 | &bp->fp->old_tclient; | 4303 | &bp->fp->old_tclient; |
4266 | struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; | 4304 | struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; |
@@ -4640,8 +4678,7 @@ static void bnx2x_timer(unsigned long data) | |||
4640 | } | 4678 | } |
4641 | } | 4679 | } |
4642 | 4680 | ||
4643 | if ((bp->state == BNX2X_STATE_OPEN) || | 4681 | if (bp->state == BNX2X_STATE_OPEN) |
4644 | (bp->state == BNX2X_STATE_DISABLED)) | ||
4645 | bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); | 4682 | bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); |
4646 | 4683 | ||
4647 | timer_restart: | 4684 | timer_restart: |
@@ -4860,21 +4897,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) | |||
4860 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 4897 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
4861 | CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, | 4898 | CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, |
4862 | U_SB_ETH_RX_CQ_INDEX), | 4899 | U_SB_ETH_RX_CQ_INDEX), |
4863 | bp->rx_ticks/12); | 4900 | bp->rx_ticks/(4 * BNX2X_BTR)); |
4864 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4901 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4865 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, | 4902 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, |
4866 | U_SB_ETH_RX_CQ_INDEX), | 4903 | U_SB_ETH_RX_CQ_INDEX), |
4867 | (bp->rx_ticks/12) ? 0 : 1); | 4904 | (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); |
4868 | 4905 | ||
4869 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | 4906 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ |
4870 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 4907 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
4871 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, | 4908 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, |
4872 | C_SB_ETH_TX_CQ_INDEX), | 4909 | C_SB_ETH_TX_CQ_INDEX), |
4873 | bp->tx_ticks/12); | 4910 | bp->tx_ticks/(4 * BNX2X_BTR)); |
4874 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4911 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4875 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, | 4912 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, |
4876 | C_SB_ETH_TX_CQ_INDEX), | 4913 | C_SB_ETH_TX_CQ_INDEX), |
4877 | (bp->tx_ticks/12) ? 0 : 1); | 4914 | (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); |
4878 | } | 4915 | } |
4879 | } | 4916 | } |
4880 | 4917 | ||
@@ -4916,7 +4953,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4916 | 4953 | ||
4917 | if (bp->flags & TPA_ENABLE_FLAG) { | 4954 | if (bp->flags & TPA_ENABLE_FLAG) { |
4918 | 4955 | ||
4919 | for_each_rx_queue(bp, j) { | 4956 | for_each_queue(bp, j) { |
4920 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 4957 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4921 | 4958 | ||
4922 | for (i = 0; i < max_agg_queues; i++) { | 4959 | for (i = 0; i < max_agg_queues; i++) { |
@@ -4939,16 +4976,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4939 | } | 4976 | } |
4940 | } | 4977 | } |
4941 | 4978 | ||
4942 | for_each_rx_queue(bp, j) { | 4979 | for_each_queue(bp, j) { |
4943 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 4980 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4944 | 4981 | ||
4945 | fp->rx_bd_cons = 0; | 4982 | fp->rx_bd_cons = 0; |
4946 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | 4983 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; |
4947 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; | 4984 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; |
4948 | 4985 | ||
4949 | /* Mark queue as Rx */ | ||
4950 | fp->is_rx_queue = 1; | ||
4951 | |||
4952 | /* "next page" elements initialization */ | 4986 | /* "next page" elements initialization */ |
4953 | /* SGE ring */ | 4987 | /* SGE ring */ |
4954 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | 4988 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { |
@@ -5054,7 +5088,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
5054 | { | 5088 | { |
5055 | int i, j; | 5089 | int i, j; |
5056 | 5090 | ||
5057 | for_each_tx_queue(bp, j) { | 5091 | for_each_queue(bp, j) { |
5058 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 5092 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
5059 | 5093 | ||
5060 | for (i = 1; i <= NUM_TX_RINGS; i++) { | 5094 | for (i = 1; i <= NUM_TX_RINGS; i++) { |
@@ -5080,10 +5114,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
5080 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | 5114 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; |
5081 | fp->tx_pkt = 0; | 5115 | fp->tx_pkt = 0; |
5082 | } | 5116 | } |
5083 | |||
5084 | /* clean tx statistics */ | ||
5085 | for_each_rx_queue(bp, i) | ||
5086 | bnx2x_fp(bp, i, tx_pkt) = 0; | ||
5087 | } | 5117 | } |
5088 | 5118 | ||
5089 | static void bnx2x_init_sp_ring(struct bnx2x *bp) | 5119 | static void bnx2x_init_sp_ring(struct bnx2x *bp) |
@@ -5112,7 +5142,8 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
5112 | { | 5142 | { |
5113 | int i; | 5143 | int i; |
5114 | 5144 | ||
5115 | for_each_rx_queue(bp, i) { | 5145 | /* Rx */ |
5146 | for_each_queue(bp, i) { | ||
5116 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); | 5147 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); |
5117 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5148 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
5118 | u8 cl_id = fp->cl_id; | 5149 | u8 cl_id = fp->cl_id; |
@@ -5164,10 +5195,11 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
5164 | ETH_CONNECTION_TYPE); | 5195 | ETH_CONNECTION_TYPE); |
5165 | } | 5196 | } |
5166 | 5197 | ||
5167 | for_each_tx_queue(bp, i) { | 5198 | /* Tx */ |
5199 | for_each_queue(bp, i) { | ||
5168 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5200 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
5169 | struct eth_context *context = | 5201 | struct eth_context *context = |
5170 | bnx2x_sp(bp, context[i - bp->num_rx_queues].eth); | 5202 | bnx2x_sp(bp, context[i].eth); |
5171 | 5203 | ||
5172 | context->cstorm_st_context.sb_index_number = | 5204 | context->cstorm_st_context.sb_index_number = |
5173 | C_SB_ETH_TX_CQ_INDEX; | 5205 | C_SB_ETH_TX_CQ_INDEX; |
@@ -5195,7 +5227,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
5195 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 5227 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) |
5196 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 5228 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
5197 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, | 5229 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, |
5198 | bp->fp->cl_id + (i % bp->num_rx_queues)); | 5230 | bp->fp->cl_id + (i % bp->num_queues)); |
5199 | } | 5231 | } |
5200 | 5232 | ||
5201 | static void bnx2x_set_client_config(struct bnx2x *bp) | 5233 | static void bnx2x_set_client_config(struct bnx2x *bp) |
@@ -5235,7 +5267,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
5235 | { | 5267 | { |
5236 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; | 5268 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; |
5237 | int mode = bp->rx_mode; | 5269 | int mode = bp->rx_mode; |
5238 | int mask = (1 << BP_L_ID(bp)); | 5270 | int mask = bp->rx_mode_cl_mask; |
5239 | int func = BP_FUNC(bp); | 5271 | int func = BP_FUNC(bp); |
5240 | int port = BP_PORT(bp); | 5272 | int port = BP_PORT(bp); |
5241 | int i; | 5273 | int i; |
@@ -5348,6 +5380,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
5348 | (*(u32 *)&tstorm_config)); | 5380 | (*(u32 *)&tstorm_config)); |
5349 | 5381 | ||
5350 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ | 5382 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ |
5383 | bp->rx_mode_cl_mask = (1 << BP_L_ID(bp)); | ||
5351 | bnx2x_set_storm_rx_mode(bp); | 5384 | bnx2x_set_storm_rx_mode(bp); |
5352 | 5385 | ||
5353 | for_each_queue(bp, i) { | 5386 | for_each_queue(bp, i) { |
@@ -5438,7 +5471,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
5438 | min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * | 5471 | min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * |
5439 | SGE_PAGE_SIZE * PAGES_PER_SGE), | 5472 | SGE_PAGE_SIZE * PAGES_PER_SGE), |
5440 | (u32)0xffff); | 5473 | (u32)0xffff); |
5441 | for_each_rx_queue(bp, i) { | 5474 | for_each_queue(bp, i) { |
5442 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5475 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
5443 | 5476 | ||
5444 | REG_WR(bp, BAR_USTRORM_INTMEM + | 5477 | REG_WR(bp, BAR_USTRORM_INTMEM + |
@@ -5473,7 +5506,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
5473 | rx_pause.cqe_thr_high = 350; | 5506 | rx_pause.cqe_thr_high = 350; |
5474 | rx_pause.sge_thr_high = 0; | 5507 | rx_pause.sge_thr_high = 0; |
5475 | 5508 | ||
5476 | for_each_rx_queue(bp, i) { | 5509 | for_each_queue(bp, i) { |
5477 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5510 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
5478 | 5511 | ||
5479 | if (!fp->disable_tpa) { | 5512 | if (!fp->disable_tpa) { |
@@ -5504,20 +5537,18 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
5504 | bp->link_vars.line_speed = SPEED_10000; | 5537 | bp->link_vars.line_speed = SPEED_10000; |
5505 | bnx2x_init_port_minmax(bp); | 5538 | bnx2x_init_port_minmax(bp); |
5506 | 5539 | ||
5540 | if (!BP_NOMCP(bp)) | ||
5541 | bp->mf_config = | ||
5542 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
5507 | bnx2x_calc_vn_weight_sum(bp); | 5543 | bnx2x_calc_vn_weight_sum(bp); |
5508 | 5544 | ||
5509 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 5545 | for (vn = VN_0; vn < E1HVN_MAX; vn++) |
5510 | bnx2x_init_vn_minmax(bp, 2*vn + port); | 5546 | bnx2x_init_vn_minmax(bp, 2*vn + port); |
5511 | 5547 | ||
5512 | /* Enable rate shaping and fairness */ | 5548 | /* Enable rate shaping and fairness */ |
5513 | bp->cmng.flags.cmng_enables = | 5549 | bp->cmng.flags.cmng_enables |= |
5514 | CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; | 5550 | CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; |
5515 | if (bp->vn_weight_sum) | 5551 | |
5516 | bp->cmng.flags.cmng_enables |= | ||
5517 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | ||
5518 | else | ||
5519 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" | ||
5520 | " fairness will be disabled\n"); | ||
5521 | } else { | 5552 | } else { |
5522 | /* rate shaping and fairness are disabled */ | 5553 | /* rate shaping and fairness are disabled */ |
5523 | DP(NETIF_MSG_IFUP, | 5554 | DP(NETIF_MSG_IFUP, |
@@ -5565,10 +5596,11 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
5565 | fp->state = BNX2X_FP_STATE_CLOSED; | 5596 | fp->state = BNX2X_FP_STATE_CLOSED; |
5566 | fp->index = i; | 5597 | fp->index = i; |
5567 | fp->cl_id = BP_L_ID(bp) + i; | 5598 | fp->cl_id = BP_L_ID(bp) + i; |
5599 | #ifdef BCM_CNIC | ||
5600 | fp->sb_id = fp->cl_id + 1; | ||
5601 | #else | ||
5568 | fp->sb_id = fp->cl_id; | 5602 | fp->sb_id = fp->cl_id; |
5569 | /* Suitable Rx and Tx SBs are served by the same client */ | 5603 | #endif |
5570 | if (i >= bp->num_rx_queues) | ||
5571 | fp->cl_id -= bp->num_rx_queues; | ||
5572 | DP(NETIF_MSG_IFUP, | 5604 | DP(NETIF_MSG_IFUP, |
5573 | "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", | 5605 | "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", |
5574 | i, bp, fp->status_blk, fp->cl_id, fp->sb_id); | 5606 | i, bp, fp->status_blk, fp->cl_id, fp->sb_id); |
@@ -5867,7 +5899,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
5867 | msleep(50); | 5899 | msleep(50); |
5868 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5900 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); |
5869 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5901 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); |
5870 | #ifndef BCM_ISCSI | 5902 | #ifndef BCM_CNIC |
5871 | /* set NIC mode */ | 5903 | /* set NIC mode */ |
5872 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 5904 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
5873 | #endif | 5905 | #endif |
@@ -6006,6 +6038,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) | |||
6006 | static int bnx2x_init_common(struct bnx2x *bp) | 6038 | static int bnx2x_init_common(struct bnx2x *bp) |
6007 | { | 6039 | { |
6008 | u32 val, i; | 6040 | u32 val, i; |
6041 | #ifdef BCM_CNIC | ||
6042 | u32 wb_write[2]; | ||
6043 | #endif | ||
6009 | 6044 | ||
6010 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); | 6045 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); |
6011 | 6046 | ||
@@ -6048,7 +6083,7 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
6048 | #endif | 6083 | #endif |
6049 | 6084 | ||
6050 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); | 6085 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); |
6051 | #ifdef BCM_ISCSI | 6086 | #ifdef BCM_CNIC |
6052 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); | 6087 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); |
6053 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); | 6088 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); |
6054 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); | 6089 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); |
@@ -6091,11 +6126,26 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
6091 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); | 6126 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); |
6092 | 6127 | ||
6093 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); | 6128 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); |
6129 | |||
6130 | #ifdef BCM_CNIC | ||
6131 | wb_write[0] = 0; | ||
6132 | wb_write[1] = 0; | ||
6133 | for (i = 0; i < 64; i++) { | ||
6134 | REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); | ||
6135 | bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2); | ||
6136 | |||
6137 | if (CHIP_IS_E1H(bp)) { | ||
6138 | REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16)); | ||
6139 | bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, | ||
6140 | wb_write, 2); | ||
6141 | } | ||
6142 | } | ||
6143 | #endif | ||
6094 | /* soft reset pulse */ | 6144 | /* soft reset pulse */ |
6095 | REG_WR(bp, QM_REG_SOFT_RESET, 1); | 6145 | REG_WR(bp, QM_REG_SOFT_RESET, 1); |
6096 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | 6146 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
6097 | 6147 | ||
6098 | #ifdef BCM_ISCSI | 6148 | #ifdef BCM_CNIC |
6099 | bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); | 6149 | bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); |
6100 | #endif | 6150 | #endif |
6101 | 6151 | ||
@@ -6109,8 +6159,10 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
6109 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 6159 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); |
6110 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 6160 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); |
6111 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 6161 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
6162 | #ifndef BCM_CNIC | ||
6112 | /* set NIC mode */ | 6163 | /* set NIC mode */ |
6113 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 6164 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
6165 | #endif | ||
6114 | if (CHIP_IS_E1H(bp)) | 6166 | if (CHIP_IS_E1H(bp)) |
6115 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); | 6167 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); |
6116 | 6168 | ||
@@ -6145,6 +6197,18 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
6145 | /* TODO: replace with something meaningful */ | 6197 | /* TODO: replace with something meaningful */ |
6146 | } | 6198 | } |
6147 | bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); | 6199 | bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); |
6200 | #ifdef BCM_CNIC | ||
6201 | REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); | ||
6202 | REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); | ||
6203 | REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); | ||
6204 | REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); | ||
6205 | REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); | ||
6206 | REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); | ||
6207 | REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); | ||
6208 | REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); | ||
6209 | REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); | ||
6210 | REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); | ||
6211 | #endif | ||
6148 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | 6212 | REG_WR(bp, SRC_REG_SOFT_RST, 0); |
6149 | 6213 | ||
6150 | if (sizeof(union cdu_context) != 1024) | 6214 | if (sizeof(union cdu_context) != 1024) |
@@ -6261,38 +6325,14 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
6261 | bnx2x_init_block(bp, TCM_BLOCK, init_stage); | 6325 | bnx2x_init_block(bp, TCM_BLOCK, init_stage); |
6262 | bnx2x_init_block(bp, UCM_BLOCK, init_stage); | 6326 | bnx2x_init_block(bp, UCM_BLOCK, init_stage); |
6263 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); | 6327 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); |
6264 | #ifdef BCM_ISCSI | ||
6265 | /* Port0 1 | ||
6266 | * Port1 385 */ | ||
6267 | i++; | ||
6268 | wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping); | ||
6269 | wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping); | ||
6270 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
6271 | REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6272 | |||
6273 | /* Port0 2 | ||
6274 | * Port1 386 */ | ||
6275 | i++; | ||
6276 | wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping); | ||
6277 | wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping); | ||
6278 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
6279 | REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6280 | |||
6281 | /* Port0 3 | ||
6282 | * Port1 387 */ | ||
6283 | i++; | ||
6284 | wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping); | ||
6285 | wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping); | ||
6286 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
6287 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6288 | #endif | ||
6289 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); | 6328 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); |
6290 | 6329 | ||
6291 | #ifdef BCM_ISCSI | 6330 | #ifdef BCM_CNIC |
6292 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); | 6331 | REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); |
6293 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); | ||
6294 | 6332 | ||
6295 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); | 6333 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); |
6334 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); | ||
6335 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); | ||
6296 | #endif | 6336 | #endif |
6297 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); | 6337 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); |
6298 | 6338 | ||
@@ -6350,18 +6390,8 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
6350 | msleep(5); | 6390 | msleep(5); |
6351 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); | 6391 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); |
6352 | 6392 | ||
6353 | #ifdef BCM_ISCSI | 6393 | #ifdef BCM_CNIC |
6354 | /* tell the searcher where the T2 table is */ | 6394 | bnx2x_init_block(bp, SRCH_BLOCK, init_stage); |
6355 | REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64); | ||
6356 | |||
6357 | wb_write[0] = U64_LO(bp->t2_mapping); | ||
6358 | wb_write[1] = U64_HI(bp->t2_mapping); | ||
6359 | REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2); | ||
6360 | wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64); | ||
6361 | wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64); | ||
6362 | REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2); | ||
6363 | |||
6364 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10); | ||
6365 | #endif | 6395 | #endif |
6366 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); | 6396 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); |
6367 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); | 6397 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); |
@@ -6470,7 +6500,12 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
6470 | #define PXP_ONE_ILT(x) (((x) << 10) | x) | 6500 | #define PXP_ONE_ILT(x) (((x) << 10) | x) |
6471 | #define PXP_ILT_RANGE(f, l) (((l) << 10) | f) | 6501 | #define PXP_ILT_RANGE(f, l) (((l) << 10) | f) |
6472 | 6502 | ||
6503 | #ifdef BCM_CNIC | ||
6504 | #define CNIC_ILT_LINES 127 | ||
6505 | #define CNIC_CTX_PER_ILT 16 | ||
6506 | #else | ||
6473 | #define CNIC_ILT_LINES 0 | 6507 | #define CNIC_ILT_LINES 0 |
6508 | #endif | ||
6474 | 6509 | ||
6475 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | 6510 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) |
6476 | { | 6511 | { |
@@ -6509,6 +6544,46 @@ static int bnx2x_init_func(struct bnx2x *bp) | |||
6509 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, | 6544 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, |
6510 | PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); | 6545 | PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); |
6511 | 6546 | ||
6547 | #ifdef BCM_CNIC | ||
6548 | i += 1 + CNIC_ILT_LINES; | ||
6549 | bnx2x_ilt_wr(bp, i, bp->timers_mapping); | ||
6550 | if (CHIP_IS_E1(bp)) | ||
6551 | REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6552 | else { | ||
6553 | REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i); | ||
6554 | REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i); | ||
6555 | } | ||
6556 | |||
6557 | i++; | ||
6558 | bnx2x_ilt_wr(bp, i, bp->qm_mapping); | ||
6559 | if (CHIP_IS_E1(bp)) | ||
6560 | REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6561 | else { | ||
6562 | REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); | ||
6563 | REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); | ||
6564 | } | ||
6565 | |||
6566 | i++; | ||
6567 | bnx2x_ilt_wr(bp, i, bp->t1_mapping); | ||
6568 | if (CHIP_IS_E1(bp)) | ||
6569 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | ||
6570 | else { | ||
6571 | REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); | ||
6572 | REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); | ||
6573 | } | ||
6574 | |||
6575 | /* tell the searcher where the T2 table is */ | ||
6576 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); | ||
6577 | |||
6578 | bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, | ||
6579 | U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); | ||
6580 | |||
6581 | bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, | ||
6582 | U64_LO((u64)bp->t2_mapping + 16*1024 - 64), | ||
6583 | U64_HI((u64)bp->t2_mapping + 16*1024 - 64)); | ||
6584 | |||
6585 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); | ||
6586 | #endif | ||
6512 | 6587 | ||
6513 | if (CHIP_IS_E1H(bp)) { | 6588 | if (CHIP_IS_E1H(bp)) { |
6514 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); | 6589 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); |
@@ -6593,6 +6668,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | |||
6593 | bnx2x_zero_def_sb(bp); | 6668 | bnx2x_zero_def_sb(bp); |
6594 | for_each_queue(bp, i) | 6669 | for_each_queue(bp, i) |
6595 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | 6670 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); |
6671 | #ifdef BCM_CNIC | ||
6672 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | ||
6673 | #endif | ||
6596 | 6674 | ||
6597 | init_hw_err: | 6675 | init_hw_err: |
6598 | bnx2x_gunzip_end(bp); | 6676 | bnx2x_gunzip_end(bp); |
@@ -6632,7 +6710,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
6632 | sizeof(struct host_status_block)); | 6710 | sizeof(struct host_status_block)); |
6633 | } | 6711 | } |
6634 | /* Rx */ | 6712 | /* Rx */ |
6635 | for_each_rx_queue(bp, i) { | 6713 | for_each_queue(bp, i) { |
6636 | 6714 | ||
6637 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ | 6715 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
6638 | BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); | 6716 | BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); |
@@ -6652,7 +6730,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
6652 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | 6730 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
6653 | } | 6731 | } |
6654 | /* Tx */ | 6732 | /* Tx */ |
6655 | for_each_tx_queue(bp, i) { | 6733 | for_each_queue(bp, i) { |
6656 | 6734 | ||
6657 | /* fastpath tx rings: tx_buf tx_desc */ | 6735 | /* fastpath tx rings: tx_buf tx_desc */ |
6658 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); | 6736 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); |
@@ -6668,11 +6746,13 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
6668 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 6746 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
6669 | sizeof(struct bnx2x_slowpath)); | 6747 | sizeof(struct bnx2x_slowpath)); |
6670 | 6748 | ||
6671 | #ifdef BCM_ISCSI | 6749 | #ifdef BCM_CNIC |
6672 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); | 6750 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); |
6673 | BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); | 6751 | BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); |
6674 | BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); | 6752 | BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); |
6675 | BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); | 6753 | BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); |
6754 | BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, | ||
6755 | sizeof(struct host_status_block)); | ||
6676 | #endif | 6756 | #endif |
6677 | BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); | 6757 | BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); |
6678 | 6758 | ||
@@ -6712,7 +6792,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
6712 | sizeof(struct host_status_block)); | 6792 | sizeof(struct host_status_block)); |
6713 | } | 6793 | } |
6714 | /* Rx */ | 6794 | /* Rx */ |
6715 | for_each_rx_queue(bp, i) { | 6795 | for_each_queue(bp, i) { |
6716 | 6796 | ||
6717 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ | 6797 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
6718 | BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), | 6798 | BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), |
@@ -6734,7 +6814,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
6734 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | 6814 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
6735 | } | 6815 | } |
6736 | /* Tx */ | 6816 | /* Tx */ |
6737 | for_each_tx_queue(bp, i) { | 6817 | for_each_queue(bp, i) { |
6738 | 6818 | ||
6739 | /* fastpath tx rings: tx_buf tx_desc */ | 6819 | /* fastpath tx rings: tx_buf tx_desc */ |
6740 | BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), | 6820 | BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), |
@@ -6751,32 +6831,26 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
6751 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, | 6831 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, |
6752 | sizeof(struct bnx2x_slowpath)); | 6832 | sizeof(struct bnx2x_slowpath)); |
6753 | 6833 | ||
6754 | #ifdef BCM_ISCSI | 6834 | #ifdef BCM_CNIC |
6755 | BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); | 6835 | BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); |
6756 | 6836 | ||
6757 | /* Initialize T1 */ | ||
6758 | for (i = 0; i < 64*1024; i += 64) { | ||
6759 | *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL; | ||
6760 | *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL; | ||
6761 | } | ||
6762 | |||
6763 | /* allocate searcher T2 table | 6837 | /* allocate searcher T2 table |
6764 | we allocate 1/4 of alloc num for T2 | 6838 | we allocate 1/4 of alloc num for T2 |
6765 | (which is not entered into the ILT) */ | 6839 | (which is not entered into the ILT) */ |
6766 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); | 6840 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); |
6767 | 6841 | ||
6768 | /* Initialize T2 */ | 6842 | /* Initialize T2 (for 1024 connections) */ |
6769 | for (i = 0; i < 16*1024; i += 64) | 6843 | for (i = 0; i < 16*1024; i += 64) |
6770 | * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; | 6844 | *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; |
6771 | |||
6772 | /* now fixup the last line in the block to point to the next block */ | ||
6773 | *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping; | ||
6774 | 6845 | ||
6775 | /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ | 6846 | /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ |
6776 | BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); | 6847 | BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); |
6777 | 6848 | ||
6778 | /* QM queues (128*MAX_CONN) */ | 6849 | /* QM queues (128*MAX_CONN) */ |
6779 | BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); | 6850 | BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); |
6851 | |||
6852 | BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, | ||
6853 | sizeof(struct host_status_block)); | ||
6780 | #endif | 6854 | #endif |
6781 | 6855 | ||
6782 | /* Slow path ring */ | 6856 | /* Slow path ring */ |
@@ -6796,7 +6870,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) | |||
6796 | { | 6870 | { |
6797 | int i; | 6871 | int i; |
6798 | 6872 | ||
6799 | for_each_tx_queue(bp, i) { | 6873 | for_each_queue(bp, i) { |
6800 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6874 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6801 | 6875 | ||
6802 | u16 bd_cons = fp->tx_bd_cons; | 6876 | u16 bd_cons = fp->tx_bd_cons; |
@@ -6814,7 +6888,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
6814 | { | 6888 | { |
6815 | int i, j; | 6889 | int i, j; |
6816 | 6890 | ||
6817 | for_each_rx_queue(bp, j) { | 6891 | for_each_queue(bp, j) { |
6818 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 6892 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
6819 | 6893 | ||
6820 | for (i = 0; i < NUM_RX_BD; i++) { | 6894 | for (i = 0; i < NUM_RX_BD; i++) { |
@@ -6852,6 +6926,9 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp) | |||
6852 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | 6926 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", |
6853 | bp->msix_table[0].vector); | 6927 | bp->msix_table[0].vector); |
6854 | 6928 | ||
6929 | #ifdef BCM_CNIC | ||
6930 | offset++; | ||
6931 | #endif | ||
6855 | for_each_queue(bp, i) { | 6932 | for_each_queue(bp, i) { |
6856 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | 6933 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " |
6857 | "state %x\n", i, bp->msix_table[i + offset].vector, | 6934 | "state %x\n", i, bp->msix_table[i + offset].vector, |
@@ -6885,6 +6962,12 @@ static int bnx2x_enable_msix(struct bnx2x *bp) | |||
6885 | bp->msix_table[0].entry = igu_vec; | 6962 | bp->msix_table[0].entry = igu_vec; |
6886 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); | 6963 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); |
6887 | 6964 | ||
6965 | #ifdef BCM_CNIC | ||
6966 | igu_vec = BP_L_ID(bp) + offset; | ||
6967 | bp->msix_table[1].entry = igu_vec; | ||
6968 | DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); | ||
6969 | offset++; | ||
6970 | #endif | ||
6888 | for_each_queue(bp, i) { | 6971 | for_each_queue(bp, i) { |
6889 | igu_vec = BP_L_ID(bp) + offset + i; | 6972 | igu_vec = BP_L_ID(bp) + offset + i; |
6890 | bp->msix_table[i + offset].entry = igu_vec; | 6973 | bp->msix_table[i + offset].entry = igu_vec; |
@@ -6915,14 +6998,13 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
6915 | return -EBUSY; | 6998 | return -EBUSY; |
6916 | } | 6999 | } |
6917 | 7000 | ||
7001 | #ifdef BCM_CNIC | ||
7002 | offset++; | ||
7003 | #endif | ||
6918 | for_each_queue(bp, i) { | 7004 | for_each_queue(bp, i) { |
6919 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 7005 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6920 | 7006 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | |
6921 | if (i < bp->num_rx_queues) | 7007 | bp->dev->name, i); |
6922 | sprintf(fp->name, "%s-rx-%d", bp->dev->name, i); | ||
6923 | else | ||
6924 | sprintf(fp->name, "%s-tx-%d", | ||
6925 | bp->dev->name, i - bp->num_rx_queues); | ||
6926 | 7008 | ||
6927 | rc = request_irq(bp->msix_table[i + offset].vector, | 7009 | rc = request_irq(bp->msix_table[i + offset].vector, |
6928 | bnx2x_msix_fp_int, 0, fp->name, fp); | 7010 | bnx2x_msix_fp_int, 0, fp->name, fp); |
@@ -6981,7 +7063,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp) | |||
6981 | { | 7063 | { |
6982 | int i; | 7064 | int i; |
6983 | 7065 | ||
6984 | for_each_rx_queue(bp, i) | 7066 | for_each_queue(bp, i) |
6985 | napi_enable(&bnx2x_fp(bp, i, napi)); | 7067 | napi_enable(&bnx2x_fp(bp, i, napi)); |
6986 | } | 7068 | } |
6987 | 7069 | ||
@@ -6989,7 +7071,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp) | |||
6989 | { | 7071 | { |
6990 | int i; | 7072 | int i; |
6991 | 7073 | ||
6992 | for_each_rx_queue(bp, i) | 7074 | for_each_queue(bp, i) |
6993 | napi_disable(&bnx2x_fp(bp, i, napi)); | 7075 | napi_disable(&bnx2x_fp(bp, i, napi)); |
6994 | } | 7076 | } |
6995 | 7077 | ||
@@ -7015,14 +7097,25 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
7015 | bnx2x_int_disable_sync(bp, disable_hw); | 7097 | bnx2x_int_disable_sync(bp, disable_hw); |
7016 | bnx2x_napi_disable(bp); | 7098 | bnx2x_napi_disable(bp); |
7017 | netif_tx_disable(bp->dev); | 7099 | netif_tx_disable(bp->dev); |
7018 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
7019 | } | 7100 | } |
7020 | 7101 | ||
7021 | /* | 7102 | /* |
7022 | * Init service functions | 7103 | * Init service functions |
7023 | */ | 7104 | */ |
7024 | 7105 | ||
7025 | static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | 7106 | /** |
7107 | * Sets a MAC in a CAM for a few L2 Clients for E1 chip | ||
7108 | * | ||
7109 | * @param bp driver descriptor | ||
7110 | * @param set set or clear an entry (1 or 0) | ||
7111 | * @param mac pointer to a buffer containing a MAC | ||
7112 | * @param cl_bit_vec bit vector of clients to register a MAC for | ||
7113 | * @param cam_offset offset in a CAM to use | ||
7114 | * @param with_bcast set broadcast MAC as well | ||
7115 | */ | ||
7116 | static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac, | ||
7117 | u32 cl_bit_vec, u8 cam_offset, | ||
7118 | u8 with_bcast) | ||
7026 | { | 7119 | { |
7027 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 7120 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); |
7028 | int port = BP_PORT(bp); | 7121 | int port = BP_PORT(bp); |
@@ -7031,25 +7124,25 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | |||
7031 | * unicasts 0-31:port0 32-63:port1 | 7124 | * unicasts 0-31:port0 32-63:port1 |
7032 | * multicast 64-127:port0 128-191:port1 | 7125 | * multicast 64-127:port0 128-191:port1 |
7033 | */ | 7126 | */ |
7034 | config->hdr.length = 2; | 7127 | config->hdr.length = 1 + (with_bcast ? 1 : 0); |
7035 | config->hdr.offset = port ? 32 : 0; | 7128 | config->hdr.offset = cam_offset; |
7036 | config->hdr.client_id = bp->fp->cl_id; | 7129 | config->hdr.client_id = 0xff; |
7037 | config->hdr.reserved1 = 0; | 7130 | config->hdr.reserved1 = 0; |
7038 | 7131 | ||
7039 | /* primary MAC */ | 7132 | /* primary MAC */ |
7040 | config->config_table[0].cam_entry.msb_mac_addr = | 7133 | config->config_table[0].cam_entry.msb_mac_addr = |
7041 | swab16(*(u16 *)&bp->dev->dev_addr[0]); | 7134 | swab16(*(u16 *)&mac[0]); |
7042 | config->config_table[0].cam_entry.middle_mac_addr = | 7135 | config->config_table[0].cam_entry.middle_mac_addr = |
7043 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | 7136 | swab16(*(u16 *)&mac[2]); |
7044 | config->config_table[0].cam_entry.lsb_mac_addr = | 7137 | config->config_table[0].cam_entry.lsb_mac_addr = |
7045 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 7138 | swab16(*(u16 *)&mac[4]); |
7046 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); | 7139 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); |
7047 | if (set) | 7140 | if (set) |
7048 | config->config_table[0].target_table_entry.flags = 0; | 7141 | config->config_table[0].target_table_entry.flags = 0; |
7049 | else | 7142 | else |
7050 | CAM_INVALIDATE(config->config_table[0]); | 7143 | CAM_INVALIDATE(config->config_table[0]); |
7051 | config->config_table[0].target_table_entry.clients_bit_vector = | 7144 | config->config_table[0].target_table_entry.clients_bit_vector = |
7052 | cpu_to_le32(1 << BP_L_ID(bp)); | 7145 | cpu_to_le32(cl_bit_vec); |
7053 | config->config_table[0].target_table_entry.vlan_id = 0; | 7146 | config->config_table[0].target_table_entry.vlan_id = 0; |
7054 | 7147 | ||
7055 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", | 7148 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", |
@@ -7059,47 +7152,58 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | |||
7059 | config->config_table[0].cam_entry.lsb_mac_addr); | 7152 | config->config_table[0].cam_entry.lsb_mac_addr); |
7060 | 7153 | ||
7061 | /* broadcast */ | 7154 | /* broadcast */ |
7062 | config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff); | 7155 | if (with_bcast) { |
7063 | config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff); | 7156 | config->config_table[1].cam_entry.msb_mac_addr = |
7064 | config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff); | 7157 | cpu_to_le16(0xffff); |
7065 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); | 7158 | config->config_table[1].cam_entry.middle_mac_addr = |
7066 | if (set) | 7159 | cpu_to_le16(0xffff); |
7067 | config->config_table[1].target_table_entry.flags = | 7160 | config->config_table[1].cam_entry.lsb_mac_addr = |
7068 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | 7161 | cpu_to_le16(0xffff); |
7069 | else | 7162 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); |
7070 | CAM_INVALIDATE(config->config_table[1]); | 7163 | if (set) |
7071 | config->config_table[1].target_table_entry.clients_bit_vector = | 7164 | config->config_table[1].target_table_entry.flags = |
7072 | cpu_to_le32(1 << BP_L_ID(bp)); | 7165 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; |
7073 | config->config_table[1].target_table_entry.vlan_id = 0; | 7166 | else |
7167 | CAM_INVALIDATE(config->config_table[1]); | ||
7168 | config->config_table[1].target_table_entry.clients_bit_vector = | ||
7169 | cpu_to_le32(cl_bit_vec); | ||
7170 | config->config_table[1].target_table_entry.vlan_id = 0; | ||
7171 | } | ||
7074 | 7172 | ||
7075 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 7173 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
7076 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 7174 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), |
7077 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 7175 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); |
7078 | } | 7176 | } |
7079 | 7177 | ||
7080 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) | 7178 | /** |
7179 | * Sets a MAC in a CAM for a few L2 Clients for E1H chip | ||
7180 | * | ||
7181 | * @param bp driver descriptor | ||
7182 | * @param set set or clear an entry (1 or 0) | ||
7183 | * @param mac pointer to a buffer containing a MAC | ||
7184 | * @param cl_bit_vec bit vector of clients to register a MAC for | ||
7185 | * @param cam_offset offset in a CAM to use | ||
7186 | */ | ||
7187 | static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, | ||
7188 | u32 cl_bit_vec, u8 cam_offset) | ||
7081 | { | 7189 | { |
7082 | struct mac_configuration_cmd_e1h *config = | 7190 | struct mac_configuration_cmd_e1h *config = |
7083 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | 7191 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); |
7084 | 7192 | ||
7085 | /* CAM allocation for E1H | ||
7086 | * unicasts: by func number | ||
7087 | * multicast: 20+FUNC*20, 20 each | ||
7088 | */ | ||
7089 | config->hdr.length = 1; | 7193 | config->hdr.length = 1; |
7090 | config->hdr.offset = BP_FUNC(bp); | 7194 | config->hdr.offset = cam_offset; |
7091 | config->hdr.client_id = bp->fp->cl_id; | 7195 | config->hdr.client_id = 0xff; |
7092 | config->hdr.reserved1 = 0; | 7196 | config->hdr.reserved1 = 0; |
7093 | 7197 | ||
7094 | /* primary MAC */ | 7198 | /* primary MAC */ |
7095 | config->config_table[0].msb_mac_addr = | 7199 | config->config_table[0].msb_mac_addr = |
7096 | swab16(*(u16 *)&bp->dev->dev_addr[0]); | 7200 | swab16(*(u16 *)&mac[0]); |
7097 | config->config_table[0].middle_mac_addr = | 7201 | config->config_table[0].middle_mac_addr = |
7098 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | 7202 | swab16(*(u16 *)&mac[2]); |
7099 | config->config_table[0].lsb_mac_addr = | 7203 | config->config_table[0].lsb_mac_addr = |
7100 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 7204 | swab16(*(u16 *)&mac[4]); |
7101 | config->config_table[0].clients_bit_vector = | 7205 | config->config_table[0].clients_bit_vector = |
7102 | cpu_to_le32(1 << BP_L_ID(bp)); | 7206 | cpu_to_le32(cl_bit_vec); |
7103 | config->config_table[0].vlan_id = 0; | 7207 | config->config_table[0].vlan_id = 0; |
7104 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | 7208 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); |
7105 | if (set) | 7209 | if (set) |
@@ -7108,11 +7212,11 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) | |||
7108 | config->config_table[0].flags = | 7212 | config->config_table[0].flags = |
7109 | MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; | 7213 | MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; |
7110 | 7214 | ||
7111 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", | 7215 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", |
7112 | (set ? "setting" : "clearing"), | 7216 | (set ? "setting" : "clearing"), |
7113 | config->config_table[0].msb_mac_addr, | 7217 | config->config_table[0].msb_mac_addr, |
7114 | config->config_table[0].middle_mac_addr, | 7218 | config->config_table[0].middle_mac_addr, |
7115 | config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); | 7219 | config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); |
7116 | 7220 | ||
7117 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 7221 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
7118 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 7222 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), |
@@ -7164,6 +7268,69 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
7164 | return -EBUSY; | 7268 | return -EBUSY; |
7165 | } | 7269 | } |
7166 | 7270 | ||
7271 | static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) | ||
7272 | { | ||
7273 | bp->set_mac_pending++; | ||
7274 | smp_wmb(); | ||
7275 | |||
7276 | bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, | ||
7277 | (1 << bp->fp->cl_id), BP_FUNC(bp)); | ||
7278 | |||
7279 | /* Wait for a completion */ | ||
7280 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | ||
7281 | } | ||
7282 | |||
7283 | static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) | ||
7284 | { | ||
7285 | bp->set_mac_pending++; | ||
7286 | smp_wmb(); | ||
7287 | |||
7288 | bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, | ||
7289 | (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), | ||
7290 | 1); | ||
7291 | |||
7292 | /* Wait for a completion */ | ||
7293 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | ||
7294 | } | ||
7295 | |||
7296 | #ifdef BCM_CNIC | ||
7297 | /** | ||
7298 | * Set iSCSI MAC(s) at the next enties in the CAM after the ETH | ||
7299 | * MAC(s). This function will wait until the ramdord completion | ||
7300 | * returns. | ||
7301 | * | ||
7302 | * @param bp driver handle | ||
7303 | * @param set set or clear the CAM entry | ||
7304 | * | ||
7305 | * @return 0 if cussess, -ENODEV if ramrod doesn't return. | ||
7306 | */ | ||
7307 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | ||
7308 | { | ||
7309 | u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); | ||
7310 | |||
7311 | bp->set_mac_pending++; | ||
7312 | smp_wmb(); | ||
7313 | |||
7314 | /* Send a SET_MAC ramrod */ | ||
7315 | if (CHIP_IS_E1(bp)) | ||
7316 | bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, | ||
7317 | cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2, | ||
7318 | 1); | ||
7319 | else | ||
7320 | /* CAM allocation for E1H | ||
7321 | * unicasts: by func number | ||
7322 | * multicast: 20+FUNC*20, 20 each | ||
7323 | */ | ||
7324 | bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac, | ||
7325 | cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp)); | ||
7326 | |||
7327 | /* Wait for a completion when setting */ | ||
7328 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | ||
7329 | |||
7330 | return 0; | ||
7331 | } | ||
7332 | #endif | ||
7333 | |||
7167 | static int bnx2x_setup_leading(struct bnx2x *bp) | 7334 | static int bnx2x_setup_leading(struct bnx2x *bp) |
7168 | { | 7335 | { |
7169 | int rc; | 7336 | int rc; |
@@ -7199,96 +7366,67 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
7199 | 7366 | ||
7200 | static int bnx2x_poll(struct napi_struct *napi, int budget); | 7367 | static int bnx2x_poll(struct napi_struct *napi, int budget); |
7201 | 7368 | ||
7202 | static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, | 7369 | static void bnx2x_set_num_queues_msix(struct bnx2x *bp) |
7203 | int *num_tx_queues_out) | ||
7204 | { | 7370 | { |
7205 | int _num_rx_queues = 0, _num_tx_queues = 0; | ||
7206 | 7371 | ||
7207 | switch (bp->multi_mode) { | 7372 | switch (bp->multi_mode) { |
7208 | case ETH_RSS_MODE_DISABLED: | 7373 | case ETH_RSS_MODE_DISABLED: |
7209 | _num_rx_queues = 1; | 7374 | bp->num_queues = 1; |
7210 | _num_tx_queues = 1; | ||
7211 | break; | 7375 | break; |
7212 | 7376 | ||
7213 | case ETH_RSS_MODE_REGULAR: | 7377 | case ETH_RSS_MODE_REGULAR: |
7214 | if (num_rx_queues) | 7378 | if (num_queues) |
7215 | _num_rx_queues = min_t(u32, num_rx_queues, | 7379 | bp->num_queues = min_t(u32, num_queues, |
7216 | BNX2X_MAX_QUEUES(bp)); | 7380 | BNX2X_MAX_QUEUES(bp)); |
7217 | else | 7381 | else |
7218 | _num_rx_queues = min_t(u32, num_online_cpus(), | 7382 | bp->num_queues = min_t(u32, num_online_cpus(), |
7219 | BNX2X_MAX_QUEUES(bp)); | 7383 | BNX2X_MAX_QUEUES(bp)); |
7220 | |||
7221 | if (num_tx_queues) | ||
7222 | _num_tx_queues = min_t(u32, num_tx_queues, | ||
7223 | BNX2X_MAX_QUEUES(bp)); | ||
7224 | else | ||
7225 | _num_tx_queues = min_t(u32, num_online_cpus(), | ||
7226 | BNX2X_MAX_QUEUES(bp)); | ||
7227 | |||
7228 | /* There must be not more Tx queues than Rx queues */ | ||
7229 | if (_num_tx_queues > _num_rx_queues) { | ||
7230 | BNX2X_ERR("number of tx queues (%d) > " | ||
7231 | "number of rx queues (%d)" | ||
7232 | " defaulting to %d\n", | ||
7233 | _num_tx_queues, _num_rx_queues, | ||
7234 | _num_rx_queues); | ||
7235 | _num_tx_queues = _num_rx_queues; | ||
7236 | } | ||
7237 | break; | 7384 | break; |
7238 | 7385 | ||
7239 | 7386 | ||
7240 | default: | 7387 | default: |
7241 | _num_rx_queues = 1; | 7388 | bp->num_queues = 1; |
7242 | _num_tx_queues = 1; | ||
7243 | break; | 7389 | break; |
7244 | } | 7390 | } |
7245 | |||
7246 | *num_rx_queues_out = _num_rx_queues; | ||
7247 | *num_tx_queues_out = _num_tx_queues; | ||
7248 | } | 7391 | } |
7249 | 7392 | ||
7250 | static int bnx2x_set_int_mode(struct bnx2x *bp) | 7393 | static int bnx2x_set_num_queues(struct bnx2x *bp) |
7251 | { | 7394 | { |
7252 | int rc = 0; | 7395 | int rc = 0; |
7253 | 7396 | ||
7254 | switch (int_mode) { | 7397 | switch (int_mode) { |
7255 | case INT_MODE_INTx: | 7398 | case INT_MODE_INTx: |
7256 | case INT_MODE_MSI: | 7399 | case INT_MODE_MSI: |
7257 | bp->num_rx_queues = 1; | 7400 | bp->num_queues = 1; |
7258 | bp->num_tx_queues = 1; | ||
7259 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); | 7401 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); |
7260 | break; | 7402 | break; |
7261 | 7403 | ||
7262 | case INT_MODE_MSIX: | 7404 | case INT_MODE_MSIX: |
7263 | default: | 7405 | default: |
7264 | /* Set interrupt mode according to bp->multi_mode value */ | 7406 | /* Set number of queues according to bp->multi_mode value */ |
7265 | bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, | 7407 | bnx2x_set_num_queues_msix(bp); |
7266 | &bp->num_tx_queues); | ||
7267 | 7408 | ||
7268 | DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", | 7409 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", |
7269 | bp->num_rx_queues, bp->num_tx_queues); | 7410 | bp->num_queues); |
7270 | 7411 | ||
7271 | /* if we can't use MSI-X we only need one fp, | 7412 | /* if we can't use MSI-X we only need one fp, |
7272 | * so try to enable MSI-X with the requested number of fp's | 7413 | * so try to enable MSI-X with the requested number of fp's |
7273 | * and fallback to MSI or legacy INTx with one fp | 7414 | * and fallback to MSI or legacy INTx with one fp |
7274 | */ | 7415 | */ |
7275 | rc = bnx2x_enable_msix(bp); | 7416 | rc = bnx2x_enable_msix(bp); |
7276 | if (rc) { | 7417 | if (rc) |
7277 | /* failed to enable MSI-X */ | 7418 | /* failed to enable MSI-X */ |
7278 | if (bp->multi_mode) | 7419 | bp->num_queues = 1; |
7279 | BNX2X_ERR("Multi requested but failed to " | ||
7280 | "enable MSI-X (rx %d tx %d), " | ||
7281 | "set number of queues to 1\n", | ||
7282 | bp->num_rx_queues, bp->num_tx_queues); | ||
7283 | bp->num_rx_queues = 1; | ||
7284 | bp->num_tx_queues = 1; | ||
7285 | } | ||
7286 | break; | 7420 | break; |
7287 | } | 7421 | } |
7288 | bp->dev->real_num_tx_queues = bp->num_tx_queues; | 7422 | bp->dev->real_num_tx_queues = bp->num_queues; |
7289 | return rc; | 7423 | return rc; |
7290 | } | 7424 | } |
7291 | 7425 | ||
7426 | #ifdef BCM_CNIC | ||
7427 | static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); | ||
7428 | static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); | ||
7429 | #endif | ||
7292 | 7430 | ||
7293 | /* must be called with rtnl_lock */ | 7431 | /* must be called with rtnl_lock */ |
7294 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 7432 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
@@ -7303,16 +7441,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7303 | 7441 | ||
7304 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 7442 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
7305 | 7443 | ||
7306 | rc = bnx2x_set_int_mode(bp); | 7444 | rc = bnx2x_set_num_queues(bp); |
7307 | 7445 | ||
7308 | if (bnx2x_alloc_mem(bp)) | 7446 | if (bnx2x_alloc_mem(bp)) |
7309 | return -ENOMEM; | 7447 | return -ENOMEM; |
7310 | 7448 | ||
7311 | for_each_rx_queue(bp, i) | 7449 | for_each_queue(bp, i) |
7312 | bnx2x_fp(bp, i, disable_tpa) = | 7450 | bnx2x_fp(bp, i, disable_tpa) = |
7313 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 7451 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
7314 | 7452 | ||
7315 | for_each_rx_queue(bp, i) | 7453 | for_each_queue(bp, i) |
7316 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 7454 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
7317 | bnx2x_poll, 128); | 7455 | bnx2x_poll, 128); |
7318 | 7456 | ||
@@ -7326,7 +7464,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7326 | } | 7464 | } |
7327 | } else { | 7465 | } else { |
7328 | /* Fall to INTx if failed to enable MSI-X due to lack of | 7466 | /* Fall to INTx if failed to enable MSI-X due to lack of |
7329 | memory (in bnx2x_set_int_mode()) */ | 7467 | memory (in bnx2x_set_num_queues()) */ |
7330 | if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) | 7468 | if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) |
7331 | bnx2x_enable_msi(bp); | 7469 | bnx2x_enable_msi(bp); |
7332 | bnx2x_ack_int(bp); | 7470 | bnx2x_ack_int(bp); |
@@ -7427,20 +7565,37 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7427 | if (CHIP_IS_E1H(bp)) | 7565 | if (CHIP_IS_E1H(bp)) |
7428 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { | 7566 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { |
7429 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); | 7567 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); |
7430 | bp->state = BNX2X_STATE_DISABLED; | 7568 | bp->flags |= MF_FUNC_DIS; |
7431 | } | 7569 | } |
7432 | 7570 | ||
7433 | if (bp->state == BNX2X_STATE_OPEN) { | 7571 | if (bp->state == BNX2X_STATE_OPEN) { |
7572 | #ifdef BCM_CNIC | ||
7573 | /* Enable Timer scan */ | ||
7574 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); | ||
7575 | #endif | ||
7434 | for_each_nondefault_queue(bp, i) { | 7576 | for_each_nondefault_queue(bp, i) { |
7435 | rc = bnx2x_setup_multi(bp, i); | 7577 | rc = bnx2x_setup_multi(bp, i); |
7436 | if (rc) | 7578 | if (rc) |
7579 | #ifdef BCM_CNIC | ||
7580 | goto load_error4; | ||
7581 | #else | ||
7437 | goto load_error3; | 7582 | goto load_error3; |
7583 | #endif | ||
7438 | } | 7584 | } |
7439 | 7585 | ||
7440 | if (CHIP_IS_E1(bp)) | 7586 | if (CHIP_IS_E1(bp)) |
7441 | bnx2x_set_mac_addr_e1(bp, 1); | 7587 | bnx2x_set_eth_mac_addr_e1(bp, 1); |
7442 | else | 7588 | else |
7443 | bnx2x_set_mac_addr_e1h(bp, 1); | 7589 | bnx2x_set_eth_mac_addr_e1h(bp, 1); |
7590 | #ifdef BCM_CNIC | ||
7591 | /* Set iSCSI L2 MAC */ | ||
7592 | mutex_lock(&bp->cnic_mutex); | ||
7593 | if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { | ||
7594 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | ||
7595 | bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; | ||
7596 | } | ||
7597 | mutex_unlock(&bp->cnic_mutex); | ||
7598 | #endif | ||
7444 | } | 7599 | } |
7445 | 7600 | ||
7446 | if (bp->port.pmf) | 7601 | if (bp->port.pmf) |
@@ -7481,9 +7636,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7481 | /* start the timer */ | 7636 | /* start the timer */ |
7482 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 7637 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
7483 | 7638 | ||
7639 | #ifdef BCM_CNIC | ||
7640 | bnx2x_setup_cnic_irq_info(bp); | ||
7641 | if (bp->state == BNX2X_STATE_OPEN) | ||
7642 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); | ||
7643 | #endif | ||
7484 | 7644 | ||
7485 | return 0; | 7645 | return 0; |
7486 | 7646 | ||
7647 | #ifdef BCM_CNIC | ||
7648 | load_error4: | ||
7649 | /* Disable Timer scan */ | ||
7650 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); | ||
7651 | #endif | ||
7487 | load_error3: | 7652 | load_error3: |
7488 | bnx2x_int_disable_sync(bp, 1); | 7653 | bnx2x_int_disable_sync(bp, 1); |
7489 | if (!BP_NOMCP(bp)) { | 7654 | if (!BP_NOMCP(bp)) { |
@@ -7493,14 +7658,14 @@ load_error3: | |||
7493 | bp->port.pmf = 0; | 7658 | bp->port.pmf = 0; |
7494 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 7659 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
7495 | bnx2x_free_skbs(bp); | 7660 | bnx2x_free_skbs(bp); |
7496 | for_each_rx_queue(bp, i) | 7661 | for_each_queue(bp, i) |
7497 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 7662 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
7498 | load_error2: | 7663 | load_error2: |
7499 | /* Release IRQs */ | 7664 | /* Release IRQs */ |
7500 | bnx2x_free_irq(bp); | 7665 | bnx2x_free_irq(bp); |
7501 | load_error1: | 7666 | load_error1: |
7502 | bnx2x_napi_disable(bp); | 7667 | bnx2x_napi_disable(bp); |
7503 | for_each_rx_queue(bp, i) | 7668 | for_each_queue(bp, i) |
7504 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 7669 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
7505 | bnx2x_free_mem(bp); | 7670 | bnx2x_free_mem(bp); |
7506 | 7671 | ||
@@ -7591,6 +7756,19 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
7591 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 7756 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
7592 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | 7757 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
7593 | 7758 | ||
7759 | #ifdef BCM_CNIC | ||
7760 | /* Disable Timer scan */ | ||
7761 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); | ||
7762 | /* | ||
7763 | * Wait for at least 10ms and up to 2 second for the timers scan to | ||
7764 | * complete | ||
7765 | */ | ||
7766 | for (i = 0; i < 200; i++) { | ||
7767 | msleep(10); | ||
7768 | if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) | ||
7769 | break; | ||
7770 | } | ||
7771 | #endif | ||
7594 | /* Clear ILT */ | 7772 | /* Clear ILT */ |
7595 | base = FUNC_ILT_BASE(func); | 7773 | base = FUNC_ILT_BASE(func); |
7596 | for (i = base; i < base + ILT_PER_FUNC; i++) | 7774 | for (i = base; i < base + ILT_PER_FUNC; i++) |
@@ -7657,6 +7835,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
7657 | u32 reset_code = 0; | 7835 | u32 reset_code = 0; |
7658 | int i, cnt, rc; | 7836 | int i, cnt, rc; |
7659 | 7837 | ||
7838 | #ifdef BCM_CNIC | ||
7839 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
7840 | #endif | ||
7660 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 7841 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
7661 | 7842 | ||
7662 | /* Set "drop all" */ | 7843 | /* Set "drop all" */ |
@@ -7675,7 +7856,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
7675 | bnx2x_free_irq(bp); | 7856 | bnx2x_free_irq(bp); |
7676 | 7857 | ||
7677 | /* Wait until tx fastpath tasks complete */ | 7858 | /* Wait until tx fastpath tasks complete */ |
7678 | for_each_tx_queue(bp, i) { | 7859 | for_each_queue(bp, i) { |
7679 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 7860 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
7680 | 7861 | ||
7681 | cnt = 1000; | 7862 | cnt = 1000; |
@@ -7703,7 +7884,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
7703 | struct mac_configuration_cmd *config = | 7884 | struct mac_configuration_cmd *config = |
7704 | bnx2x_sp(bp, mcast_config); | 7885 | bnx2x_sp(bp, mcast_config); |
7705 | 7886 | ||
7706 | bnx2x_set_mac_addr_e1(bp, 0); | 7887 | bnx2x_set_eth_mac_addr_e1(bp, 0); |
7707 | 7888 | ||
7708 | for (i = 0; i < config->hdr.length; i++) | 7889 | for (i = 0; i < config->hdr.length; i++) |
7709 | CAM_INVALIDATE(config->config_table[i]); | 7890 | CAM_INVALIDATE(config->config_table[i]); |
@@ -7716,6 +7897,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
7716 | config->hdr.client_id = bp->fp->cl_id; | 7897 | config->hdr.client_id = bp->fp->cl_id; |
7717 | config->hdr.reserved1 = 0; | 7898 | config->hdr.reserved1 = 0; |
7718 | 7899 | ||
7900 | bp->set_mac_pending++; | ||
7901 | smp_wmb(); | ||
7902 | |||
7719 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 7903 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
7720 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | 7904 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), |
7721 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); | 7905 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); |
@@ -7723,13 +7907,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
7723 | } else { /* E1H */ | 7907 | } else { /* E1H */ |
7724 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | 7908 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
7725 | 7909 | ||
7726 | bnx2x_set_mac_addr_e1h(bp, 0); | 7910 | bnx2x_set_eth_mac_addr_e1h(bp, 0); |
7727 | 7911 | ||
7728 | for (i = 0; i < MC_HASH_SIZE; i++) | 7912 | for (i = 0; i < MC_HASH_SIZE; i++) |
7729 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | 7913 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); |
7730 | 7914 | ||
7731 | REG_WR(bp, MISC_REG_E1HMF_MODE, 0); | 7915 | REG_WR(bp, MISC_REG_E1HMF_MODE, 0); |
7732 | } | 7916 | } |
7917 | #ifdef BCM_CNIC | ||
7918 | /* Clear iSCSI L2 MAC */ | ||
7919 | mutex_lock(&bp->cnic_mutex); | ||
7920 | if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) { | ||
7921 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | ||
7922 | bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET; | ||
7923 | } | ||
7924 | mutex_unlock(&bp->cnic_mutex); | ||
7925 | #endif | ||
7733 | 7926 | ||
7734 | if (unload_mode == UNLOAD_NORMAL) | 7927 | if (unload_mode == UNLOAD_NORMAL) |
7735 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 7928 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
@@ -7806,9 +7999,9 @@ unload_error: | |||
7806 | 7999 | ||
7807 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 8000 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
7808 | bnx2x_free_skbs(bp); | 8001 | bnx2x_free_skbs(bp); |
7809 | for_each_rx_queue(bp, i) | 8002 | for_each_queue(bp, i) |
7810 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 8003 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
7811 | for_each_rx_queue(bp, i) | 8004 | for_each_queue(bp, i) |
7812 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 8005 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
7813 | bnx2x_free_mem(bp); | 8006 | bnx2x_free_mem(bp); |
7814 | 8007 | ||
@@ -8506,6 +8699,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8506 | bp->link_params.req_flow_ctrl, bp->port.advertising); | 8699 | bp->link_params.req_flow_ctrl, bp->port.advertising); |
8507 | } | 8700 | } |
8508 | 8701 | ||
8702 | static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) | ||
8703 | { | ||
8704 | mac_hi = cpu_to_be16(mac_hi); | ||
8705 | mac_lo = cpu_to_be32(mac_lo); | ||
8706 | memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); | ||
8707 | memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); | ||
8708 | } | ||
8709 | |||
8509 | static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) | 8710 | static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) |
8510 | { | 8711 | { |
8511 | int port = BP_PORT(bp); | 8712 | int port = BP_PORT(bp); |
@@ -8587,14 +8788,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) | |||
8587 | 8788 | ||
8588 | val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); | 8789 | val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); |
8589 | val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); | 8790 | val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); |
8590 | bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); | 8791 | bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); |
8591 | bp->dev->dev_addr[1] = (u8)(val2 & 0xff); | ||
8592 | bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); | ||
8593 | bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); | ||
8594 | bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); | ||
8595 | bp->dev->dev_addr[5] = (u8)(val & 0xff); | ||
8596 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); | 8792 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); |
8597 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | 8793 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); |
8794 | |||
8795 | #ifdef BCM_CNIC | ||
8796 | val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); | ||
8797 | val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); | ||
8798 | bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); | ||
8799 | #endif | ||
8598 | } | 8800 | } |
8599 | 8801 | ||
8600 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | 8802 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) |
@@ -8690,6 +8892,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8690 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | 8892 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ |
8691 | 8893 | ||
8692 | mutex_init(&bp->port.phy_mutex); | 8894 | mutex_init(&bp->port.phy_mutex); |
8895 | mutex_init(&bp->fw_mb_mutex); | ||
8896 | #ifdef BCM_CNIC | ||
8897 | mutex_init(&bp->cnic_mutex); | ||
8898 | #endif | ||
8693 | 8899 | ||
8694 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 8900 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
8695 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | 8901 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); |
@@ -8738,8 +8944,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8738 | 8944 | ||
8739 | bp->rx_csum = 1; | 8945 | bp->rx_csum = 1; |
8740 | 8946 | ||
8741 | bp->tx_ticks = 50; | 8947 | /* make sure that the numbers are in the right granularity */ |
8742 | bp->rx_ticks = 25; | 8948 | bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); |
8949 | bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); | ||
8743 | 8950 | ||
8744 | timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); | 8951 | timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); |
8745 | bp->current_interval = (poll ? poll : timer_interval); | 8952 | bp->current_interval = (poll ? poll : timer_interval); |
@@ -8765,20 +8972,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
8765 | cmd->supported = bp->port.supported; | 8972 | cmd->supported = bp->port.supported; |
8766 | cmd->advertising = bp->port.advertising; | 8973 | cmd->advertising = bp->port.advertising; |
8767 | 8974 | ||
8768 | if (netif_carrier_ok(dev)) { | 8975 | if ((bp->state == BNX2X_STATE_OPEN) && |
8976 | !(bp->flags & MF_FUNC_DIS) && | ||
8977 | (bp->link_vars.link_up)) { | ||
8769 | cmd->speed = bp->link_vars.line_speed; | 8978 | cmd->speed = bp->link_vars.line_speed; |
8770 | cmd->duplex = bp->link_vars.duplex; | 8979 | cmd->duplex = bp->link_vars.duplex; |
8771 | } else { | 8980 | if (IS_E1HMF(bp)) { |
8772 | cmd->speed = bp->link_params.req_line_speed; | 8981 | u16 vn_max_rate; |
8773 | cmd->duplex = bp->link_params.req_duplex; | ||
8774 | } | ||
8775 | if (IS_E1HMF(bp)) { | ||
8776 | u16 vn_max_rate; | ||
8777 | 8982 | ||
8778 | vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | 8983 | vn_max_rate = |
8984 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
8779 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 8985 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; |
8780 | if (vn_max_rate < cmd->speed) | 8986 | if (vn_max_rate < cmd->speed) |
8781 | cmd->speed = vn_max_rate; | 8987 | cmd->speed = vn_max_rate; |
8988 | } | ||
8989 | } else { | ||
8990 | cmd->speed = -1; | ||
8991 | cmd->duplex = -1; | ||
8782 | } | 8992 | } |
8783 | 8993 | ||
8784 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { | 8994 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { |
@@ -9163,6 +9373,9 @@ static u32 bnx2x_get_link(struct net_device *dev) | |||
9163 | { | 9373 | { |
9164 | struct bnx2x *bp = netdev_priv(dev); | 9374 | struct bnx2x *bp = netdev_priv(dev); |
9165 | 9375 | ||
9376 | if (bp->flags & MF_FUNC_DIS) | ||
9377 | return 0; | ||
9378 | |||
9166 | return bp->link_vars.link_up; | 9379 | return bp->link_vars.link_up; |
9167 | } | 9380 | } |
9168 | 9381 | ||
@@ -9567,8 +9780,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
9567 | 9780 | ||
9568 | } else if (eeprom->magic == 0x50485952) { | 9781 | } else if (eeprom->magic == 0x50485952) { |
9569 | /* 'PHYR' (0x50485952): re-init link after FW upgrade */ | 9782 | /* 'PHYR' (0x50485952): re-init link after FW upgrade */ |
9570 | if ((bp->state == BNX2X_STATE_OPEN) || | 9783 | if (bp->state == BNX2X_STATE_OPEN) { |
9571 | (bp->state == BNX2X_STATE_DISABLED)) { | ||
9572 | bnx2x_acquire_phy_lock(bp); | 9784 | bnx2x_acquire_phy_lock(bp); |
9573 | rc |= bnx2x_link_reset(&bp->link_params, | 9785 | rc |= bnx2x_link_reset(&bp->link_params, |
9574 | &bp->link_vars, 1); | 9786 | &bp->link_vars, 1); |
@@ -9818,11 +10030,6 @@ static const struct { | |||
9818 | { "idle check (online)" } | 10030 | { "idle check (online)" } |
9819 | }; | 10031 | }; |
9820 | 10032 | ||
9821 | static int bnx2x_self_test_count(struct net_device *dev) | ||
9822 | { | ||
9823 | return BNX2X_NUM_TESTS; | ||
9824 | } | ||
9825 | |||
9826 | static int bnx2x_test_registers(struct bnx2x *bp) | 10033 | static int bnx2x_test_registers(struct bnx2x *bp) |
9827 | { | 10034 | { |
9828 | int idx, i, rc = -ENODEV; | 10035 | int idx, i, rc = -ENODEV; |
@@ -9990,7 +10197,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
9990 | struct sk_buff *skb; | 10197 | struct sk_buff *skb; |
9991 | unsigned char *packet; | 10198 | unsigned char *packet; |
9992 | struct bnx2x_fastpath *fp_rx = &bp->fp[0]; | 10199 | struct bnx2x_fastpath *fp_rx = &bp->fp[0]; |
9993 | struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; | 10200 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; |
9994 | u16 tx_start_idx, tx_idx; | 10201 | u16 tx_start_idx, tx_idx; |
9995 | u16 rx_start_idx, rx_idx; | 10202 | u16 rx_start_idx, rx_idx; |
9996 | u16 pkt_prod, bd_prod; | 10203 | u16 pkt_prod, bd_prod; |
@@ -10067,13 +10274,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
10067 | 10274 | ||
10068 | fp_tx->tx_db.data.prod += 2; | 10275 | fp_tx->tx_db.data.prod += 2; |
10069 | barrier(); | 10276 | barrier(); |
10070 | DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); | 10277 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); |
10071 | 10278 | ||
10072 | mmiowb(); | 10279 | mmiowb(); |
10073 | 10280 | ||
10074 | num_pkts++; | 10281 | num_pkts++; |
10075 | fp_tx->tx_bd_prod += 2; /* start + pbd */ | 10282 | fp_tx->tx_bd_prod += 2; /* start + pbd */ |
10076 | bp->dev->trans_start = jiffies; | ||
10077 | 10283 | ||
10078 | udelay(100); | 10284 | udelay(100); |
10079 | 10285 | ||
@@ -10223,14 +10429,16 @@ static int bnx2x_test_intr(struct bnx2x *bp) | |||
10223 | config->hdr.client_id = bp->fp->cl_id; | 10429 | config->hdr.client_id = bp->fp->cl_id; |
10224 | config->hdr.reserved1 = 0; | 10430 | config->hdr.reserved1 = 0; |
10225 | 10431 | ||
10432 | bp->set_mac_pending++; | ||
10433 | smp_wmb(); | ||
10226 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 10434 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
10227 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 10435 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), |
10228 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 10436 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); |
10229 | if (rc == 0) { | 10437 | if (rc == 0) { |
10230 | bp->set_mac_pending++; | ||
10231 | for (i = 0; i < 10; i++) { | 10438 | for (i = 0; i < 10; i++) { |
10232 | if (!bp->set_mac_pending) | 10439 | if (!bp->set_mac_pending) |
10233 | break; | 10440 | break; |
10441 | smp_rmb(); | ||
10234 | msleep_interruptible(10); | 10442 | msleep_interruptible(10); |
10235 | } | 10443 | } |
10236 | if (i == 10) | 10444 | if (i == 10) |
@@ -10264,7 +10472,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
10264 | /* disable input for TX port IF */ | 10472 | /* disable input for TX port IF */ |
10265 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); | 10473 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); |
10266 | 10474 | ||
10267 | link_up = bp->link_vars.link_up; | 10475 | link_up = (bnx2x_link_test(bp) == 0); |
10268 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | 10476 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
10269 | bnx2x_nic_load(bp, LOAD_DIAG); | 10477 | bnx2x_nic_load(bp, LOAD_DIAG); |
10270 | /* wait until link state is restored */ | 10478 | /* wait until link state is restored */ |
@@ -10436,6 +10644,36 @@ static const struct { | |||
10436 | #define IS_E1HMF_MODE_STAT(bp) \ | 10644 | #define IS_E1HMF_MODE_STAT(bp) \ |
10437 | (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) | 10645 | (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) |
10438 | 10646 | ||
10647 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | ||
10648 | { | ||
10649 | struct bnx2x *bp = netdev_priv(dev); | ||
10650 | int i, num_stats; | ||
10651 | |||
10652 | switch(stringset) { | ||
10653 | case ETH_SS_STATS: | ||
10654 | if (is_multi(bp)) { | ||
10655 | num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; | ||
10656 | if (!IS_E1HMF_MODE_STAT(bp)) | ||
10657 | num_stats += BNX2X_NUM_STATS; | ||
10658 | } else { | ||
10659 | if (IS_E1HMF_MODE_STAT(bp)) { | ||
10660 | num_stats = 0; | ||
10661 | for (i = 0; i < BNX2X_NUM_STATS; i++) | ||
10662 | if (IS_FUNC_STAT(i)) | ||
10663 | num_stats++; | ||
10664 | } else | ||
10665 | num_stats = BNX2X_NUM_STATS; | ||
10666 | } | ||
10667 | return num_stats; | ||
10668 | |||
10669 | case ETH_SS_TEST: | ||
10670 | return BNX2X_NUM_TESTS; | ||
10671 | |||
10672 | default: | ||
10673 | return -EINVAL; | ||
10674 | } | ||
10675 | } | ||
10676 | |||
10439 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | 10677 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) |
10440 | { | 10678 | { |
10441 | struct bnx2x *bp = netdev_priv(dev); | 10679 | struct bnx2x *bp = netdev_priv(dev); |
@@ -10445,7 +10683,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
10445 | case ETH_SS_STATS: | 10683 | case ETH_SS_STATS: |
10446 | if (is_multi(bp)) { | 10684 | if (is_multi(bp)) { |
10447 | k = 0; | 10685 | k = 0; |
10448 | for_each_rx_queue(bp, i) { | 10686 | for_each_queue(bp, i) { |
10449 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | 10687 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) |
10450 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, | 10688 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, |
10451 | bnx2x_q_stats_arr[j].string, i); | 10689 | bnx2x_q_stats_arr[j].string, i); |
@@ -10473,28 +10711,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
10473 | } | 10711 | } |
10474 | } | 10712 | } |
10475 | 10713 | ||
10476 | static int bnx2x_get_stats_count(struct net_device *dev) | ||
10477 | { | ||
10478 | struct bnx2x *bp = netdev_priv(dev); | ||
10479 | int i, num_stats; | ||
10480 | |||
10481 | if (is_multi(bp)) { | ||
10482 | num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues; | ||
10483 | if (!IS_E1HMF_MODE_STAT(bp)) | ||
10484 | num_stats += BNX2X_NUM_STATS; | ||
10485 | } else { | ||
10486 | if (IS_E1HMF_MODE_STAT(bp)) { | ||
10487 | num_stats = 0; | ||
10488 | for (i = 0; i < BNX2X_NUM_STATS; i++) | ||
10489 | if (IS_FUNC_STAT(i)) | ||
10490 | num_stats++; | ||
10491 | } else | ||
10492 | num_stats = BNX2X_NUM_STATS; | ||
10493 | } | ||
10494 | |||
10495 | return num_stats; | ||
10496 | } | ||
10497 | |||
10498 | static void bnx2x_get_ethtool_stats(struct net_device *dev, | 10714 | static void bnx2x_get_ethtool_stats(struct net_device *dev, |
10499 | struct ethtool_stats *stats, u64 *buf) | 10715 | struct ethtool_stats *stats, u64 *buf) |
10500 | { | 10716 | { |
@@ -10504,7 +10720,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
10504 | 10720 | ||
10505 | if (is_multi(bp)) { | 10721 | if (is_multi(bp)) { |
10506 | k = 0; | 10722 | k = 0; |
10507 | for_each_rx_queue(bp, i) { | 10723 | for_each_queue(bp, i) { |
10508 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | 10724 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; |
10509 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | 10725 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { |
10510 | if (bnx2x_q_stats_arr[j].size == 0) { | 10726 | if (bnx2x_q_stats_arr[j].size == 0) { |
@@ -10570,7 +10786,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
10570 | static int bnx2x_phys_id(struct net_device *dev, u32 data) | 10786 | static int bnx2x_phys_id(struct net_device *dev, u32 data) |
10571 | { | 10787 | { |
10572 | struct bnx2x *bp = netdev_priv(dev); | 10788 | struct bnx2x *bp = netdev_priv(dev); |
10573 | int port = BP_PORT(bp); | ||
10574 | int i; | 10789 | int i; |
10575 | 10790 | ||
10576 | if (!netif_running(dev)) | 10791 | if (!netif_running(dev)) |
@@ -10584,13 +10799,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) | |||
10584 | 10799 | ||
10585 | for (i = 0; i < (data * 2); i++) { | 10800 | for (i = 0; i < (data * 2); i++) { |
10586 | if ((i % 2) == 0) | 10801 | if ((i % 2) == 0) |
10587 | bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, | 10802 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, |
10588 | bp->link_params.hw_led_mode, | 10803 | SPEED_1000); |
10589 | bp->link_params.chip_id); | ||
10590 | else | 10804 | else |
10591 | bnx2x_set_led(bp, port, LED_MODE_OFF, 0, | 10805 | bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); |
10592 | bp->link_params.hw_led_mode, | ||
10593 | bp->link_params.chip_id); | ||
10594 | 10806 | ||
10595 | msleep_interruptible(500); | 10807 | msleep_interruptible(500); |
10596 | if (signal_pending(current)) | 10808 | if (signal_pending(current)) |
@@ -10598,10 +10810,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) | |||
10598 | } | 10810 | } |
10599 | 10811 | ||
10600 | if (bp->link_vars.link_up) | 10812 | if (bp->link_vars.link_up) |
10601 | bnx2x_set_led(bp, port, LED_MODE_OPER, | 10813 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, |
10602 | bp->link_vars.line_speed, | 10814 | bp->link_vars.line_speed); |
10603 | bp->link_params.hw_led_mode, | ||
10604 | bp->link_params.chip_id); | ||
10605 | 10815 | ||
10606 | return 0; | 10816 | return 0; |
10607 | } | 10817 | } |
@@ -10637,11 +10847,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { | |||
10637 | .set_sg = ethtool_op_set_sg, | 10847 | .set_sg = ethtool_op_set_sg, |
10638 | .get_tso = ethtool_op_get_tso, | 10848 | .get_tso = ethtool_op_get_tso, |
10639 | .set_tso = bnx2x_set_tso, | 10849 | .set_tso = bnx2x_set_tso, |
10640 | .self_test_count = bnx2x_self_test_count, | ||
10641 | .self_test = bnx2x_self_test, | 10850 | .self_test = bnx2x_self_test, |
10851 | .get_sset_count = bnx2x_get_sset_count, | ||
10642 | .get_strings = bnx2x_get_strings, | 10852 | .get_strings = bnx2x_get_strings, |
10643 | .phys_id = bnx2x_phys_id, | 10853 | .phys_id = bnx2x_phys_id, |
10644 | .get_stats_count = bnx2x_get_stats_count, | ||
10645 | .get_ethtool_stats = bnx2x_get_ethtool_stats, | 10854 | .get_ethtool_stats = bnx2x_get_ethtool_stats, |
10646 | }; | 10855 | }; |
10647 | 10856 | ||
@@ -10707,54 +10916,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | |||
10707 | 10916 | ||
10708 | static int bnx2x_poll(struct napi_struct *napi, int budget) | 10917 | static int bnx2x_poll(struct napi_struct *napi, int budget) |
10709 | { | 10918 | { |
10919 | int work_done = 0; | ||
10710 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | 10920 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, |
10711 | napi); | 10921 | napi); |
10712 | struct bnx2x *bp = fp->bp; | 10922 | struct bnx2x *bp = fp->bp; |
10713 | int work_done = 0; | ||
10714 | 10923 | ||
10924 | while (1) { | ||
10715 | #ifdef BNX2X_STOP_ON_ERROR | 10925 | #ifdef BNX2X_STOP_ON_ERROR |
10716 | if (unlikely(bp->panic)) | 10926 | if (unlikely(bp->panic)) { |
10717 | goto poll_panic; | 10927 | napi_complete(napi); |
10928 | return 0; | ||
10929 | } | ||
10718 | #endif | 10930 | #endif |
10719 | 10931 | ||
10720 | prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); | 10932 | if (bnx2x_has_tx_work(fp)) |
10721 | prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); | 10933 | bnx2x_tx_int(fp); |
10722 | |||
10723 | bnx2x_update_fpsb_idx(fp); | ||
10724 | |||
10725 | if (bnx2x_has_rx_work(fp)) { | ||
10726 | work_done = bnx2x_rx_int(fp, budget); | ||
10727 | 10934 | ||
10728 | /* must not complete if we consumed full budget */ | 10935 | if (bnx2x_has_rx_work(fp)) { |
10729 | if (work_done >= budget) | 10936 | work_done += bnx2x_rx_int(fp, budget - work_done); |
10730 | goto poll_again; | ||
10731 | } | ||
10732 | 10937 | ||
10733 | /* bnx2x_has_rx_work() reads the status block, thus we need to | 10938 | /* must not complete if we consumed full budget */ |
10734 | * ensure that status block indices have been actually read | 10939 | if (work_done >= budget) |
10735 | * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) | 10940 | break; |
10736 | * so that we won't write the "newer" value of the status block to IGU | 10941 | } |
10737 | * (if there was a DMA right after bnx2x_has_rx_work and | ||
10738 | * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx) | ||
10739 | * may be postponed to right before bnx2x_ack_sb). In this case | ||
10740 | * there will never be another interrupt until there is another update | ||
10741 | * of the status block, while there is still unhandled work. | ||
10742 | */ | ||
10743 | rmb(); | ||
10744 | 10942 | ||
10745 | if (!bnx2x_has_rx_work(fp)) { | 10943 | /* Fall out from the NAPI loop if needed */ |
10746 | #ifdef BNX2X_STOP_ON_ERROR | 10944 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
10747 | poll_panic: | 10945 | bnx2x_update_fpsb_idx(fp); |
10748 | #endif | 10946 | /* bnx2x_has_rx_work() reads the status block, thus we need |
10749 | napi_complete(napi); | 10947 | * to ensure that status block indices have been actually read |
10948 | * (bnx2x_update_fpsb_idx) prior to this check | ||
10949 | * (bnx2x_has_rx_work) so that we won't write the "newer" | ||
10950 | * value of the status block to IGU (if there was a DMA right | ||
10951 | * after bnx2x_has_rx_work and if there is no rmb, the memory | ||
10952 | * reading (bnx2x_update_fpsb_idx) may be postponed to right | ||
10953 | * before bnx2x_ack_sb). In this case there will never be | ||
10954 | * another interrupt until there is another update of the | ||
10955 | * status block, while there is still unhandled work. | ||
10956 | */ | ||
10957 | rmb(); | ||
10750 | 10958 | ||
10751 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | 10959 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
10752 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | 10960 | napi_complete(napi); |
10753 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | 10961 | /* Re-enable interrupts */ |
10754 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); | 10962 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, |
10963 | le16_to_cpu(fp->fp_c_idx), | ||
10964 | IGU_INT_NOP, 1); | ||
10965 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
10966 | le16_to_cpu(fp->fp_u_idx), | ||
10967 | IGU_INT_ENABLE, 1); | ||
10968 | break; | ||
10969 | } | ||
10970 | } | ||
10755 | } | 10971 | } |
10756 | 10972 | ||
10757 | poll_again: | ||
10758 | return work_done; | 10973 | return work_done; |
10759 | } | 10974 | } |
10760 | 10975 | ||
@@ -10843,10 +11058,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | |||
10843 | } | 11058 | } |
10844 | 11059 | ||
10845 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | 11060 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
10846 | rc |= XMIT_GSO_V4; | 11061 | rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); |
10847 | 11062 | ||
10848 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 11063 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
10849 | rc |= XMIT_GSO_V6; | 11064 | rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); |
10850 | 11065 | ||
10851 | return rc; | 11066 | return rc; |
10852 | } | 11067 | } |
@@ -10939,7 +11154,7 @@ exit_lbl: | |||
10939 | static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | 11154 | static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) |
10940 | { | 11155 | { |
10941 | struct bnx2x *bp = netdev_priv(dev); | 11156 | struct bnx2x *bp = netdev_priv(dev); |
10942 | struct bnx2x_fastpath *fp, *fp_stat; | 11157 | struct bnx2x_fastpath *fp; |
10943 | struct netdev_queue *txq; | 11158 | struct netdev_queue *txq; |
10944 | struct sw_tx_bd *tx_buf; | 11159 | struct sw_tx_bd *tx_buf; |
10945 | struct eth_tx_start_bd *tx_start_bd; | 11160 | struct eth_tx_start_bd *tx_start_bd; |
@@ -10961,11 +11176,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
10961 | fp_index = skb_get_queue_mapping(skb); | 11176 | fp_index = skb_get_queue_mapping(skb); |
10962 | txq = netdev_get_tx_queue(dev, fp_index); | 11177 | txq = netdev_get_tx_queue(dev, fp_index); |
10963 | 11178 | ||
10964 | fp = &bp->fp[fp_index + bp->num_rx_queues]; | 11179 | fp = &bp->fp[fp_index]; |
10965 | fp_stat = &bp->fp[fp_index]; | ||
10966 | 11180 | ||
10967 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { | 11181 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { |
10968 | fp_stat->eth_q_stats.driver_xoff++; | 11182 | fp->eth_q_stats.driver_xoff++; |
10969 | netif_tx_stop_queue(txq); | 11183 | netif_tx_stop_queue(txq); |
10970 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | 11184 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); |
10971 | return NETDEV_TX_BUSY; | 11185 | return NETDEV_TX_BUSY; |
@@ -11191,7 +11405,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
11191 | 11405 | ||
11192 | fp->tx_db.data.prod += nbd; | 11406 | fp->tx_db.data.prod += nbd; |
11193 | barrier(); | 11407 | barrier(); |
11194 | DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); | 11408 | DOORBELL(bp, fp->index, fp->tx_db.raw); |
11195 | 11409 | ||
11196 | mmiowb(); | 11410 | mmiowb(); |
11197 | 11411 | ||
@@ -11202,11 +11416,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
11202 | /* We want bnx2x_tx_int to "see" the updated tx_bd_prod | 11416 | /* We want bnx2x_tx_int to "see" the updated tx_bd_prod |
11203 | if we put Tx into XOFF state. */ | 11417 | if we put Tx into XOFF state. */ |
11204 | smp_mb(); | 11418 | smp_mb(); |
11205 | fp_stat->eth_q_stats.driver_xoff++; | 11419 | fp->eth_q_stats.driver_xoff++; |
11206 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) | 11420 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) |
11207 | netif_tx_wake_queue(txq); | 11421 | netif_tx_wake_queue(txq); |
11208 | } | 11422 | } |
11209 | fp_stat->tx_pkt++; | 11423 | fp->tx_pkt++; |
11210 | 11424 | ||
11211 | return NETDEV_TX_OK; | 11425 | return NETDEV_TX_OK; |
11212 | } | 11426 | } |
@@ -11321,6 +11535,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev) | |||
11321 | config->hdr.client_id = bp->fp->cl_id; | 11535 | config->hdr.client_id = bp->fp->cl_id; |
11322 | config->hdr.reserved1 = 0; | 11536 | config->hdr.reserved1 = 0; |
11323 | 11537 | ||
11538 | bp->set_mac_pending++; | ||
11539 | smp_wmb(); | ||
11540 | |||
11324 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 11541 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, |
11325 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | 11542 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), |
11326 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), | 11543 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), |
@@ -11370,9 +11587,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p) | |||
11370 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 11587 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
11371 | if (netif_running(dev)) { | 11588 | if (netif_running(dev)) { |
11372 | if (CHIP_IS_E1(bp)) | 11589 | if (CHIP_IS_E1(bp)) |
11373 | bnx2x_set_mac_addr_e1(bp, 1); | 11590 | bnx2x_set_eth_mac_addr_e1(bp, 1); |
11374 | else | 11591 | else |
11375 | bnx2x_set_mac_addr_e1h(bp, 1); | 11592 | bnx2x_set_eth_mac_addr_e1h(bp, 1); |
11376 | } | 11593 | } |
11377 | 11594 | ||
11378 | return 0; | 11595 | return 0; |
@@ -11830,21 +12047,14 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) | |||
11830 | 12047 | ||
11831 | static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) | 12048 | static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) |
11832 | { | 12049 | { |
11833 | char fw_file_name[40] = {0}; | 12050 | const char *fw_file_name; |
11834 | struct bnx2x_fw_file_hdr *fw_hdr; | 12051 | struct bnx2x_fw_file_hdr *fw_hdr; |
11835 | int rc, offset; | 12052 | int rc; |
11836 | 12053 | ||
11837 | /* Create a FW file name */ | ||
11838 | if (CHIP_IS_E1(bp)) | 12054 | if (CHIP_IS_E1(bp)) |
11839 | offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1); | 12055 | fw_file_name = FW_FILE_NAME_E1; |
11840 | else | 12056 | else |
11841 | offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H); | 12057 | fw_file_name = FW_FILE_NAME_E1H; |
11842 | |||
11843 | sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw", | ||
11844 | BCM_5710_FW_MAJOR_VERSION, | ||
11845 | BCM_5710_FW_MINOR_VERSION, | ||
11846 | BCM_5710_FW_REVISION_VERSION, | ||
11847 | BCM_5710_FW_ENGINEERING_VERSION); | ||
11848 | 12058 | ||
11849 | printk(KERN_INFO PFX "Loading %s\n", fw_file_name); | 12059 | printk(KERN_INFO PFX "Loading %s\n", fw_file_name); |
11850 | 12060 | ||
@@ -12098,9 +12308,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
12098 | 12308 | ||
12099 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 12309 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
12100 | bnx2x_free_skbs(bp); | 12310 | bnx2x_free_skbs(bp); |
12101 | for_each_rx_queue(bp, i) | 12311 | for_each_queue(bp, i) |
12102 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 12312 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
12103 | for_each_rx_queue(bp, i) | 12313 | for_each_queue(bp, i) |
12104 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 12314 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
12105 | bnx2x_free_mem(bp); | 12315 | bnx2x_free_mem(bp); |
12106 | 12316 | ||
@@ -12276,4 +12486,287 @@ static void __exit bnx2x_cleanup(void) | |||
12276 | module_init(bnx2x_init); | 12486 | module_init(bnx2x_init); |
12277 | module_exit(bnx2x_cleanup); | 12487 | module_exit(bnx2x_cleanup); |
12278 | 12488 | ||
12489 | #ifdef BCM_CNIC | ||
12490 | |||
12491 | /* count denotes the number of new completions we have seen */ | ||
12492 | static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | ||
12493 | { | ||
12494 | struct eth_spe *spe; | ||
12495 | |||
12496 | #ifdef BNX2X_STOP_ON_ERROR | ||
12497 | if (unlikely(bp->panic)) | ||
12498 | return; | ||
12499 | #endif | ||
12500 | |||
12501 | spin_lock_bh(&bp->spq_lock); | ||
12502 | bp->cnic_spq_pending -= count; | ||
12503 | |||
12504 | for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending; | ||
12505 | bp->cnic_spq_pending++) { | ||
12506 | |||
12507 | if (!bp->cnic_kwq_pending) | ||
12508 | break; | ||
12509 | |||
12510 | spe = bnx2x_sp_get_next(bp); | ||
12511 | *spe = *bp->cnic_kwq_cons; | ||
12512 | |||
12513 | bp->cnic_kwq_pending--; | ||
12514 | |||
12515 | DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", | ||
12516 | bp->cnic_spq_pending, bp->cnic_kwq_pending, count); | ||
12517 | |||
12518 | if (bp->cnic_kwq_cons == bp->cnic_kwq_last) | ||
12519 | bp->cnic_kwq_cons = bp->cnic_kwq; | ||
12520 | else | ||
12521 | bp->cnic_kwq_cons++; | ||
12522 | } | ||
12523 | bnx2x_sp_prod_update(bp); | ||
12524 | spin_unlock_bh(&bp->spq_lock); | ||
12525 | } | ||
12526 | |||
12527 | static int bnx2x_cnic_sp_queue(struct net_device *dev, | ||
12528 | struct kwqe_16 *kwqes[], u32 count) | ||
12529 | { | ||
12530 | struct bnx2x *bp = netdev_priv(dev); | ||
12531 | int i; | ||
12532 | |||
12533 | #ifdef BNX2X_STOP_ON_ERROR | ||
12534 | if (unlikely(bp->panic)) | ||
12535 | return -EIO; | ||
12536 | #endif | ||
12537 | |||
12538 | spin_lock_bh(&bp->spq_lock); | ||
12539 | |||
12540 | for (i = 0; i < count; i++) { | ||
12541 | struct eth_spe *spe = (struct eth_spe *)kwqes[i]; | ||
12542 | |||
12543 | if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) | ||
12544 | break; | ||
12545 | |||
12546 | *bp->cnic_kwq_prod = *spe; | ||
12547 | |||
12548 | bp->cnic_kwq_pending++; | ||
12549 | |||
12550 | DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", | ||
12551 | spe->hdr.conn_and_cmd_data, spe->hdr.type, | ||
12552 | spe->data.mac_config_addr.hi, | ||
12553 | spe->data.mac_config_addr.lo, | ||
12554 | bp->cnic_kwq_pending); | ||
12555 | |||
12556 | if (bp->cnic_kwq_prod == bp->cnic_kwq_last) | ||
12557 | bp->cnic_kwq_prod = bp->cnic_kwq; | ||
12558 | else | ||
12559 | bp->cnic_kwq_prod++; | ||
12560 | } | ||
12561 | |||
12562 | spin_unlock_bh(&bp->spq_lock); | ||
12563 | |||
12564 | if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) | ||
12565 | bnx2x_cnic_sp_post(bp, 0); | ||
12566 | |||
12567 | return i; | ||
12568 | } | ||
12569 | |||
12570 | static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) | ||
12571 | { | ||
12572 | struct cnic_ops *c_ops; | ||
12573 | int rc = 0; | ||
12574 | |||
12575 | mutex_lock(&bp->cnic_mutex); | ||
12576 | c_ops = bp->cnic_ops; | ||
12577 | if (c_ops) | ||
12578 | rc = c_ops->cnic_ctl(bp->cnic_data, ctl); | ||
12579 | mutex_unlock(&bp->cnic_mutex); | ||
12580 | |||
12581 | return rc; | ||
12582 | } | ||
12583 | |||
12584 | static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) | ||
12585 | { | ||
12586 | struct cnic_ops *c_ops; | ||
12587 | int rc = 0; | ||
12588 | |||
12589 | rcu_read_lock(); | ||
12590 | c_ops = rcu_dereference(bp->cnic_ops); | ||
12591 | if (c_ops) | ||
12592 | rc = c_ops->cnic_ctl(bp->cnic_data, ctl); | ||
12593 | rcu_read_unlock(); | ||
12594 | |||
12595 | return rc; | ||
12596 | } | ||
12597 | |||
12598 | /* | ||
12599 | * for commands that have no data | ||
12600 | */ | ||
12601 | static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) | ||
12602 | { | ||
12603 | struct cnic_ctl_info ctl = {0}; | ||
12604 | |||
12605 | ctl.cmd = cmd; | ||
12606 | |||
12607 | return bnx2x_cnic_ctl_send(bp, &ctl); | ||
12608 | } | ||
12609 | |||
12610 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid) | ||
12611 | { | ||
12612 | struct cnic_ctl_info ctl; | ||
12613 | |||
12614 | /* first we tell CNIC and only then we count this as a completion */ | ||
12615 | ctl.cmd = CNIC_CTL_COMPLETION_CMD; | ||
12616 | ctl.data.comp.cid = cid; | ||
12617 | |||
12618 | bnx2x_cnic_ctl_send_bh(bp, &ctl); | ||
12619 | bnx2x_cnic_sp_post(bp, 1); | ||
12620 | } | ||
12621 | |||
12622 | static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | ||
12623 | { | ||
12624 | struct bnx2x *bp = netdev_priv(dev); | ||
12625 | int rc = 0; | ||
12626 | |||
12627 | switch (ctl->cmd) { | ||
12628 | case DRV_CTL_CTXTBL_WR_CMD: { | ||
12629 | u32 index = ctl->data.io.offset; | ||
12630 | dma_addr_t addr = ctl->data.io.dma_addr; | ||
12631 | |||
12632 | bnx2x_ilt_wr(bp, index, addr); | ||
12633 | break; | ||
12634 | } | ||
12635 | |||
12636 | case DRV_CTL_COMPLETION_CMD: { | ||
12637 | int count = ctl->data.comp.comp_count; | ||
12638 | |||
12639 | bnx2x_cnic_sp_post(bp, count); | ||
12640 | break; | ||
12641 | } | ||
12642 | |||
12643 | /* rtnl_lock is held. */ | ||
12644 | case DRV_CTL_START_L2_CMD: { | ||
12645 | u32 cli = ctl->data.ring.client_id; | ||
12646 | |||
12647 | bp->rx_mode_cl_mask |= (1 << cli); | ||
12648 | bnx2x_set_storm_rx_mode(bp); | ||
12649 | break; | ||
12650 | } | ||
12651 | |||
12652 | /* rtnl_lock is held. */ | ||
12653 | case DRV_CTL_STOP_L2_CMD: { | ||
12654 | u32 cli = ctl->data.ring.client_id; | ||
12655 | |||
12656 | bp->rx_mode_cl_mask &= ~(1 << cli); | ||
12657 | bnx2x_set_storm_rx_mode(bp); | ||
12658 | break; | ||
12659 | } | ||
12660 | |||
12661 | default: | ||
12662 | BNX2X_ERR("unknown command %x\n", ctl->cmd); | ||
12663 | rc = -EINVAL; | ||
12664 | } | ||
12665 | |||
12666 | return rc; | ||
12667 | } | ||
12668 | |||
12669 | static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) | ||
12670 | { | ||
12671 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | ||
12672 | |||
12673 | if (bp->flags & USING_MSIX_FLAG) { | ||
12674 | cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; | ||
12675 | cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; | ||
12676 | cp->irq_arr[0].vector = bp->msix_table[1].vector; | ||
12677 | } else { | ||
12678 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; | ||
12679 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; | ||
12680 | } | ||
12681 | cp->irq_arr[0].status_blk = bp->cnic_sb; | ||
12682 | cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); | ||
12683 | cp->irq_arr[1].status_blk = bp->def_status_blk; | ||
12684 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; | ||
12685 | |||
12686 | cp->num_irq = 2; | ||
12687 | } | ||
12688 | |||
/* Register a CNIC driver (@ops, @data) with this bnx2x device.
 * Allocates the kernel work-queue ring, initializes the CNIC status block
 * and IRQ info, sets the iSCSI MAC, and finally publishes @ops via RCU.
 * Returns 0 on success, -EINVAL/-EBUSY/-ENOMEM on failure.
 * NOTE(review): caller appears to be expected to hold rtnl_lock (cnic side);
 * not verifiable from this file.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* refuse registration while interrupts are disabled (intr_sem held) */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* one page of slow-path work-queue entries for CNIC commands */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: producer == consumer; last points at the final slot */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish last: readers dereference cnic_ops only after all of the
	 * state above is visible (rcu_assign_pointer orders the stores) */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12726 | |||
/* Unregister the CNIC driver from this bnx2x device.
 * Clears the iSCSI MAC (if set), unpublishes cnic_ops, waits for all RCU
 * readers (bnx2x_cnic_ctl_send_bh) to drain, then frees the kwq ring.
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	/* unpublish under the mutex so mutex-side callers see NULL too */
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait for in-flight RCU readers before freeing what they may use */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12746 | |||
12747 | struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) | ||
12748 | { | ||
12749 | struct bnx2x *bp = netdev_priv(dev); | ||
12750 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | ||
12751 | |||
12752 | cp->drv_owner = THIS_MODULE; | ||
12753 | cp->chip_id = CHIP_ID(bp); | ||
12754 | cp->pdev = bp->pdev; | ||
12755 | cp->io_base = bp->regview; | ||
12756 | cp->io_base2 = bp->doorbells; | ||
12757 | cp->max_kwqe_pending = 8; | ||
12758 | cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context); | ||
12759 | cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; | ||
12760 | cp->ctx_tbl_len = CNIC_ILT_LINES; | ||
12761 | cp->starting_cid = BCM_CNIC_CID_START; | ||
12762 | cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; | ||
12763 | cp->drv_ctl = bnx2x_drv_ctl; | ||
12764 | cp->drv_register_cnic = bnx2x_register_cnic; | ||
12765 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; | ||
12766 | |||
12767 | return cp; | ||
12768 | } | ||
12769 | EXPORT_SYMBOL(bnx2x_cnic_probe); | ||
12770 | |||
12771 | #endif /* BCM_CNIC */ | ||
12279 | 12772 | ||