author		Eilon Greenstein <eilong@broadcom.com>	2009-02-12 03:36:11 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-16 02:31:05 -0500
commit		555f6c78373f969f14487253abe331d085449360 (patch)
tree		aa5d388ebd501f795bd9cec3c3727bedb97d59a7 /drivers/net
parent		8d9c5f34a25d6a30d15a800d83a3428ad44271d8 (diff)
bnx2x: Multi-queue
Adding Tx multi-queue and enabling multi-queue by default
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
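
This commit converts bnx2x to the kernel's per-queue TX interface. For reference, a minimal sketch of that pattern follows; the mydrv_* names and the queue-full helper are hypothetical stand-ins, not code from this patch:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define MYDRV_MAX_QUEUES	16	/* assumption: hardware ring limit */

	/* hypothetical helper: is per-queue ring q out of descriptors? */
	static bool mydrv_ring_full(struct net_device *dev, u16 q)
	{
		return false;			/* stub for the sketch */
	}

	static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		u16 q = skb_get_queue_mapping(skb);	/* picked by the stack */
		struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

		/* ... post skb on hardware ring q here ... */

		if (mydrv_ring_full(dev, q))
			netif_tx_stop_queue(txq);	/* XOFF this ring only */
		return NETDEV_TX_OK;
	}

	static struct net_device *mydrv_alloc(unsigned int used_queues)
	{
		/* allocate with room for the maximum queue count, as the
		 * patch does with alloc_etherdev_mq(..., MAX_CONTEXT) */
		struct net_device *dev =
			alloc_etherdev_mq(sizeof(long), MYDRV_MAX_QUEUES);

		if (dev)	/* then advertise how many are really used */
			dev->real_num_tx_queues = used_queues;
		return dev;
	}

The key property is that flow control becomes per ring: stopping queue q leaves every other queue transmitting, where the old netif_stop_queue()/netif_wake_queue() calls throttled the whole device.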
Diffstat (limited to 'drivers/net')

 -rw-r--r--  drivers/net/bnx2x.h          |  33
 -rw-r--r--  drivers/net/bnx2x_fw_defs.h  |   4
 -rw-r--r--  drivers/net/bnx2x_main.c     | 254

 3 files changed, 178 insertions(+), 113 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index de094d4b68a0..7edad1f9b330 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -25,6 +25,10 @@
 #endif


+#define BNX2X_MULTI_QUEUE
+
+#define BNX2X_NEW_NAPI
+
 /* error/debug prints */

 #define DRV_MODULE_NAME		"bnx2x"
@@ -266,6 +270,7 @@ struct bnx2x_fastpath {
 	u64			tpa_queue_used;
 #endif

+	char			name[IFNAMSIZ];
 	struct bnx2x		*bp; /* parent */
 };

@@ -680,11 +685,7 @@ struct bnx2x_eth_stats {
 		(offsetof(struct bnx2x_eth_stats, stat_name) / 4)


-#ifdef BNX2X_MULTI
 #define MAX_CONTEXT			16
-#else
-#define MAX_CONTEXT			1
-#endif

 union cdu_context {
 	struct eth_context eth;
@@ -859,8 +860,9 @@ struct bnx2x {
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000

-	int			num_queues;
-#define BP_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? 4 : 16)
+	int			multi_mode;
+	int			num_rx_queues;
+	int			num_tx_queues;

 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -911,11 +913,19 @@ struct bnx2x {
 };


-#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
+#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT / E1HVN_MAX) : \
+						 MAX_CONTEXT)
+#define BNX2X_NUM_QUEUES(bp)	max(bp->num_rx_queues, bp->num_tx_queues)
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)

+#define for_each_rx_queue(bp, var) \
+			for (var = 0; var < bp->num_rx_queues; var++)
+#define for_each_tx_queue(bp, var) \
+			for (var = 0; var < bp->num_tx_queues; var++)
+#define for_each_queue(bp, var) \
+			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
-		for (var = 1; var < bp->num_queues; var++)
-#define is_multi(bp)		(bp->num_queues > 1)
+			for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)


 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1120,12 +1130,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 	 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)


-#define MULTI_FLAGS \
+#define MULTI_FLAGS(bp) \
 	(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
 	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
 	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
 	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
-	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE)
+	 (bp->multi_mode << \
+	  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))

 #define MULTI_MASK			0x7f

diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 2fe14a25ea3e..9eb1d131179c 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -259,6 +259,10 @@
 #define ETH_MAX_AGGREGATION_QUEUES_E1 (32)
 #define ETH_MAX_AGGREGATION_QUEUES_E1H (64)

+/* ETH RSS modes */
+#define ETH_RSS_MODE_DISABLED	0
+#define ETH_RSS_MODE_REGULAR	1
+

 /**
 * This file defines HSI constatnts common to all microcode flows
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 3ca9c969a688..60762f769efc 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -73,12 +73,14 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);

+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+
 static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
-static int use_multi;

 module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
@@ -89,10 +91,6 @@ MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");

-#ifdef BNX2X_MULTI
-module_param(use_multi, int, 0);
-MODULE_PARM_DESC(use_multi, "use per-CPU queues");
-#endif
 static struct workqueue_struct *bnx2x_wq;

 enum bnx2x_board_type {
@@ -845,6 +843,7 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 {
 	struct bnx2x *bp = fp->bp;
+	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
 	int done = 0;

@@ -853,6 +852,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		return;
 #endif

+	txq = netdev_get_tx_queue(bp->dev, fp->index);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;

@@ -882,24 +882,24 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	fp->tx_pkt_cons = sw_cons;
 	fp->tx_bd_cons = bd_cons;

-	/* Need to make the tx_cons update visible to start_xmit()
-	 * before checking for netif_queue_stopped().  Without the
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
 	 * memory barrier, there is a small possibility that start_xmit()
 	 * will miss it and cause the queue to be stopped forever.
 	 */
 	smp_mb();

 	/* TBD need a thresh? */
-	if (unlikely(netif_queue_stopped(bp->dev))) {
+	if (unlikely(netif_tx_queue_stopped(txq))) {

-		netif_tx_lock(bp->dev);
+		__netif_tx_lock(txq, smp_processor_id());

-		if (netif_queue_stopped(bp->dev) &&
+		if ((netif_tx_queue_stopped(txq)) &&
 		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
-			netif_wake_queue(bp->dev);
+			netif_tx_wake_queue(txq);

-		netif_tx_unlock(bp->dev);
+		__netif_tx_unlock(txq);
 	}
 }

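
The comment in the hunk above describes the classic lost-wakeup pairing: each side publishes its ring index, issues a full barrier, and only then re-checks the other side, so a wakeup cannot fall between the producer's availability check and the consumer's queue-stopped check. Schematically (hypothetical ring helpers, not the driver's code):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct my_ring {
		u16 prod, cons;
		u16 hw_cons;		/* advanced by hardware writeback */
	};

	static u16 ring_space(struct my_ring *r)
	{
		return (u16)(r->cons - r->prod - 1);	/* free slots mod 2^16 */
	}

	/* xmit side: publish prod, barrier, then maybe stop the queue */
	static void xmit_side(struct my_ring *r, struct netdev_queue *txq)
	{
		r->prod++;
		smp_mb();	/* pairs with smp_mb() in completion_side() */
		if (ring_space(r) < MAX_SKB_FRAGS + 3)
			netif_tx_stop_queue(txq);
	}

	/* completion side: publish cons, barrier, then maybe wake */
	static void completion_side(struct my_ring *r, struct netdev_queue *txq)
	{
		r->cons = r->hw_cons;
		smp_mb();	/* pairs with smp_mb() in xmit_side() */
		if (netif_tx_queue_stopped(txq) &&
		    ring_space(r) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}

If either barrier is dropped, the stop and the wake can both act on stale state and the queue stays stopped forever, which is exactly the failure mode the comment warns about.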
@@ -1403,8 +1403,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	mmiowb(); /* keep prod updates ordered */

 	DP(NETIF_MSG_RX_STATUS,
-	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
-	   bd_prod, rx_comp_prod, rx_sge_prod);
+	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
 }

 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
@@ -1662,8 +1662,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)

 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x *bp = netdev_priv(dev_instance);
 	u16 status = bnx2x_ack_int(bp);
 	u16 mask;

@@ -4295,7 +4294,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 	   "rx_buf_size %d effective_mtu %d\n",
 	   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

-	for_each_queue(bp, j) {
+	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];

 		for (i = 0; i < max_agg_queues; i++) {
@@ -4318,7 +4317,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}

-	for_each_queue(bp, j) {
+	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];

 		fp->rx_bd_cons = 0;
@@ -4430,7 +4429,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 {
 	int i, j;

-	for_each_queue(bp, j) {
+	for_each_tx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];

 		for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -4543,14 +4542,15 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	int func = BP_FUNC(bp);
 	int i;

-	if (!is_multi(bp))
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return;

-	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
+	DP(NETIF_MSG_IFUP,
+	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			BP_CL_ID(bp) + (i % bp->num_queues));
+			BP_CL_ID(bp) + (i % bp->num_rx_queues));
 }

 static void bnx2x_set_client_config(struct bnx2x *bp)
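
For context on the loop above: this is a standard RSS indirection table. MULTI_MASK is 0x7f, so the hardware folds the packet hash into 128 buckets, each holding a client ID; filling the buckets round-robin spreads them evenly across the rx queues. A sketch under that assumption (128-entry table):

	#define INDIR_TABLE_SIZE	128	/* consistent with MULTI_MASK 0x7f */

	static void fill_indir_table(u8 *table, u8 base_cl_id, int num_rx_queues)
	{
		int i;

		/* bucket i -> client (base + i % nqueues), as in the loop above */
		for (i = 0; i < INDIR_TABLE_SIZE; i++)
			table[i] = base_cl_id + (i % num_rx_queues);
	}

	/* at receive time the hardware effectively computes
	 *	client = table[rss_hash & MULTI_MASK];
	 * so a given flow always lands on the same rx queue. */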
@@ -4684,7 +4684,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	u16 max_agg_size;

 	if (is_multi(bp)) {
-		tstorm_config.config_flags = MULTI_FLAGS;
+		tstorm_config.config_flags = MULTI_FLAGS(bp);
 		tstorm_config.rss_result_mask = MULTI_MASK;
 	}
 	if (IS_E1HMF(bp))
@@ -4764,7 +4764,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
 			  SGE_PAGE_SIZE * PAGES_PER_SGE),
 		    (u32)0xffff);
-	for_each_queue(bp, i) {
+	for_each_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];

 		REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5809,20 +5809,19 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 	int i;

 	/* fastpath */
+	/* Common */
 	for_each_queue(bp, i) {

-		/* Status blocks */
+		/* status blocks */
 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
 			       bnx2x_fp(bp, i, status_blk_mapping),
 			       sizeof(struct host_status_block) +
 			       sizeof(struct eth_tx_db_data));
+	}
+	/* Rx */
+	for_each_rx_queue(bp, i) {

-		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
-		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
-			       bnx2x_fp(bp, i, tx_desc_mapping),
-			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
-
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
 			       bnx2x_fp(bp, i, rx_desc_mapping),
@@ -5839,6 +5838,15 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 			       bnx2x_fp(bp, i, rx_sge_mapping),
 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
+	/* Tx */
+	for_each_tx_queue(bp, i) {
+
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
+			       bnx2x_fp(bp, i, tx_desc_mapping),
+			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
+	}
 	/* end of fastpath */

 	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
@@ -5881,29 +5889,20 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 	int i;

 	/* fastpath */
+	/* Common */
 	for_each_queue(bp, i) {
 		bnx2x_fp(bp, i, bp) = bp;

-		/* Status blocks */
+		/* status blocks */
 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
 				&bnx2x_fp(bp, i, status_blk_mapping),
 				sizeof(struct host_status_block) +
 				sizeof(struct eth_tx_db_data));
+	}
+	/* Rx */
+	for_each_rx_queue(bp, i) {

-		bnx2x_fp(bp, i, hw_tx_prods) =
-				(void *)(bnx2x_fp(bp, i, status_blk) + 1);
-
-		bnx2x_fp(bp, i, tx_prods_mapping) =
-				bnx2x_fp(bp, i, status_blk_mapping) +
-				sizeof(struct host_status_block);
-
-		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
-			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
-				&bnx2x_fp(bp, i, tx_desc_mapping),
-				sizeof(struct eth_tx_bd) * NUM_TX_BD);
-
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
 			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
@@ -5922,6 +5921,23 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 				&bnx2x_fp(bp, i, rx_sge_mapping),
 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
+	/* Tx */
+	for_each_tx_queue(bp, i) {
+
+		bnx2x_fp(bp, i, hw_tx_prods) =
+				(void *)(bnx2x_fp(bp, i, status_blk) + 1);
+
+		bnx2x_fp(bp, i, tx_prods_mapping) =
+				bnx2x_fp(bp, i, status_blk_mapping) +
+				sizeof(struct host_status_block);
+
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
+			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
+				&bnx2x_fp(bp, i, tx_desc_mapping),
+				sizeof(struct eth_tx_bd) * NUM_TX_BD);
+	}
 	/* end of fastpath */

 	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
@@ -5975,7 +5991,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
 	int i;

-	for_each_queue(bp, i) {
+	for_each_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];

 		u16 bd_cons = fp->tx_bd_cons;
@@ -5993,7 +6009,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
 	int i, j;

-	for_each_queue(bp, j) {
+	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];

 		for (i = 0; i < NUM_RX_BD; i++) {
@@ -6073,7 +6089,7 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 	}

 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
-			     bp->num_queues + offset);
+			     BNX2X_NUM_QUEUES(bp) + offset);
 	if (rc) {
 		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
 		return -1;
@@ -6095,19 +6111,32 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 	}

 	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
 		rc = request_irq(bp->msix_table[i + offset].vector,
-				 bnx2x_msix_fp_int, 0,
-				 bp->dev->name, &bp->fp[i]);
+				 bnx2x_msix_fp_int, 0, fp->name, fp);
 		if (rc) {
-			BNX2X_ERR("request fp #%d irq failed rc -%d\n",
-				  i + offset, -rc);
+			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
 			bnx2x_free_msix_irqs(bp);
 			return -EBUSY;
 		}

-		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
+		fp->state = BNX2X_FP_STATE_IRQ;
 	}

+	i = BNX2X_NUM_QUEUES(bp);
+	if (is_multi(bp))
+		printk(KERN_INFO PFX
+		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
+		       bp->dev->name, bp->msix_table[0].vector,
+		       bp->msix_table[offset].vector,
+		       bp->msix_table[offset + i - 1].vector);
+	else
+		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
+		       bp->dev->name, bp->msix_table[0].vector,
+		       bp->msix_table[offset + i - 1].vector);
+
 	return 0;
 }

@@ -6127,7 +6156,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;

-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }

@@ -6135,7 +6164,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;

-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }

@@ -6143,10 +6172,10 @@ static void bnx2x_netif_start(struct bnx2x *bp)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
 			bnx2x_napi_enable(bp);
 			bnx2x_int_enable(bp);
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_tx_wake_all_queues(bp->dev);
 		}
 	}
 }
@@ -6320,16 +6349,19 @@ static int bnx2x_setup_leading(struct bnx2x *bp)

 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
 {
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+
 	/* reset IGU state */
-	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

 	/* SETUP ramrod */
-	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
+	fp->state = BNX2X_FP_STATE_OPENING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
+		      fp->cl_id, 0);

 	/* Wait for completion */
 	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
-				 &(bp->fp[index].state), 0);
+				 &(fp->state), 0);
 }

 static int bnx2x_poll(struct napi_struct *napi, int budget);
@@ -6340,6 +6372,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
 	u32 load_code;
 	int i, rc = 0;
+	int num_queues;
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EPERM;
@@ -6348,22 +6381,22 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

 	if (use_inta) {
-		bp->num_queues = 1;
-
+		num_queues = 1;
+		bp->num_rx_queues = num_queues;
+		bp->num_tx_queues = num_queues;
+		DP(NETIF_MSG_IFUP,
+		   "set number of queues to %d\n", num_queues);
 	} else {
-		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
-			/* user requested number */
-			bp->num_queues = use_multi;
-
-		else if (use_multi)
-			bp->num_queues = min_t(u32, num_online_cpus(),
-					       BP_MAX_QUEUES(bp));
+		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
+			num_queues = min_t(u32, num_online_cpus(),
+					   BNX2X_MAX_QUEUES(bp));
 		else
-			bp->num_queues = 1;
-
-		DP(NETIF_MSG_IFUP,
-		   "set number of queues to %d\n", bp->num_queues);
+			num_queues = 1;
+		bp->num_rx_queues = num_queues;
+		bp->num_tx_queues = num_queues;
+		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
+		   " number of tx queues to %d\n",
+		   bp->num_rx_queues, bp->num_tx_queues);
 		/* if we can't use MSI-X we only need one fp,
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
@@ -6371,26 +6404,30 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		rc = bnx2x_enable_msix(bp);
 		if (rc) {
 			/* failed to enable MSI-X */
-			bp->num_queues = 1;
-			if (use_multi)
-				BNX2X_ERR("Multi requested but failed"
-					  " to enable MSI-X\n");
+			num_queues = 1;
+			bp->num_rx_queues = num_queues;
+			bp->num_tx_queues = num_queues;
+			if (bp->multi_mode)
+				BNX2X_ERR("Multi requested but failed to "
+					  "enable MSI-X set number of "
+					  "queues to %d\n", num_queues);
 		}
 	}
+	bp->dev->real_num_tx_queues = bp->num_tx_queues;

 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;

-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 				((bp->flags & TPA_ENABLE_FLAG) == 0);

-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, 128);

 #ifdef BNX2X_STOP_ON_ERROR
-	for_each_queue(bp, i) {
+	for_each_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];

 		fp->poll_no_work = 0;
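
The vector negotiation above follows a common shape: request one MSI-X vector per fastpath plus one for the slowpath, and if the platform cannot grant them, collapse to a single queue so MSI or legacy INTx still works. Condensed, with hypothetical names (pci_enable_msix() returns nonzero when the request cannot be met as asked):

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	/* hypothetical: returns how many fastpath queues ended up usable */
	static int setup_vectors(struct pci_dev *pdev, struct net_device *dev,
				 struct msix_entry *msix_table, int hw_max)
	{
		int num_queues = min_t(u32, num_online_cpus(), hw_max);

		/* one vector per fastpath + one slowpath vector */
		if (pci_enable_msix(pdev, msix_table, num_queues + 1))
			num_queues = 1;	/* refused: fall back to MSI/INTx */

		dev->real_num_tx_queues = num_queues;
		return num_queues;
	}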
@@ -6512,13 +6549,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
-		netif_wake_queue(bp->dev);
+		netif_tx_wake_all_queues(bp->dev);
 		/* Initialize the receive filter. */
 		bnx2x_set_rx_mode(bp->dev);
 		break;

 	case LOAD_OPEN:
-		netif_start_queue(bp->dev);
+		netif_tx_start_all_queues(bp->dev);
 		/* Initialize the receive filter. */
 		bnx2x_set_rx_mode(bp->dev);
 		break;
@@ -6551,14 +6588,14 @@ load_error3:
 	bp->port.pmf = 0;
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error2:
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 load_error1:
 	bnx2x_napi_disable(bp);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);

@@ -6569,15 +6606,16 @@ load_error1:

 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 {
+	struct bnx2x_fastpath *fp = &bp->fp[index];
 	int rc;

 	/* halt the connection */
-	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
+	fp->state = BNX2X_FP_STATE_HALTING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
-			       &(bp->fp[index].state), 1);
+			       &(fp->state), 1);
 	if (rc) /* timeout */
 		return rc;

@@ -6586,7 +6624,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)

 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
-			       &(bp->fp[index].state), 1);
+			       &(fp->state), 1);
 	return rc;
 }

@@ -6734,8 +6772,8 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	/* Release IRQs */
 	bnx2x_free_irq(bp);

-	/* Wait until tx fast path tasks complete */
-	for_each_queue(bp, i) {
+	/* Wait until tx fastpath tasks complete */
+	for_each_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];

 		cnt = 1000;
@@ -6867,9 +6905,9 @@ unload_error:

 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);

@@ -7597,6 +7635,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 		printk(KERN_ERR PFX
 		       "MCP disabled, must load devices in order!\n");

+	/* Set multi queue mode */
+	if ((multi_mode != ETH_RSS_MODE_DISABLED) && (use_inta)) {
+		printk(KERN_ERR PFX
+		       "Multi disabled since INTA is requested\n");
+		multi_mode = ETH_RSS_MODE_DISABLED;
+	}
+	bp->multi_mode = multi_mode;
+
+
 	/* Set TPA flags */
 	if (disable_tpa) {
 		bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9569,6 +9616,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_fastpath *fp;
+	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_bd *tx_bd;
 	struct eth_tx_parse_bd *pbd = NULL;
@@ -9585,12 +9633,14 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 #endif

-	fp_index = (smp_processor_id() % bp->num_queues);
+	fp_index = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, fp_index);
+
 	fp = &bp->fp[fp_index];

 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
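
Note the change of selection policy in fp_index above: the old smp_processor_id() % bp->num_queues could move a single TCP flow between rings whenever the sending task migrated CPUs, which risks packet reordering; with skb_get_queue_mapping() the stack's transmit hash pins each flow to one ring before ndo_start_xmit runs. A toy illustration of why hashing preserves flow affinity (not the kernel's actual hash function):

	/* same 4-tuple -> same queue, so packets of a flow never reorder */
	static u16 flow_queue(u32 saddr, u32 daddr, u16 sport, u16 dport,
			      u16 num_tx_queues)
	{
		u32 h = saddr ^ daddr ^ ((u32)sport << 16 | dport);

		h ^= h >> 16;
		h ^= h >> 8;
		return (u16)(h % num_tx_queues);
	}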
@@ -9829,10 +9879,10 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
 		   if we put Tx into XOFF state. */
 		smp_mb();
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		bp->eth_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 	fp->tx_pkt++;

@@ -10324,7 +10374,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	printk(KERN_INFO "%s", version);

 	/* dev zeroed in init_etherdev */
-	dev = alloc_etherdev(sizeof(*bp));
+	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
 	if (!dev) {
 		printk(KERN_ERR PFX "Cannot allocate net device\n");
 		return -ENOMEM;
@@ -10496,9 +10546,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)

 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
