author    Eilon Greenstein <eilong@broadcom.com>  2009-08-13 01:53:28 -0400
committer David S. Miller <davem@davemloft.net>  2009-08-13 01:53:28 -0400
commit    ca00392cb8f5227c67ff52c656d91a764d022ab9 (patch)
tree      007d82074e49d25d1ee6bfb484392032d463be91 /drivers/net
parent    6200f09036ee6f12822a9133dba7ed011b179c69 (diff)
bnx2x: Using the new FW

The new FW improves the packets-per-second rate. It required many changes
in the FW itself, which in turn implies many changes in the driver to
support it. It is now also possible for the driver to use a separate
MSI-X vector for Rx and for Tx - this also adds some complexity to this
change. All things said - after this patch, practically all performance
metrics show improvement.

Though Vladislav Zolotarov is not signed on this patch, he did most of
the job and deserves credit for that.

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
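For readers tracking the queue rework in bnx2x.h below: the fastpath array is now laid out with all Rx queues first, followed by all Tx queues, so that each direction can own its own MSI-X vector. A minimal sketch of how vectors would be bound under that layout - illustrative only, not code from this patch (the wrapper name is made up; bnx2x_msix_fp_int() and the +1 offset past the slow-path vector follow the driver's existing MSI-X table conventions):

	/* Sketch: Rx fastpaths occupy indices [0, num_rx_queues) and Tx
	 * fastpaths occupy [num_rx_queues, BNX2X_NUM_QUEUES(bp)), matching
	 * the reworked for_each_*_queue() macros in bnx2x.h. */
	static int example_request_fp_irqs(struct bnx2x *bp)
	{
		int i, rc;

		for_each_queue(bp, i) {	/* Rx block first, then Tx block */
			struct bnx2x_fastpath *fp = &bp->fp[i];

			fp->is_rx_queue = (i < bp->num_rx_queues);
			/* vector 0 serves the slow path, hence the +1 */
			rc = request_irq(bp->msix_table[i + 1].vector,
					 bnx2x_msix_fp_int, 0, fp->name, fp);
			if (rc)
				return rc;
		}
		return 0;
	}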
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2x.h          45
-rw-r--r--  drivers/net/bnx2x_fw_defs.h  379
-rw-r--r--  drivers/net/bnx2x_hsi.h      361
-rw-r--r--  drivers/net/bnx2x_main.c     888
-rw-r--r--  drivers/net/bnx2x_reg.h      60
5 files changed, 1074 insertions(+), 659 deletions(-)
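One orientation note before the diff: the patch drops the old host-memory doorbell block (struct eth_tx_db_data) in favor of a doorbell message written directly to the chip. The new union db_prod lets the driver fill the structured doorbell_set_prod message and then post it with a single raw 32-bit write through the DOORBELL() macro. A hedged sketch of the resulting Tx doorbell path (simplified; the wrapper name is illustrative, and fp->tx_db.data.header is assumed to be initialized once at ring setup):

	static inline void example_post_tx_doorbell(struct bnx2x *bp,
						    struct bnx2x_fastpath *fp,
						    u16 bd_prod)
	{
		fp->tx_db.data.prod = bd_prod;	/* new Tx BD producer */
		wmb();	/* BDs must be visible before the doorbell fires */
		DOORBELL(bp, fp->index, fp->tx_db.raw);
	}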
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 8bd80fca9788..16ccba8dda1b 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -142,6 +142,9 @@ struct sw_rx_bd {
 struct sw_tx_bd {
	struct sk_buff	*skb;
	u16		first_bd;
+	u8		flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD		(1<<0)
 };
 
 struct sw_rx_page {
@@ -149,6 +152,11 @@ struct sw_rx_page {
	DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
+union db_prod {
+	struct doorbell_set_prod data;
+	u32		raw;
+};
+
 
 /* MC hsi */
 #define BCM_PAGE_SHIFT			12
@@ -234,15 +242,14 @@ struct bnx2x_fastpath {
 
	struct napi_struct	napi;
 
+	u8			is_rx_queue;
+
	struct host_status_block *status_blk;
	dma_addr_t		status_blk_mapping;
 
-	struct eth_tx_db_data	*hw_tx_prods;
-	dma_addr_t		tx_prods_mapping;
-
	struct sw_tx_bd		*tx_buf_ring;
 
-	struct eth_tx_bd	*tx_desc_ring;
+	union eth_tx_bd_types	*tx_desc_ring;
	dma_addr_t		tx_desc_mapping;
 
	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
@@ -272,6 +279,8 @@ struct bnx2x_fastpath {
	u8			cl_id;	/* eth client id */
	u8			sb_id;	/* status block number in HW */
 
+	union db_prod		tx_db;
+
	u16			tx_pkt_prod;
	u16			tx_pkt_cons;
	u16			tx_bd_prod;
@@ -309,21 +318,24 @@ struct bnx2x_fastpath {
	struct xstorm_per_client_stats old_xclient;
	struct bnx2x_eth_q_stats eth_q_stats;
 
-	char			name[IFNAMSIZ];
+	/* The size is calculated using the following:
	     sizeof name field from netdev structure +
	     4 ('-Xx-' string) +
	     4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
+	char			name[FP_NAME_SIZE];
	struct bnx2x		*bp; /* parent */
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
 
-#define BNX2X_HAS_WORK(fp)	(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))
-
 
 /* MC hsi */
 #define MAX_FETCH_BD			13	/* HW max BDs per packet */
 #define RX_COPY_THRESH			92
 
 #define NUM_TX_RINGS			16
-#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
+#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
 #define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
 #define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD			(NUM_TX_BD - 1)
@@ -395,7 +407,7 @@ struct bnx2x_fastpath {
 #define DPM_TRIGER_TYPE			0x40
 #define DOORBELL(bp, cid, val) \
	do { \
-		writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
+		writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
		       DPM_TRIGER_TYPE); \
	} while (0)
 
@@ -902,8 +914,6 @@ struct bnx2x {
	u16			rx_quick_cons_trip;
	u16			rx_ticks_int;
	u16			rx_ticks;
-/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT		(0xf0*12)
 
	u32			lin_cnt;
 
@@ -985,19 +995,20 @@ struct bnx2x {
 };
 
 
-#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT / E1HVN_MAX) : \
-						 MAX_CONTEXT)
-#define BNX2X_NUM_QUEUES(bp)	max(bp->num_rx_queues, bp->num_tx_queues)
-#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
+#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \
+					      : (MAX_CONTEXT/2))
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_rx_queues + bp->num_tx_queues)
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 2)
 
 #define for_each_rx_queue(bp, var) \
			for (var = 0; var < bp->num_rx_queues; var++)
 #define for_each_tx_queue(bp, var) \
-			for (var = 0; var < bp->num_tx_queues; var++)
+			for (var = bp->num_rx_queues; \
+			     var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_queue(bp, var) \
			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
-			for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
+			for (var = 1; var < bp->num_rx_queues; var++)
 
 
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
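The BNX2X_HAS_WORK() macro removed above had to OR the Rx and Tx checks because one vector used to serve both directions; with dedicated per-direction queues, the poll path can test only the direction a fastpath owns. A sketch of the idea, reusing the helpers the removed macro already referenced together with the new is_rx_queue flag (the wrapper name is illustrative):

	static bool example_fp_has_work(struct bnx2x_fastpath *fp)
	{
		return fp->is_rx_queue ? bnx2x_has_rx_work(fp)
				       : bnx2x_has_tx_work(fp);
	}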
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e2df23803598..931dcace5628 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -12,48 +12,117 @@
	(IS_E1H_OFFSET ? 0x7000 : 0x1000)
 #define CSTORM_ASSERT_LIST_OFFSET(idx) \
	(IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
-#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
+#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \
+	(IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \
	0x40) + (index * 0x4)))
-#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
-#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
+#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \
+	(IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \
+	((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \
+	0x80) + (index * 0x4)))
+#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100)) : (0x3540 + (function * 0x40)))
+#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \
+	((function&1) * 0x200)) : (0x35c0 + (function * 0x80)))
+#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100)) : (0x3548 + (function * 0x40)))
+#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \
+	((function&1) * 0x200)) : (0x35c8 + (function * 0x80)))
 #define CSTORM_FUNCTION_MODE_OFFSET \
	(IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
-#define CSTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
-#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
+#define CSTORM_HC_BTR_C_OFFSET(port) \
+	(IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0)))
+#define CSTORM_HC_BTR_U_OFFSET(port) \
+	(IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0)))
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \
+	(function * 0x8)))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \
+	(function * 0x8)))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2410 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2414 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x241c + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2427 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2412 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2426 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \
+	(IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \
+	(0x2424 + (function * 0xc0) + (eqIdx * 0x18)))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \
+	(function * 0x8)))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \
+	(function * 0x8)))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \
+	(function * 0x8)))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \
+	(function * 0x8)))
+#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \
+	(IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \
+	(index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \
+	(index * 0x4)))
+#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \
+	(IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \
+	(index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \
	(index * 0x4)))
-#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
+#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \
+	(IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \
+	(index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \
	(index * 0x4)))
-#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
-	(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
-#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
-	(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
+#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \
+	(IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \
+	(index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \
+	(index * 0x4)))
+#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \
+	(IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(0x3040 + (port * 0x280) + (cpu_id * 0x28)))
+#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
+	(IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
+	(0x4000 + (port * 0x800) + (cpu_id * 0x80)))
+#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
+	(IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(0x3048 + (port * 0x280) + (cpu_id * 0x28)))
+#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
+	(IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
+	(0x4008 + (port * 0x800) + (cpu_id * 0x80)))
+#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
+#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
 #define CSTORM_STATS_FLAGS_OFFSET(function) \
	(IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
	(function * 0x8)))
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
+	(IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET \
	(IS_E1H_OFFSET ? 0xa000 : 0x1000)
 #define TSTORM_ASSERT_LIST_OFFSET(idx) \
	(IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
-	(IS_E1H_OFFSET ? (0x3350 + (port * 0x190) + (client_id * 0x10)) \
-	: (0x9c0 + (port * 0x130) + (client_id * 0x10)))
+	(IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
+	: (0x9c0 + (port * 0x120) + (client_id * 0x10)))
 #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
-	(IS_E1H_OFFSET ? 0x1ad8 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
+#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
+	(IS_E1H_OFFSET ? 0x1eda : 0xffffffff)
 #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
	(IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
	((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
@@ -65,95 +134,133 @@
	(IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
	((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
 #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
+	(IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \
	(function * 0x8)))
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
-	(function * 0x38)))
+	(IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \
+	(function * 0x40)))
 #define TSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff)
 #define TSTORM_HC_BTR_OFFSET(port) \
	(IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
 #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
	(IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
	(function * 0x80)))
 #define TSTORM_INDIRECTION_TABLE_SIZE 0x80
+#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \
+	(IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \
+	: (0x4c30 + (function * 0x40) + (pblEntry * 0x8)))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \
+	(function * 0x8)))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \
+	(function * 0x8)))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
-	(function * 0x38)))
+	(IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \
+	(function * 0x40)))
 #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
-	(IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
-	0x50)) : (0x4080 + (port * 0x5b0) + (stats_counter_id * 0x50)))
+	(IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \
+	0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40)))
 #define TSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
+	(IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \
	(function * 0x8)))
-#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3680 : 0x1c20)
-#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
-#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
+#define TSTORM_TCP_MAX_CWND_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \
+	(function * 0x8)))
+#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000)
+#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000)
 #define USTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET ? 0x8960 : 0x1000)
+	(IS_E1H_OFFSET ? 0x8000 : 0x1000)
 #define USTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET ? (0x8980 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
-	(IS_E1H_OFFSET ? (0x8018 + (port * 0x4b0) + (clientId * 0x30)) : \
-	(0x5330 + (port * 0x260) + (clientId * 0x20)))
-#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET ? (0x9522 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
-	0x40) + (index * 0x4)))
-#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
-#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x40) + \
-	((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
+	(IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \
+	(0x4010 + (port * 0x360) + (clientId * 0x30)))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \
+	(IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \
+	(0x4028 + (port * 0x360) + (clientId * 0x30)))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \
+	(IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff)
 #define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \
-	(IS_E1H_OFFSET ? (0x8020 + (port * 0x4b0) + (clientId * 0x30)) : \
+	(IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \
	0xffffffff)
 #define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1d98 + \
+	(IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \
	(function * 0x8)))
 #define USTORM_FUNCTION_MODE_OFFSET \
	(IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
-#define USTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET ? (0x9704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \
+	(function * 0x8)))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \
+	(function * 0x8)))
 #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
-	(IS_E1H_OFFSET ? (0x8010 + (port * 0x4b0) + (clientId * 0x30)) : \
-	(0x5328 + (port * 0x260) + (clientId * 0x20)))
+	(IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \
+	(0x4018 + (port * 0x360) + (clientId * 0x30)))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5308 + \
+	(IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \
	(function * 0x8)))
-#define USTORM_PAUSE_ENABLED_OFFSET(port) \
-	(IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff)
 #define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
	(IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \
-	0x28)) : (0x4740 + (port * 0x2d0) + (stats_counter_id * 0x28)))
+	0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28)))
 #define USTORM_RX_PRODS_OFFSET(port, client_id) \
-	(IS_E1H_OFFSET ? (0x8000 + (port * 0x4b0) + (client_id * 0x30)) \
-	: (0x5318 + (port * 0x260) + (client_id * 0x20)))
-#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)))
-#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
-	(index * 0x4)))
-#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
-	(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
-#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
-	(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
+	(IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \
+	: (0x4000 + (port * 0x360) + (client_id * 0x30)))
 #define USTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1d80 + \
+	(IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \
	(function * 0x8)))
+#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095)
+#define USTORM_TPA_BTR_SIZE 0x1
 #define XSTORM_ASSERT_LIST_INDEX_OFFSET \
	(IS_E1H_OFFSET ? 0x9000 : 0x1000)
 #define XSTORM_ASSERT_LIST_OFFSET(idx) \
	(IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
-	(IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3ba0 + (port * 0x50)))
+	(IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50)))
 #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
	(IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
	((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
@@ -165,22 +272,73 @@
	(IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
	((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
 #define XSTORM_E1HOV_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2c10 + (function * 0x2)) : 0xffffffff)
+	(IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff)
 #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
+	(IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \
	(function * 0x8)))
 #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3c80 + \
+	(IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \
	(function * 0x90)))
 #define XSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET ? 0x2c20 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x2c50 : 0xffffffff)
 #define XSTORM_HC_BTR_OFFSET(port) \
	(IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
+	(function * 0x8)))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
+	(function * 0x8)))
 #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
-	(IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
-	0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
+	(IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
+	0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3c40 + \
+	(IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
	(function * 0x90)))
 #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
	(IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
@@ -189,8 +347,15 @@
	(IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
	(function * 0x10)))
 #define XSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
+	(IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
	(function * 0x8)))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
+	(IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
+	(IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
+	(IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
+	* 0x4)) : (0x1978 + (function * 0x4)))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
 
 /**
@@ -211,6 +376,9 @@
 #define TCP_IPV4_HASH_TYPE 2
 #define IPV6_HASH_TYPE 3
 #define TCP_IPV6_HASH_TYPE 4
+#define VLAN_PRI_HASH_TYPE 5
+#define E1HOV_PRI_HASH_TYPE 6
+#define DSCP_HASH_TYPE 7
 
 
 /* Ethernet Ring parameters */
@@ -218,30 +386,26 @@
 #define FIRST_BD_IN_PKT 0
 #define PARSE_BD_INDEX 1
 #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
-
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
 
 /* Rx ring params */
-#define U_ETH_LOCAL_BD_RING_SIZE 16
-#define U_ETH_LOCAL_SGE_RING_SIZE 12
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
 #define U_ETH_SGL_SIZE 8
 
 
-#define U_ETH_BDS_PER_PAGE_MASK \
-	((PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))-1)
-#define U_ETH_CQE_PER_PAGE_MASK \
-	((PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))-1)
-#define U_ETH_SGES_PER_PAGE_MASK \
-	((PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))-1)
-
 #define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
	(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
 
-
-#define TU_ETH_CQES_PER_PAGE \
-	(PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe_next_page)/8))
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
 #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
 #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
 
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
+
 #define U_ETH_UNDEFINED_Q 0xFF
 
 /* values of command IDs in the ramrod message */
@@ -266,8 +430,8 @@
 #define T_ETH_CRC32_HASH_SEED 0x00000000
 
 /* Maximal L2 clients supported */
-#define ETH_MAX_RX_CLIENTS_E1 19
-#define ETH_MAX_RX_CLIENTS_E1H 25
+#define ETH_MAX_RX_CLIENTS_E1 18
+#define ETH_MAX_RX_CLIENTS_E1H 26
 
 /* Maximal aggregation queues supported */
 #define ETH_MAX_AGGREGATION_QUEUES_E1 32
@@ -276,6 +440,9 @@
 /* ETH RSS modes */
 #define ETH_RSS_MODE_DISABLED 0
 #define ETH_RSS_MODE_REGULAR 1
+#define ETH_RSS_MODE_VLAN_PRI 2
+#define ETH_RSS_MODE_E1HOV_PRI 3
+#define ETH_RSS_MODE_IP_DSCP 4
 
 
 /**
@@ -332,12 +499,14 @@
 #define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
 #define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
 #define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
+#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
 
 #define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
 #define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
 #define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
 #define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
-
+#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
+#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
 
 /* used by the driver to get the SB offset */
 #define USTORM_ID 0
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 7de83c4a557a..da62cc5608d3 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1218,9 +1218,9 @@ struct host_func_stats {
 };
 
 
-#define BCM_5710_FW_MAJOR_VERSION 4
-#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 53
+#define BCM_5710_FW_MAJOR_VERSION 5
+#define BCM_5710_FW_MINOR_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 21
 #define BCM_5710_FW_ENGINEERING_VERSION 0
 #define BCM_5710_FW_COMPILE_FLAGS 1
 
@@ -1270,6 +1270,22 @@ struct doorbell {
 
 
 /*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+	u16 prod;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 prod;
+#endif
+};
+
+
+/*
  * IGU driver acknowledgement register
  */
 struct igu_ack_register {
@@ -1304,6 +1320,62 @@ struct igu_ack_register {
 
 
 /*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible {
+	u32 sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+	u32 reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular {
+	u32 sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20)
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24)
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25)
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27)
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31)
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+	u32 reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg {
+	struct igu_regular regular;
+	struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
  * Parser parsing flags field
  */
 struct parsing_flags {
@@ -1434,12 +1506,10 @@ struct ustorm_eth_st_context_config {
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<4)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 4
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0x7<<5)
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 5
+#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
+#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
+#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
+#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
	u8 status_block_id;
	u8 clientId;
	u8 sb_index_numbers;
@@ -1462,12 +1532,10 @@ struct ustorm_eth_st_context_config {
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
 #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<4)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 4
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0x7<<5)
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 5
+#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
+#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
+#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
+#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
 #endif
 #if defined(__BIG_ENDIAN)
	u16 bd_buff_size;
@@ -1487,11 +1555,36 @@ struct ustorm_eth_st_context_config {
	u8 __local_bd_prod;
	u8 __local_sge_prod;
 #endif
-	u32 reserved;
+#if defined(__BIG_ENDIAN)
+	u16 __sdm_bd_expected_counter;
+	u8 cstorm_agg_int;
+	u8 __expected_bds_on_ram;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __expected_bds_on_ram;
+	u8 cstorm_agg_int;
+	u16 __sdm_bd_expected_counter;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __ring_data_ram_addr;
+	u16 __hc_cstorm_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __hc_cstorm_ram_addr;
+	u16 __ring_data_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 reserved1;
+	u8 max_sges_for_packet;
+	u16 __bd_ring_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __bd_ring_ram_addr;
+	u8 max_sges_for_packet;
+	u8 reserved1;
+#endif
	u32 bd_page_base_lo;
	u32 bd_page_base_hi;
	u32 sge_page_base_lo;
	u32 sge_page_base_hi;
+	struct regpair reserved2;
 };
 
 /*
@@ -1514,8 +1607,8 @@ struct eth_rx_sge {
  * Local BDs and SGEs rings (in ETH)
  */
 struct eth_local_rx_rings {
-	struct eth_rx_bd __local_bd_ring[16];
-	struct eth_rx_sge __local_sge_ring[12];
+	struct eth_rx_bd __local_bd_ring[8];
+	struct eth_rx_sge __local_sge_ring[10];
 };
 
 /*
@@ -1607,13 +1700,13 @@ struct xstorm_eth_extra_ag_context_section {
  */
 struct xstorm_eth_ag_context {
 #if defined(__BIG_ENDIAN)
-	u16 __bd_prod;
+	u16 agg_val1;
	u8 __agg_vars1;
	u8 __state;
 #elif defined(__LITTLE_ENDIAN)
	u8 __state;
	u8 __agg_vars1;
-	u16 __bd_prod;
+	u16 agg_val1;
 #endif
 #if defined(__BIG_ENDIAN)
	u8 cdu_reserved;
@@ -1626,7 +1719,7 @@ struct xstorm_eth_ag_context {
	u8 __agg_vars4;
	u8 cdu_reserved;
 #endif
-	u32 __more_packets_to_send;
+	u32 __bd_prod;
 #if defined(__BIG_ENDIAN)
	u16 __agg_vars5;
	u16 __agg_val4_th;
@@ -1892,8 +1985,8 @@ struct eth_tx_bd_flags {
 #define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0
 #define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1)
 #define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1
-#define ETH_TX_BD_FLAGS_TCP_CSUM (0x1<<2)
-#define ETH_TX_BD_FLAGS_TCP_CSUM_SHIFT 2
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2
 #define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
 #define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
 #define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
@@ -1909,7 +2002,7 @@ struct eth_tx_bd_flags {
 /*
  * The eth Tx Buffer Descriptor
  */
-struct eth_tx_bd {
+struct eth_tx_start_bd {
	__le32 addr_lo;
	__le32 addr_hi;
	__le16 nbd;
@@ -1917,10 +2010,21 @@ struct eth_tx_bd {
	__le16 vlan;
	struct eth_tx_bd_flags bd_flags;
	u8 general_data;
-#define ETH_TX_BD_HDR_NBDS (0x3F<<0)
-#define ETH_TX_BD_HDR_NBDS_SHIFT 0
-#define ETH_TX_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_BD_ETH_ADDR_TYPE_SHIFT 6
+#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
+#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+};
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+	u32 addr_lo;
+	u32 addr_hi;
+	u16 total_pkt_bytes;
+	u16 nbytes;
+	u8 reserved[4];
 };
 
 /*
@@ -1930,8 +2034,8 @@ struct eth_tx_parse_bd {
	u8 global_data;
 #define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0)
 #define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0
-#define ETH_TX_PARSE_BD_CS_ANY_FLG (0x1<<4)
-#define ETH_TX_PARSE_BD_CS_ANY_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4
 #define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
 #define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
 #define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6)
@@ -1956,10 +2060,10 @@ struct eth_tx_parse_bd {
 #define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7)
 #define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7
	u8 ip_hlen;
-	s8 cs_offset;
+	s8 reserved;
	__le16 total_hlen;
-	__le16 lso_mss;
	__le16 tcp_pseudo_csum;
+	__le16 lso_mss;
	__le16 ip_id;
	__le32 tcp_send_seq;
 };
@@ -1968,15 +2072,16 @@ struct eth_tx_parse_bd {
  * The last BD in the BD memory will hold a pointer to the next BD memory
  */
 struct eth_tx_next_bd {
-	u32 addr_lo;
-	u32 addr_hi;
+	__le32 addr_lo;
+	__le32 addr_hi;
	u8 reserved[8];
 };
 
 /*
- * union for 3 Bd types
+ * union for 4 Bd types
  */
 union eth_tx_bd_types {
+	struct eth_tx_start_bd start_bd;
	struct eth_tx_bd reg_bd;
	struct eth_tx_parse_bd parse_bd;
	struct eth_tx_next_bd next_bd;
@@ -2005,11 +2110,35 @@ struct xstorm_eth_st_context {
 #define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
	u16 tx_bd_cons;
 #endif
-	u32 db_data_addr_lo;
-	u32 db_data_addr_hi;
-	u32 __pkt_cons;
-	u32 __gso_next;
-	u32 is_eth_conn_1b;
+	u32 __reserved1;
+	u32 __reserved2;
+#if defined(__BIG_ENDIAN)
+	u8 __ram_cache_index;
+	u8 __double_buffer_client;
+	u16 __pkt_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pkt_cons;
+	u8 __double_buffer_client;
+	u8 __ram_cache_index;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __statistics_address;
+	u16 __gso_next;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __gso_next;
+	u16 __statistics_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __local_tx_bd_cons;
+	u8 safc_group_num;
+	u8 safc_group_en;
+	u8 __is_eth_conn;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __is_eth_conn;
+	u8 safc_group_en;
+	u8 safc_group_num;
+	u8 __local_tx_bd_cons;
+#endif
	union eth_tx_bd_types __bds[13];
 };
 
@@ -2074,9 +2203,9 @@ struct eth_tx_doorbell {
 
 
 /*
- * ustorm status block
+ * cstorm default status block, generated by ustorm
  */
-struct ustorm_def_status_block {
+struct cstorm_def_status_block_u {
	__le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
	__le16 status_block_index;
	u8 func;
@@ -2085,9 +2214,9 @@ struct ustorm_def_status_block {
 };
 
 /*
- * cstorm status block
+ * cstorm default status block, generated by cstorm
  */
-struct cstorm_def_status_block {
+struct cstorm_def_status_block_c {
	__le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
	__le16 status_block_index;
	u8 func;
@@ -2122,17 +2251,17 @@ struct tstorm_def_status_block {
  */
 struct host_def_status_block {
	struct atten_def_status_block atten_status_block;
-	struct ustorm_def_status_block u_def_status_block;
-	struct cstorm_def_status_block c_def_status_block;
+	struct cstorm_def_status_block_u u_def_status_block;
+	struct cstorm_def_status_block_c c_def_status_block;
	struct xstorm_def_status_block x_def_status_block;
	struct tstorm_def_status_block t_def_status_block;
 };
 
 
 /*
- * ustorm status block
+ * cstorm status block, generated by ustorm
  */
-struct ustorm_status_block {
+struct cstorm_status_block_u {
	__le16 index_values[HC_USTORM_SB_NUM_INDICES];
	__le16 status_block_index;
	u8 func;
@@ -2141,9 +2270,9 @@ struct ustorm_status_block {
 };
 
 /*
- * cstorm status block
+ * cstorm status block, generated by cstorm
  */
-struct cstorm_status_block {
+struct cstorm_status_block_c {
	__le16 index_values[HC_CSTORM_SB_NUM_INDICES];
	__le16 status_block_index;
	u8 func;
@@ -2155,8 +2284,8 @@ struct cstorm_status_block {
  * host status block
  */
 struct host_status_block {
-	struct ustorm_status_block u_status_block;
-	struct cstorm_status_block c_status_block;
+	struct cstorm_status_block_u u_status_block;
+	struct cstorm_status_block_c c_status_block;
 };
 
 
@@ -2172,15 +2301,6 @@ struct eth_client_setup_ramrod_data {
 
 
 /*
- * L2 dynamic host coalescing init parameters
- */
-struct eth_dynamic_hc_config {
-	u32 threshold[3];
-	u8 hc_timeout[4];
-};
-
-
-/*
  * regular eth FP CQE parameters struct
  */
 struct eth_fast_path_rx_cqe {
@@ -2344,12 +2464,10 @@ struct eth_spe {
 
 
 /*
- * doorbell data in host memory
+ * array of 13 bds as appears in the eth xstorm context
  */
-struct eth_tx_db_data {
-	__le32 packets_prod;
-	__le16 bds_prod;
-	__le16 reserved;
+struct eth_tx_bds_array {
+	union eth_tx_bd_types bds[13];
 };
 
 
@@ -2377,8 +2495,10 @@ struct tstorm_eth_function_common_config {
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3F<<10)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 10
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
 #elif defined(__LITTLE_ENDIAN)
	u16 config_flags;
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2397,8 +2517,10 @@ struct tstorm_eth_function_common_config {
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
 #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3F<<10)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 10
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
	u8 rss_result_mask;
	u8 leading_client_id;
 #endif
@@ -2406,11 +2528,38 @@ struct tstorm_eth_function_common_config {
2406}; 2528};
2407 2529
2408/* 2530/*
 2531 * RSS indirection table update configuration
2532 */
2533struct rss_update_config {
2534#if defined(__BIG_ENDIAN)
2535 u16 toe_rss_bitmap;
2536 u16 flags;
2537#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2538#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2539#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2540#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2541#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2542#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2543#elif defined(__LITTLE_ENDIAN)
2544 u16 flags;
2545#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2546#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2547#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2548#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2549#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2550#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2551 u16 toe_rss_bitmap;
2552#endif
2553 u32 reserved1;
2554};
2555
2556/*
2409 * parameters for eth update ramrod 2557 * parameters for eth update ramrod
2410 */ 2558 */
2411struct eth_update_ramrod_data { 2559struct eth_update_ramrod_data {
2412 struct tstorm_eth_function_common_config func_config; 2560 struct tstorm_eth_function_common_config func_config;
2413 u8 indirectionTable[128]; 2561 u8 indirectionTable[128];
2562 struct rss_update_config rss_config;
2414}; 2563};
2415 2564
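TPA enablement moves from the per-client tstorm config (its ENABLE_SGE_RING bit is deleted further down) into these function-common flags, and the update ramrod grows an rss_update_config tail so the indirection table can be reprogrammed at runtime for both eth and TOE. A sketch of raising the new bits, assuming the positions defined in this hunk:

#include <stdint.h>

#define CFG_ENABLE_TPA        (0x1u << 10) /* ..._COMMON_CONFIG_ENABLE_TPA */
#define RSS_ETH_UPDATE_ENABLE (0x1u << 0)  /* RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE */

static void build_update(uint16_t *config_flags, uint16_t *rss_flags,
			 int tpa_on, int reprogram_eth_rss)
{
	if (tpa_on)
		*config_flags |= CFG_ENABLE_TPA;
	if (reprogram_eth_rss)
		*rss_flags |= RSS_ETH_UPDATE_ENABLE;
}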
2416 2565
@@ -2455,8 +2604,9 @@ struct tstorm_cam_target_table_entry {
2455#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3 2604#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2456#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4) 2605#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2457#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4 2606#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2458 u8 client_id; 2607 u8 reserved1;
2459 u16 vlan_id; 2608 u16 vlan_id;
2609 u32 clients_bit_vector;
2460}; 2610};
2461 2611
2462/* 2612/*
@@ -2485,7 +2635,7 @@ struct mac_configuration_entry_e1h {
2485 __le16 msb_mac_addr; 2635 __le16 msb_mac_addr;
2486 __le16 vlan_id; 2636 __le16 vlan_id;
2487 __le16 e1hov_id; 2637 __le16 e1hov_id;
2488 u8 client_id; 2638 u8 reserved0;
2489 u8 flags; 2639 u8 flags;
2490#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0) 2640#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0)
2491#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0 2641#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0
@@ -2493,8 +2643,9 @@ struct mac_configuration_entry_e1h {
2493#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1 2643#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1
2494#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2) 2644#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2)
2495#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2 2645#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2
2496#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0 (0x1F<<3) 2646#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3)
2497#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0_SHIFT 3 2647#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3
2648 u32 clients_bit_vector;
2498}; 2649};
2499 2650
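Both CAM entry layouts (E1 above, E1H here) trade the single client_id byte for a 32-bit clients_bit_vector, so one MAC/VLAN match can steer frames to any subset of clients rather than exactly one. A single-client entry is then presumably one-hot:

#include <stdint.h>

/* One-hot vector for a single client; multiple targets OR together. */
static uint32_t client_bit(unsigned int cl_id)
{
	return 1u << cl_id;	/* assumes cl_id < 32 */
}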
2500/* 2651/*
@@ -2519,13 +2670,13 @@ struct tstorm_eth_approximate_match_multicast_filtering {
2519 */ 2670 */
2520struct tstorm_eth_client_config { 2671struct tstorm_eth_client_config {
2521#if defined(__BIG_ENDIAN) 2672#if defined(__BIG_ENDIAN)
2522 u8 max_sges_for_packet; 2673 u8 reserved0;
2523 u8 statistics_counter_id; 2674 u8 statistics_counter_id;
2524 u16 mtu; 2675 u16 mtu;
2525#elif defined(__LITTLE_ENDIAN) 2676#elif defined(__LITTLE_ENDIAN)
2526 u16 mtu; 2677 u16 mtu;
2527 u8 statistics_counter_id; 2678 u8 statistics_counter_id;
2528 u8 max_sges_for_packet; 2679 u8 reserved0;
2529#endif 2680#endif
2530#if defined(__BIG_ENDIAN) 2681#if defined(__BIG_ENDIAN)
2531 u16 drop_flags; 2682 u16 drop_flags;
@@ -2537,8 +2688,8 @@ struct tstorm_eth_client_config {
2537#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2 2688#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2538#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3) 2689#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2539#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3 2690#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2540#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4) 2691#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2541#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4 2692#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2542 u16 config_flags; 2693 u16 config_flags;
2543#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0) 2694#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2544#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0 2695#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
@@ -2546,10 +2697,8 @@ struct tstorm_eth_client_config {
2546#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1 2697#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2547#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2) 2698#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2548#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2 2699#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2549#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<3) 2700#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2550#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 3 2701#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2551#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0xFFF<<4)
2552#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 4
2553#elif defined(__LITTLE_ENDIAN) 2702#elif defined(__LITTLE_ENDIAN)
2554 u16 config_flags; 2703 u16 config_flags;
2555#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0) 2704#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
@@ -2558,10 +2707,8 @@ struct tstorm_eth_client_config {
2558#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1 2707#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2559#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2) 2708#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2560#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2 2709#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2561#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<3) 2710#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2562#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 3 2711#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2563#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0xFFF<<4)
2564#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 4
2565 u16 drop_flags; 2712 u16 drop_flags;
2566#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0) 2713#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2567#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0 2714#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
@@ -2571,8 +2718,8 @@ struct tstorm_eth_client_config {
2571#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2 2718#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2572#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3) 2719#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2573#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3 2720#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2574#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4) 2721#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2575#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4 2722#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2576#endif 2723#endif
2577}; 2724};
2578 2725
@@ -2695,7 +2842,6 @@ struct rate_shaping_vars_per_port {
2695 u32 rs_threshold; 2842 u32 rs_threshold;
2696}; 2843};
2697 2844
2698
2699/* 2845/*
2700 * per-port fairness variables 2846 * per-port fairness variables
2701 */ 2847 */
@@ -2705,7 +2851,6 @@ struct fairness_vars_per_port {
2705 u32 fairness_timeout; 2851 u32 fairness_timeout;
2706}; 2852};
2707 2853
2708
2709/* 2854/*
2710 * per-port SAFC variables 2855 * per-port SAFC variables
2711 */ 2856 */
@@ -2722,7 +2867,6 @@ struct safc_struct_per_port {
2722 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; 2867 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
2723}; 2868};
2724 2869
2725
2726/* 2870/*
2727 * Per-port congestion management variables 2871 * Per-port congestion management variables
2728 */ 2872 */
@@ -2735,11 +2879,23 @@ struct cmng_struct_per_port {
2735 2879
2736 2880
2737/* 2881/*
2882 * Dynamic host coalescing init parameters
2883 */
2884struct dynamic_hc_config {
2885 u32 threshold[3];
2886 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES];
2887 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES];
2888 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES];
2889 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES];
2890 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES];
2891};
2892
2893
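This replaces the three-threshold eth_dynamic_hc_config removed earlier in the patch: the thresholds split the observed load into four bands, and each band now carries a per-index timeout row instead of a single byte. A hypothetical selection routine under that reading (the field semantics are an assumption):

#include <stdint.h>

#define HC_USTORM_SB_NUM_INDICES 4	/* assumed value for the sketch */

struct dyn_hc {		/* abridged mirror of struct dynamic_hc_config
			 * (shift_per_protocol omitted) */
	uint32_t threshold[3];
	uint8_t hc_timeout0[HC_USTORM_SB_NUM_INDICES];
	uint8_t hc_timeout1[HC_USTORM_SB_NUM_INDICES];
	uint8_t hc_timeout2[HC_USTORM_SB_NUM_INDICES];
	uint8_t hc_timeout3[HC_USTORM_SB_NUM_INDICES];
};

static uint8_t pick_timeout(const struct dyn_hc *c, uint32_t rate, int idx)
{
	if (rate <= c->threshold[0])
		return c->hc_timeout0[idx];
	if (rate <= c->threshold[1])
		return c->hc_timeout1[idx];
	if (rate <= c->threshold[2])
		return c->hc_timeout2[idx];
	return c->hc_timeout3[idx];
}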
2894/*
2738 * Protocol-common statistics collected by the Xstorm (per client) 2895 * Protocol-common statistics collected by the Xstorm (per client)
2739 */ 2896 */
2740struct xstorm_per_client_stats { 2897struct xstorm_per_client_stats {
2741 struct regpair total_sent_bytes; 2898 __le32 reserved0;
2742 __le32 total_sent_pkts;
2743 __le32 unicast_pkts_sent; 2899 __le32 unicast_pkts_sent;
2744 struct regpair unicast_bytes_sent; 2900 struct regpair unicast_bytes_sent;
2745 struct regpair multicast_bytes_sent; 2901 struct regpair multicast_bytes_sent;
@@ -2747,11 +2903,10 @@ struct xstorm_per_client_stats {
2747 __le32 broadcast_pkts_sent; 2903 __le32 broadcast_pkts_sent;
2748 struct regpair broadcast_bytes_sent; 2904 struct regpair broadcast_bytes_sent;
2749 __le16 stats_counter; 2905 __le16 stats_counter;
2750 __le16 reserved0; 2906 __le16 reserved1;
2751 __le32 reserved1; 2907 __le32 reserved2;
2752}; 2908};
2753 2909
2754
2755/* 2910/*
2756 * Common statistics collected by the Xstorm (per port) 2911 * Common statistics collected by the Xstorm (per port)
2757 */ 2912 */
@@ -2759,7 +2914,6 @@ struct xstorm_common_stats {
2759 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID]; 2914 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID];
2760}; 2915};
2761 2916
2762
2763/* 2917/*
2764 * Protocol-common statistics collected by the Tstorm (per port) 2918 * Protocol-common statistics collected by the Tstorm (per port)
2765 */ 2919 */
@@ -2770,19 +2924,16 @@ struct tstorm_per_port_stats {
2770 __le32 mac_discard; 2924 __le32 mac_discard;
2771}; 2925};
2772 2926
2773
2774/* 2927/*
2775 * Protocol-common statistics collected by the Tstorm (per client) 2928 * Protocol-common statistics collected by the Tstorm (per client)
2776 */ 2929 */
2777struct tstorm_per_client_stats { 2930struct tstorm_per_client_stats {
2778 struct regpair total_rcv_bytes;
2779 struct regpair rcv_unicast_bytes; 2931 struct regpair rcv_unicast_bytes;
2780 struct regpair rcv_broadcast_bytes; 2932 struct regpair rcv_broadcast_bytes;
2781 struct regpair rcv_multicast_bytes; 2933 struct regpair rcv_multicast_bytes;
2782 struct regpair rcv_error_bytes; 2934 struct regpair rcv_error_bytes;
2783 __le32 checksum_discard; 2935 __le32 checksum_discard;
2784 __le32 packets_too_big_discard; 2936 __le32 packets_too_big_discard;
2785 __le32 total_rcv_pkts;
2786 __le32 rcv_unicast_pkts; 2937 __le32 rcv_unicast_pkts;
2787 __le32 rcv_broadcast_pkts; 2938 __le32 rcv_broadcast_pkts;
2788 __le32 rcv_multicast_pkts; 2939 __le32 rcv_multicast_pkts;
@@ -2790,7 +2941,6 @@ struct tstorm_per_client_stats {
2790 __le32 ttl0_discard; 2941 __le32 ttl0_discard;
2791 __le16 stats_counter; 2942 __le16 stats_counter;
2792 __le16 reserved0; 2943 __le16 reserved0;
2793 __le32 reserved1;
2794}; 2944};
2795 2945
2796/* 2946/*
@@ -2893,6 +3043,15 @@ struct pram_fw_version {
2893 3043
2894 3044
2895/* 3045/*
3046 * The send queue element
3047 */
3048struct protocol_common_spe {
3049 struct spe_hdr hdr;
3050 struct regpair phy_address;
3051};
3052
3053
3054/*
2896 * a single rate shaping counter. can be used as protocol or vnic counter 3055 * a single rate shaping counter. can be used as protocol or vnic counter
2897 */ 3056 */
2898struct rate_shaping_counter { 3057struct rate_shaping_counter {
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 665ed36a0d48..762f37a7d038 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -10,7 +10,7 @@
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov 13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner 14 * Statistics and Link management by Yitchak Gertner
15 * 15 *
16 */ 16 */
@@ -80,7 +80,18 @@ MODULE_VERSION(DRV_MODULE_VERSION);
80 80
81static int multi_mode = 1; 81static int multi_mode = 1;
82module_param(multi_mode, int, 0); 82module_param(multi_mode, int, 0);
83MODULE_PARM_DESC(multi_mode, " Use per-CPU queues"); 83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
84 95
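Both new parameters take effect only with multi_mode=1; per the descriptions, leaving them at zero falls back to half the number of CPUs. The presumed defaulting, isolated:

static int resolve_queue_count(int requested, int online_cpus)
{
	if (requested)
		return requested;
	return (online_cpus / 2) ? online_cpus / 2 : 1;	/* assumed floor of 1 */
}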
85static int disable_tpa; 96static int disable_tpa;
86module_param(disable_tpa, int, 0); 97module_param(disable_tpa, int, 0);
@@ -542,16 +553,15 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
542 /* Tx */ 553 /* Tx */
543 for_each_tx_queue(bp, i) { 554 for_each_tx_queue(bp, i) {
544 struct bnx2x_fastpath *fp = &bp->fp[i]; 555 struct bnx2x_fastpath *fp = &bp->fp[i];
545 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
546 556
547 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" 557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
548 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
549 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 559 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
550 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 560 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
551 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)" 561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
552 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx), 562 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
553 fp->status_blk->c_status_block.status_block_index, 563 fp->status_blk->c_status_block.status_block_index,
554 hw_prods->packets_prod, hw_prods->bds_prod); 564 fp->tx_db.data.prod);
555 } 565 }
556 566
557 /* Rings */ 567 /* Rings */
@@ -790,16 +800,6 @@ static u16 bnx2x_ack_int(struct bnx2x *bp)
790 * fast path service functions 800 * fast path service functions
791 */ 801 */
792 802
793static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
794{
795 u16 tx_cons_sb;
796
797 /* Tell compiler that status block fields can change */
798 barrier();
799 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
800 return (fp->tx_pkt_cons != tx_cons_sb);
801}
802
803static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 803static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804{ 804{
805 /* Tell compiler that consumer and producer can change */ 805 /* Tell compiler that consumer and producer can change */
@@ -814,7 +814,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 u16 idx) 814 u16 idx)
815{ 815{
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; 816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817 struct eth_tx_bd *tx_bd; 817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
818 struct sk_buff *skb = tx_buf->skb; 819 struct sk_buff *skb = tx_buf->skb;
819 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
820 int nbd; 821 int nbd;
@@ -824,51 +825,46 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
824 825
825 /* unmap first bd */ 826 /* unmap first bd */
826 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); 827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
827 tx_bd = &fp->tx_desc_ring[bd_idx]; 828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
828 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd), 829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
829 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); 830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
830 831
831 nbd = le16_to_cpu(tx_bd->nbd) - 1; 832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
832 new_cons = nbd + tx_buf->first_bd;
833#ifdef BNX2X_STOP_ON_ERROR 833#ifdef BNX2X_STOP_ON_ERROR
834 if (nbd > (MAX_SKB_FRAGS + 2)) { 834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
835 BNX2X_ERR("BAD nbd!\n"); 835 BNX2X_ERR("BAD nbd!\n");
836 bnx2x_panic(); 836 bnx2x_panic();
837 } 837 }
838#endif 838#endif
839 new_cons = nbd + tx_buf->first_bd;
839 840
840 /* Skip a parse bd and the TSO split header bd 841 /* Get the next bd */
841 since they have no mapping */ 842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
842 if (nbd)
843 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
844 843
845 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM | 844 /* Skip a parse bd... */
846 ETH_TX_BD_FLAGS_TCP_CSUM | 845 --nbd;
847 ETH_TX_BD_FLAGS_SW_LSO)) { 846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
848 if (--nbd) 847
849 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 848 /* ...and the TSO split header bd since they have no mapping */
850 tx_bd = &fp->tx_desc_ring[bd_idx]; 849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
851 /* is this a TSO split header bd? */ 850 --nbd;
852 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) { 851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
853 if (--nbd)
854 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
855 }
856 } 852 }
857 853
858 /* now free frags */ 854 /* now free frags */
859 while (nbd > 0) { 855 while (nbd > 0) {
860 856
861 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); 857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
862 tx_bd = &fp->tx_desc_ring[bd_idx]; 858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
863 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd), 859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
864 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); 860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
865 if (--nbd) 861 if (--nbd)
866 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
867 } 863 }
868 864
869 /* release skb */ 865 /* release skb */
870 WARN_ON(!skb); 866 WARN_ON(!skb);
871 dev_kfree_skb(skb); 867 dev_kfree_skb_any(skb);
872 tx_buf->first_bd = 0; 868 tx_buf->first_bd = 0;
873 tx_buf->skb = NULL; 869 tx_buf->skb = NULL;
874 870
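The completion path no longer inspects BD flags to locate the mapped fragments: a packet is now always a start BD, then a parse BD, then an optional TSO split-header BD recorded via BNX2X_TSO_SPLIT_BD in the sw_tx_bd flags, and finally the data BDs. A simplified walk under that layout (it ignores the per-page link BDs that the real NEXT_TX_IDX skips):

#include <stdint.h>
#include <stdbool.h>

#define RING_MASK 255			/* assumption: 256-entry ring */
#define NEXT(idx) (((idx) + 1) & RING_MASK)

/* Advance past the BDs that carry no DMA mapping; returns the index of
 * the first data BD, with *nbd left at the count still to unmap. */
static uint16_t skip_unmapped_bds(uint16_t bd_idx, int *nbd, bool tso_split)
{
	bd_idx = NEXT(bd_idx);		/* past the (already unmapped) start BD */
	--*nbd;				/* the parse BD has no mapping */
	bd_idx = NEXT(bd_idx);
	if (tso_split) {		/* neither does the split-header BD */
		--*nbd;
		bd_idx = NEXT(bd_idx);
	}
	return bd_idx;
}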
@@ -910,7 +906,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
910 return; 906 return;
911#endif 907#endif
912 908
913 txq = netdev_get_tx_queue(bp->dev, fp->index); 909 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
914 hw_cons = le16_to_cpu(*fp->tx_cons_sb); 910 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
915 sw_cons = fp->tx_pkt_cons; 911 sw_cons = fp->tx_pkt_cons;
916 912
@@ -940,8 +936,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
940 /* TBD need a thresh? */ 936 /* TBD need a thresh? */
941 if (unlikely(netif_tx_queue_stopped(txq))) { 937 if (unlikely(netif_tx_queue_stopped(txq))) {
942 938
943 __netif_tx_lock(txq, smp_processor_id());
944
945 /* Need to make the tx_bd_cons update visible to start_xmit() 939 /* Need to make the tx_bd_cons update visible to start_xmit()
946 * before checking for netif_tx_queue_stopped(). Without the 940 * before checking for netif_tx_queue_stopped(). Without the
947 * memory barrier, there is a small possibility that 941 * memory barrier, there is a small possibility that
@@ -954,8 +948,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
954 (bp->state == BNX2X_STATE_OPEN) && 948 (bp->state == BNX2X_STATE_OPEN) &&
955 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 949 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
956 netif_tx_wake_queue(txq); 950 netif_tx_wake_queue(txq);
957
958 __netif_tx_unlock(txq);
959 } 951 }
960} 952}
961 953
@@ -1023,6 +1015,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1023 break; 1015 break;
1024 1016
1025 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): 1017 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1026 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 1019 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1027 break; 1020 break;
1028 1021
@@ -1688,7 +1681,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1688{ 1681{
1689 struct bnx2x_fastpath *fp = fp_cookie; 1682 struct bnx2x_fastpath *fp = fp_cookie;
1690 struct bnx2x *bp = fp->bp; 1683 struct bnx2x *bp = fp->bp;
1691 int index = fp->index;
1692 1684
1693 /* Return here if interrupt is disabled */ 1685 /* Return here if interrupt is disabled */
1694 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
@@ -1697,20 +1689,34 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1697 } 1689 }
1698 1690
1699 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1700 index, fp->sb_id); 1692 fp->index, fp->sb_id);
1701 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1702 1694
1703#ifdef BNX2X_STOP_ON_ERROR 1695#ifdef BNX2X_STOP_ON_ERROR
1704 if (unlikely(bp->panic)) 1696 if (unlikely(bp->panic))
1705 return IRQ_HANDLED; 1697 return IRQ_HANDLED;
1706#endif 1698#endif
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
1707 1703
1708 prefetch(fp->rx_cons_sb); 1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1709 prefetch(fp->tx_cons_sb);
1710 prefetch(&fp->status_blk->c_status_block.status_block_index);
1711 prefetch(&fp->status_blk->u_status_block.status_block_index);
1712 1705
1713 napi_schedule(&bnx2x_fp(bp, index, napi)); 1706 } else {
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710 bnx2x_update_fpsb_idx(fp);
1711 rmb();
1712 bnx2x_tx_int(fp);
1713
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719 }
1714 1720
1715 return IRQ_HANDLED; 1721 return IRQ_HANDLED;
1716} 1722}
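Each MSI-X vector now serves either Rx or Tx, never both: an Rx vector prefetches and hands off to NAPI, while a Tx vector drains the ring directly in hard-IRQ context and re-arms the IGU itself. The control flow reduced to a compilable stub:

#include <stdbool.h>
#include <stdio.h>

struct fastpath_mock { bool is_rx_queue; };

static void schedule_rx_poll(struct fastpath_mock *fp) { (void)fp; puts("napi"); }
static void drain_tx_and_rearm(struct fastpath_mock *fp) { (void)fp; puts("tx"); }

static void handle_vector(struct fastpath_mock *fp)
{
	if (fp->is_rx_queue)
		schedule_rx_poll(fp);	/* deferred: Rx runs in the poller */
	else
		drain_tx_and_rearm(fp);	/* inline: then the IGU is re-enabled */
}

int main(void)
{
	struct fastpath_mock rx = { true }, tx = { false };
	handle_vector(&rx);
	handle_vector(&tx);
	return 0;
}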
@@ -1720,6 +1726,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1720 struct bnx2x *bp = netdev_priv(dev_instance); 1726 struct bnx2x *bp = netdev_priv(dev_instance);
1721 u16 status = bnx2x_ack_int(bp); 1727 u16 status = bnx2x_ack_int(bp);
1722 u16 mask; 1728 u16 mask;
1729 int i;
1723 1730
1724 /* Return here if interrupt is shared and it's not for us */ 1731 /* Return here if interrupt is shared and it's not for us */
1725 if (unlikely(status == 0)) { 1732 if (unlikely(status == 0)) {
@@ -1739,18 +1746,38 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1739 return IRQ_HANDLED; 1746 return IRQ_HANDLED;
1740#endif 1747#endif
1741 1748
1742 mask = 0x2 << bp->fp[0].sb_id; 1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1743 if (status & mask) { 1750 struct bnx2x_fastpath *fp = &bp->fp[i];
1744 struct bnx2x_fastpath *fp = &bp->fp[0];
1745 1751
1746 prefetch(fp->rx_cons_sb); 1752 mask = 0x2 << fp->sb_id;
1747 prefetch(fp->tx_cons_sb); 1753 if (status & mask) {
1748 prefetch(&fp->status_blk->c_status_block.status_block_index); 1754 /* Handle Rx or Tx according to SB id */
1749 prefetch(&fp->status_blk->u_status_block.status_block_index); 1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
1750 1759
1751 napi_schedule(&bnx2x_fp(bp, 0, napi)); 1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1752 1761
1753 status &= ~mask; 1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
1754 } 1781 }
1755 1782
1756 1783
@@ -2298,7 +2325,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2298 pause_enabled = 1; 2325 pause_enabled = 1;
2299 2326
2300 REG_WR(bp, BAR_USTRORM_INTMEM + 2327 REG_WR(bp, BAR_USTRORM_INTMEM +
2301 USTORM_PAUSE_ENABLED_OFFSET(port), 2328 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2302 pause_enabled); 2329 pause_enabled);
2303 } 2330 }
2304 2331
@@ -3756,7 +3783,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
3756 estats->no_buff_discard_hi = 0; 3783 estats->no_buff_discard_hi = 0;
3757 estats->no_buff_discard_lo = 0; 3784 estats->no_buff_discard_lo = 0;
3758 3785
3759 for_each_queue(bp, i) { 3786 for_each_rx_queue(bp, i) {
3760 struct bnx2x_fastpath *fp = &bp->fp[i]; 3787 struct bnx2x_fastpath *fp = &bp->fp[i];
3761 int cl_id = fp->cl_id; 3788 int cl_id = fp->cl_id;
3762 struct tstorm_per_client_stats *tclient = 3789 struct tstorm_per_client_stats *tclient =
@@ -3795,11 +3822,24 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
3795 } 3822 }
3796 3823
3797 qstats->total_bytes_received_hi = 3824 qstats->total_bytes_received_hi =
3798 qstats->valid_bytes_received_hi = 3825 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3799 le32_to_cpu(tclient->total_rcv_bytes.hi);
3800 qstats->total_bytes_received_lo = 3826 qstats->total_bytes_received_lo =
3827 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3828
3829 ADD_64(qstats->total_bytes_received_hi,
3830 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3831 qstats->total_bytes_received_lo,
3832 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3833
3834 ADD_64(qstats->total_bytes_received_hi,
3835 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3836 qstats->total_bytes_received_lo,
3837 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3838
3839 qstats->valid_bytes_received_hi =
3840 qstats->total_bytes_received_hi;
3801 qstats->valid_bytes_received_lo = 3841 qstats->valid_bytes_received_lo =
3802 le32_to_cpu(tclient->total_rcv_bytes.lo); 3842 qstats->total_bytes_received_lo;
3803 3843
3804 qstats->error_bytes_received_hi = 3844 qstats->error_bytes_received_hi =
3805 le32_to_cpu(tclient->rcv_error_bytes.hi); 3845 le32_to_cpu(tclient->rcv_error_bytes.hi);
@@ -3832,9 +3872,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
3832 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); 3872 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3833 3873
3834 qstats->total_bytes_transmitted_hi = 3874 qstats->total_bytes_transmitted_hi =
3835 le32_to_cpu(xclient->total_sent_bytes.hi); 3875 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3836 qstats->total_bytes_transmitted_lo = 3876 qstats->total_bytes_transmitted_lo =
3837 le32_to_cpu(xclient->total_sent_bytes.lo); 3877 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3878
3879 ADD_64(qstats->total_bytes_transmitted_hi,
3880 le32_to_cpu(xclient->multicast_bytes_sent.hi),
3881 qstats->total_bytes_transmitted_lo,
3882 le32_to_cpu(xclient->multicast_bytes_sent.lo));
3883
3884 ADD_64(qstats->total_bytes_transmitted_hi,
3885 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3886 qstats->total_bytes_transmitted_lo,
3887 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
3838 3888
3839 UPDATE_EXTEND_XSTAT(unicast_pkts_sent, 3889 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3840 total_unicast_packets_transmitted); 3890 total_unicast_packets_transmitted);
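The new stats blocks dropped the precomputed totals (total_rcv_bytes earlier, total_sent_bytes here), so the driver now reconstitutes them by summing the unicast, multicast and broadcast byte counters. Each counter is a 32-bit hi/lo pair; ADD_64 presumably carries out of the low word like this:

#include <stdint.h>
#include <stdio.h>

/* s_hi:s_lo += a_hi:a_lo, propagating the carry from the low word. */
static void add_64(uint32_t *s_hi, uint32_t a_hi, uint32_t *s_lo, uint32_t a_lo)
{
	uint32_t lo = *s_lo + a_lo;

	*s_hi += a_hi + (lo < *s_lo);	/* wrapped low word => carry one */
	*s_lo = lo;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xFFFFFFF0u;

	add_64(&hi, 0, &lo, 0x20);
	printf("%u:%u\n", hi, lo);	/* prints 1:16 */
	return 0;
}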
@@ -3950,7 +4000,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
3950 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 4000 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3951 4001
3952 nstats->rx_dropped = estats->mac_discard; 4002 nstats->rx_dropped = estats->mac_discard;
3953 for_each_queue(bp, i) 4003 for_each_rx_queue(bp, i)
3954 nstats->rx_dropped += 4004 nstats->rx_dropped +=
3955 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 4005 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3956 4006
@@ -4004,7 +4054,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
4004 estats->rx_err_discard_pkt = 0; 4054 estats->rx_err_discard_pkt = 0;
4005 estats->rx_skb_alloc_failed = 0; 4055 estats->rx_skb_alloc_failed = 0;
4006 estats->hw_csum_err = 0; 4056 estats->hw_csum_err = 0;
4007 for_each_queue(bp, i) { 4057 for_each_rx_queue(bp, i) {
4008 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; 4058 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4009 4059
4010 estats->driver_xoff += qstats->driver_xoff; 4060 estats->driver_xoff += qstats->driver_xoff;
@@ -4034,6 +4084,8 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4034 bnx2x_drv_stats_update(bp); 4084 bnx2x_drv_stats_update(bp);
4035 4085
4036 if (bp->msglevel & NETIF_MSG_TIMER) { 4086 if (bp->msglevel & NETIF_MSG_TIMER) {
4087 struct bnx2x_fastpath *fp0_rx = bp->fp;
4088 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4037 struct tstorm_per_client_stats *old_tclient = 4089 struct tstorm_per_client_stats *old_tclient =
4038 &bp->fp->old_tclient; 4090 &bp->fp->old_tclient;
4039 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; 4091 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4044,13 +4096,13 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4044 printk(KERN_DEBUG "%s:\n", bp->dev->name); 4096 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4045 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" 4097 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4046 " tx pkt (%lx)\n", 4098 " tx pkt (%lx)\n",
4047 bnx2x_tx_avail(bp->fp), 4099 bnx2x_tx_avail(fp0_tx),
4048 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets); 4100 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4049 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)" 4101 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4050 " rx pkt (%lx)\n", 4102 " rx pkt (%lx)\n",
4051 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) - 4103 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4052 bp->fp->rx_comp_cons), 4104 fp0_rx->rx_comp_cons),
4053 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); 4105 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4054 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u " 4106 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4055 "brb truncate %u\n", 4107 "brb truncate %u\n",
4056 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"), 4108 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
@@ -4263,12 +4315,13 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4263{ 4315{
4264 int port = BP_PORT(bp); 4316 int port = BP_PORT(bp);
4265 4317
4266 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR + 4318 /* "CSTORM" */
4267 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 4319 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4268 sizeof(struct ustorm_status_block)/4); 4320 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4269 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR + 4321 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4270 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 4322 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4271 sizeof(struct cstorm_status_block)/4); 4323 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4324 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4272} 4325}
4273 4326
4274static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 4327static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
@@ -4284,17 +4337,17 @@ static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4284 u_status_block); 4337 u_status_block);
4285 sb->u_status_block.status_block_id = sb_id; 4338 sb->u_status_block.status_block_id = sb_id;
4286 4339
4287 REG_WR(bp, BAR_USTRORM_INTMEM + 4340 REG_WR(bp, BAR_CSTRORM_INTMEM +
4288 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); 4341 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4289 REG_WR(bp, BAR_USTRORM_INTMEM + 4342 REG_WR(bp, BAR_CSTRORM_INTMEM +
4290 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), 4343 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4291 U64_HI(section)); 4344 U64_HI(section));
4292 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF + 4345 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4293 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func); 4346 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4294 4347
4295 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) 4348 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4296 REG_WR16(bp, BAR_USTRORM_INTMEM + 4349 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4297 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); 4350 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4298 4351
4299 /* CSTORM */ 4352 /* CSTORM */
4300 section = ((u64)mapping) + offsetof(struct host_status_block, 4353 section = ((u64)mapping) + offsetof(struct host_status_block,
@@ -4302,16 +4355,16 @@ static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4302 sb->c_status_block.status_block_id = sb_id; 4355 sb->c_status_block.status_block_id = sb_id;
4303 4356
4304 REG_WR(bp, BAR_CSTRORM_INTMEM + 4357 REG_WR(bp, BAR_CSTRORM_INTMEM +
4305 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); 4358 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4306 REG_WR(bp, BAR_CSTRORM_INTMEM + 4359 REG_WR(bp, BAR_CSTRORM_INTMEM +
4307 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), 4360 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4308 U64_HI(section)); 4361 U64_HI(section));
4309 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + 4362 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4310 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func); 4363 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4311 4364
4312 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) 4365 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4313 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4366 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4314 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); 4367 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4315 4368
4316 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4369 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4317} 4370}
@@ -4320,16 +4373,16 @@ static void bnx2x_zero_def_sb(struct bnx2x *bp)
4320{ 4373{
4321 int func = BP_FUNC(bp); 4374 int func = BP_FUNC(bp);
4322 4375
4323 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR + 4376 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4324 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4377 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4325 sizeof(struct tstorm_def_status_block)/4); 4378 sizeof(struct tstorm_def_status_block)/4);
4326 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR + 4379 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4327 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4380 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4328 sizeof(struct ustorm_def_status_block)/4); 4381 sizeof(struct cstorm_def_status_block_u)/4);
4329 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR + 4382 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4330 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4383 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4331 sizeof(struct cstorm_def_status_block)/4); 4384 sizeof(struct cstorm_def_status_block_c)/4);
4332 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR + 4385 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4333 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4386 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4334 sizeof(struct xstorm_def_status_block)/4); 4387 sizeof(struct xstorm_def_status_block)/4);
4335} 4388}
@@ -4381,17 +4434,17 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4381 u_def_status_block); 4434 u_def_status_block);
4382 def_sb->u_def_status_block.status_block_id = sb_id; 4435 def_sb->u_def_status_block.status_block_id = sb_id;
4383 4436
4384 REG_WR(bp, BAR_USTRORM_INTMEM + 4437 REG_WR(bp, BAR_CSTRORM_INTMEM +
4385 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4438 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4386 REG_WR(bp, BAR_USTRORM_INTMEM + 4439 REG_WR(bp, BAR_CSTRORM_INTMEM +
4387 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4440 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4388 U64_HI(section)); 4441 U64_HI(section));
4389 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + 4442 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4390 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4443 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4391 4444
4392 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) 4445 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4393 REG_WR16(bp, BAR_USTRORM_INTMEM + 4446 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4394 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4447 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4395 4448
4396 /* CSTORM */ 4449 /* CSTORM */
4397 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4450 section = ((u64)mapping) + offsetof(struct host_def_status_block,
@@ -4399,16 +4452,16 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4399 def_sb->c_def_status_block.status_block_id = sb_id; 4452 def_sb->c_def_status_block.status_block_id = sb_id;
4400 4453
4401 REG_WR(bp, BAR_CSTRORM_INTMEM + 4454 REG_WR(bp, BAR_CSTRORM_INTMEM +
4402 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4455 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4403 REG_WR(bp, BAR_CSTRORM_INTMEM + 4456 REG_WR(bp, BAR_CSTRORM_INTMEM +
4404 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4457 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4405 U64_HI(section)); 4458 U64_HI(section));
4406 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + 4459 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4407 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4460 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4408 4461
4409 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) 4462 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4410 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4463 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4411 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4464 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4412 4465
4413 /* TSTORM */ 4466 /* TSTORM */
4414 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4467 section = ((u64)mapping) + offsetof(struct host_def_status_block,
@@ -4459,23 +4512,23 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4459 int sb_id = bp->fp[i].sb_id; 4512 int sb_id = bp->fp[i].sb_id;
4460 4513
4461 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4514 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4462 REG_WR8(bp, BAR_USTRORM_INTMEM + 4515 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4463 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4516 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4464 U_SB_ETH_RX_CQ_INDEX), 4517 U_SB_ETH_RX_CQ_INDEX),
4465 bp->rx_ticks/12); 4518 bp->rx_ticks/12);
4466 REG_WR16(bp, BAR_USTRORM_INTMEM + 4519 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4467 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4520 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4468 U_SB_ETH_RX_CQ_INDEX), 4521 U_SB_ETH_RX_CQ_INDEX),
4469 (bp->rx_ticks/12) ? 0 : 1); 4522 (bp->rx_ticks/12) ? 0 : 1);
4470 4523
4471 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4524 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4472 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4525 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4473 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4526 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4474 C_SB_ETH_TX_CQ_INDEX), 4527 C_SB_ETH_TX_CQ_INDEX),
4475 bp->tx_ticks/12); 4528 bp->tx_ticks/12);
4476 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4529 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4477 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4530 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4478 C_SB_ETH_TX_CQ_INDEX), 4531 C_SB_ETH_TX_CQ_INDEX),
4479 (bp->tx_ticks/12) ? 0 : 1); 4532 (bp->tx_ticks/12) ? 0 : 1);
4480 } 4533 }
4481} 4534}
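Both timeout registers now live in CSTORM memory, one per direction. The /12 scaling suggests the hardware counts coalescing time in 12-tick units; anything below one unit degenerates to writing 1 into the HC_DISABLE register, that is, no coalescing on that index. The pairing, isolated (the granularity is an assumption):

#include <stdint.h>

static void hc_program(uint32_t ticks, uint8_t *hw_timeout, uint16_t *hc_disable)
{
	*hw_timeout = (uint8_t)(ticks / 12);	/* presumed 12-tick units */
	*hc_disable = *hw_timeout ? 0 : 1;	/* under one unit: disable */
}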
@@ -4548,6 +4601,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4548 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 4601 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4549 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; 4602 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4550 4603
4604 /* Mark queue as Rx */
4605 fp->is_rx_queue = 1;
4606
4551 /* "next page" elements initialization */ 4607 /* "next page" elements initialization */
4552 /* SGE ring */ 4608 /* SGE ring */
4553 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { 4609 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -4657,17 +4713,21 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
4657 struct bnx2x_fastpath *fp = &bp->fp[j]; 4713 struct bnx2x_fastpath *fp = &bp->fp[j];
4658 4714
4659 for (i = 1; i <= NUM_TX_RINGS; i++) { 4715 for (i = 1; i <= NUM_TX_RINGS; i++) {
4660 struct eth_tx_bd *tx_bd = 4716 struct eth_tx_next_bd *tx_next_bd =
4661 &fp->tx_desc_ring[TX_DESC_CNT * i - 1]; 4717 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4662 4718
4663 tx_bd->addr_hi = 4719 tx_next_bd->addr_hi =
4664 cpu_to_le32(U64_HI(fp->tx_desc_mapping + 4720 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4665 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 4721 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4666 tx_bd->addr_lo = 4722 tx_next_bd->addr_lo =
4667 cpu_to_le32(U64_LO(fp->tx_desc_mapping + 4723 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4668 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 4724 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4669 } 4725 }
4670 4726
4727 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4728 fp->tx_db.data.zero_fill1 = 0;
4729 fp->tx_db.data.prod = 0;
4730
4671 fp->tx_pkt_prod = 0; 4731 fp->tx_pkt_prod = 0;
4672 fp->tx_pkt_cons = 0; 4732 fp->tx_pkt_cons = 0;
4673 fp->tx_bd_prod = 0; 4733 fp->tx_bd_prod = 0;
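The last descriptor slot of every Tx page is still a link, now addressed through the next_bd member of the BD union, and the (i % NUM_TX_RINGS) modulo makes the final page point back at page 0, closing the ring. The same chaining in plain C:

#include <stdint.h>

#define NUM_PAGES 8	/* stand-in for NUM_TX_RINGS */

/* link[i] receives the DMA address that page i's last BD points at. */
static void chain_tx_pages(uint64_t base, uint64_t page_size,
			   uint64_t link[NUM_PAGES])
{
	for (int i = 1; i <= NUM_PAGES; i++)
		link[i - 1] = base + page_size * (uint64_t)(i % NUM_PAGES);
}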
@@ -4703,16 +4763,15 @@ static void bnx2x_init_context(struct bnx2x *bp)
4703{ 4763{
4704 int i; 4764 int i;
4705 4765
4706 for_each_queue(bp, i) { 4766 for_each_rx_queue(bp, i) {
4707 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 4767 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4708 struct bnx2x_fastpath *fp = &bp->fp[i]; 4768 struct bnx2x_fastpath *fp = &bp->fp[i];
4709 u8 cl_id = fp->cl_id; 4769 u8 cl_id = fp->cl_id;
4710 u8 sb_id = fp->sb_id;
4711 4770
4712 context->ustorm_st_context.common.sb_index_numbers = 4771 context->ustorm_st_context.common.sb_index_numbers =
4713 BNX2X_RX_SB_INDEX_NUM; 4772 BNX2X_RX_SB_INDEX_NUM;
4714 context->ustorm_st_context.common.clientId = cl_id; 4773 context->ustorm_st_context.common.clientId = cl_id;
4715 context->ustorm_st_context.common.status_block_id = sb_id; 4774 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4716 context->ustorm_st_context.common.flags = 4775 context->ustorm_st_context.common.flags =
4717 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | 4776 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4718 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); 4777 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
@@ -4728,8 +4787,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4728 U64_LO(fp->rx_desc_mapping); 4787 U64_LO(fp->rx_desc_mapping);
4729 if (!fp->disable_tpa) { 4788 if (!fp->disable_tpa) {
4730 context->ustorm_st_context.common.flags |= 4789 context->ustorm_st_context.common.flags |=
4731 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA | 4790 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4732 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4733 context->ustorm_st_context.common.sge_buff_size = 4791 context->ustorm_st_context.common.sge_buff_size =
4734 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE, 4792 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4735 (u32)0xffff); 4793 (u32)0xffff);
@@ -4737,6 +4795,13 @@ static void bnx2x_init_context(struct bnx2x *bp)
4737 U64_HI(fp->rx_sge_mapping); 4795 U64_HI(fp->rx_sge_mapping);
4738 context->ustorm_st_context.common.sge_page_base_lo = 4796 context->ustorm_st_context.common.sge_page_base_lo =
4739 U64_LO(fp->rx_sge_mapping); 4797 U64_LO(fp->rx_sge_mapping);
4798
4799 context->ustorm_st_context.common.max_sges_for_packet =
4800 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4801 context->ustorm_st_context.common.max_sges_for_packet =
4802 ((context->ustorm_st_context.common.
4803 max_sges_for_packet + PAGES_PER_SGE - 1) &
4804 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
4740 } 4805 }
4741 4806
4742 context->ustorm_ag_context.cdu_usage = 4807 context->ustorm_ag_context.cdu_usage =
@@ -4744,24 +4809,27 @@ static void bnx2x_init_context(struct bnx2x *bp)
4744 CDU_REGION_NUMBER_UCM_AG, 4809 CDU_REGION_NUMBER_UCM_AG,
4745 ETH_CONNECTION_TYPE); 4810 ETH_CONNECTION_TYPE);
4746 4811
4812 context->xstorm_ag_context.cdu_reserved =
4813 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4814 CDU_REGION_NUMBER_XCM_AG,
4815 ETH_CONNECTION_TYPE);
4816 }
4817
4818 for_each_tx_queue(bp, i) {
4819 struct bnx2x_fastpath *fp = &bp->fp[i];
4820 struct eth_context *context =
4821 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
4822
4823 context->cstorm_st_context.sb_index_number =
4824 C_SB_ETH_TX_CQ_INDEX;
4825 context->cstorm_st_context.status_block_id = fp->sb_id;
4826
4747 context->xstorm_st_context.tx_bd_page_base_hi = 4827 context->xstorm_st_context.tx_bd_page_base_hi =
4748 U64_HI(fp->tx_desc_mapping); 4828 U64_HI(fp->tx_desc_mapping);
4749 context->xstorm_st_context.tx_bd_page_base_lo = 4829 context->xstorm_st_context.tx_bd_page_base_lo =
4750 U64_LO(fp->tx_desc_mapping); 4830 U64_LO(fp->tx_desc_mapping);
4751 context->xstorm_st_context.db_data_addr_hi = 4831 context->xstorm_st_context.statistics_data = (fp->cl_id |
4752 U64_HI(fp->tx_prods_mapping);
4753 context->xstorm_st_context.db_data_addr_lo =
4754 U64_LO(fp->tx_prods_mapping);
4755 context->xstorm_st_context.statistics_data = (cl_id |
4756 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); 4832 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4757 context->cstorm_st_context.sb_index_number =
4758 C_SB_ETH_TX_CQ_INDEX;
4759 context->cstorm_st_context.status_block_id = sb_id;
4760
4761 context->xstorm_ag_context.cdu_reserved =
4762 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4763 CDU_REGION_NUMBER_XCM_AG,
4764 ETH_CONNECTION_TYPE);
4765 } 4833 }
4766} 4834}
4767 4835
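max_sges_for_packet moves into the ustorm context (its per-client tstorm twin is deleted in the next hunk): the MTU is rounded up to whole SGE pages, then up to a whole number of SGE elements. The arithmetic in a standalone mock with assumed constants:

#include <stdint.h>
#include <stdio.h>

#define SGE_PAGE_SHIFT		12	/* assumption: 4K SGE pages */
#define SGE_PAGE_SIZE		(1u << SGE_PAGE_SHIFT)
#define SGE_PAGE_ALIGN(x)	(((x) + SGE_PAGE_SIZE - 1) & ~(SGE_PAGE_SIZE - 1))
#define PAGES_PER_SGE_SHIFT	0	/* assumption: one page per SGE */
#define PAGES_PER_SGE		(1u << PAGES_PER_SGE_SHIFT)

int main(void)
{
	uint32_t mtu = 9000;
	uint32_t sges = SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT;

	sges = ((sges + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1))
	       >> PAGES_PER_SGE_SHIFT;
	printf("mtu %u -> %u SGEs\n", mtu, sges);	/* 9000 -> 3 */
	return 0;
}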
@@ -4799,18 +4867,6 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4799 } 4867 }
4800#endif 4868#endif
4801 4869
4802 if (bp->flags & TPA_ENABLE_FLAG) {
4803 tstorm_client.max_sges_for_packet =
4804 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4805 tstorm_client.max_sges_for_packet =
4806 ((tstorm_client.max_sges_for_packet +
4807 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4808 PAGES_PER_SGE_SHIFT;
4809
4810 tstorm_client.config_flags |=
4811 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4812 }
4813
4814 for_each_queue(bp, i) { 4870 for_each_queue(bp, i) {
4815 tstorm_client.statistics_counter_id = bp->fp[i].cl_id; 4871 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4816 4872
@@ -4893,17 +4949,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
4893{ 4949{
4894 int i; 4950 int i;
4895 4951
4896 if (bp->flags & TPA_ENABLE_FLAG) {
4897 struct tstorm_eth_tpa_exist tpa = {0};
4898
4899 tpa.tpa_exist = 1;
4900
4901 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4902 ((u32 *)&tpa)[0]);
4903 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4904 ((u32 *)&tpa)[1]);
4905 }
4906
4907 /* Zero this manually as its initialization is 4952 /* Zero this manually as its initialization is
4908 currently missing in the initTool */ 4953 currently missing in the initTool */
4909 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4954 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -4915,8 +4960,10 @@ static void bnx2x_init_internal_port(struct bnx2x *bp)
4915{ 4960{
4916 int port = BP_PORT(bp); 4961 int port = BP_PORT(bp);
4917 4962
4918 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR); 4963 REG_WR(bp,
4919 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); 4964 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4965 REG_WR(bp,
4966 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
4920 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); 4967 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); 4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4922} 4969}
@@ -4976,6 +5023,12 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
4976 tstorm_config.config_flags = MULTI_FLAGS(bp); 5023 tstorm_config.config_flags = MULTI_FLAGS(bp);
4977 tstorm_config.rss_result_mask = MULTI_MASK; 5024 tstorm_config.rss_result_mask = MULTI_MASK;
4978 } 5025 }
5026
5027 /* Enable TPA if needed */
5028 if (bp->flags & TPA_ENABLE_FLAG)
5029 tstorm_config.config_flags |=
5030 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5031
4979 if (IS_E1HMF(bp)) 5032 if (IS_E1HMF(bp))
4980 tstorm_config.config_flags |= 5033 tstorm_config.config_flags |=
4981 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; 5034 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
@@ -5087,6 +5140,14 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5087 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, 5140 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5088 U64_HI(fp->rx_comp_mapping)); 5141 U64_HI(fp->rx_comp_mapping));
5089 5142
5143 /* Next page */
5144 REG_WR(bp, BAR_USTRORM_INTMEM +
5145 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5146 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5147 REG_WR(bp, BAR_USTRORM_INTMEM +
5148 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5149 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5150
5090 REG_WR16(bp, BAR_USTRORM_INTMEM + 5151 REG_WR16(bp, BAR_USTRORM_INTMEM +
5091 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), 5152 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5092 max_agg_size); 5153 max_agg_size);
@@ -5197,6 +5258,9 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5197 fp->index = i; 5258 fp->index = i;
5198 fp->cl_id = BP_L_ID(bp) + i; 5259 fp->cl_id = BP_L_ID(bp) + i;
5199 fp->sb_id = fp->cl_id; 5260 fp->sb_id = fp->cl_id;
5261 /* Suitable Rx and Tx SBs are served by the same client */
5262 if (i >= bp->num_rx_queues)
5263 fp->cl_id -= bp->num_rx_queues;
5200 DP(NETIF_MSG_IFUP, 5264 DP(NETIF_MSG_IFUP,
5201 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", 5265 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5202 i, bp, fp->status_blk, fp->cl_id, fp->sb_id); 5266 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
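With the fastpath array split into Rx queues followed by Tx queues, a Tx queue keeps its own status block but reuses the client id of its Rx sibling, exactly as the comment states. The mapping in isolation:

static int queue_cl_id(int base_l_id, int i, int num_rx_queues)
{
	int cl_id = base_l_id + i;

	if (i >= num_rx_queues)		/* Tx queue i pairs with the Rx */
		cl_id -= num_rx_queues;	/* queue (i - num_rx_queues) */
	return cl_id;
}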
@@ -5729,10 +5793,10 @@ static int bnx2x_init_common(struct bnx2x *bp)
5729 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); 5793 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5730 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); 5794 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5731 5795
5732 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); 5796 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5733 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); 5797 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5734 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); 5798 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5735 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp)); 5799 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5736 5800
5737 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); 5801 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5738 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); 5802 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
@@ -5765,11 +5829,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5765 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); 5829 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5766 val = (4 << 24) + (0 << 12) + 1024; 5830 val = (4 << 24) + (0 << 12) + 1024;
5767 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 5831 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5768 if (CHIP_IS_E1(bp)) {
5769 /* !!! fix pxp client crdit until excel update */
5770 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5771 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5772 }
5773 5832
5774 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); 5833 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5775 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 5834 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
@@ -5782,19 +5841,14 @@ static int bnx2x_init_common(struct bnx2x *bp)
5782 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); 5841 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5783 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); 5842 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5784 5843
5785 /* PXPCS COMMON comes here */
5786 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); 5844 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5787 /* Reset PCIE errors for debug */ 5845 /* Reset PCIE errors for debug */
5788 REG_WR(bp, 0x2814, 0xffffffff); 5846 REG_WR(bp, 0x2814, 0xffffffff);
5789 REG_WR(bp, 0x3820, 0xffffffff); 5847 REG_WR(bp, 0x3820, 0xffffffff);
5790 5848
5791 /* EMAC0 COMMON comes here */
5792 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); 5849 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5793 /* EMAC1 COMMON comes here */
5794 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); 5850 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5795 /* DBU COMMON comes here */
5796 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); 5851 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5797 /* DBG COMMON comes here */
5798 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); 5852 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5799 5853
5800 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5854 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
@@ -5875,10 +5929,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
5875 5929
5876 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 5930 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5877 5931
5878 /* Port PXP comes here */
5879 bnx2x_init_block(bp, PXP_BLOCK, init_stage); 5932 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5880 /* Port PXP2 comes here */
5881 bnx2x_init_block(bp, PXP2_BLOCK, init_stage); 5933 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5934
5935 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5936 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5937 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5882#ifdef BCM_ISCSI 5938#ifdef BCM_ISCSI
5883 /* Port0 1 5939 /* Port0 1
5884 * Port1 385 */ 5940 * Port1 385 */
@@ -5904,17 +5960,14 @@ static int bnx2x_init_port(struct bnx2x *bp)
5904 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); 5960 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5905 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); 5961 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5906#endif 5962#endif
5907 /* Port CMs come here */
5908 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 5963 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5909 5964
5910 /* Port QM comes here */
5911#ifdef BCM_ISCSI 5965#ifdef BCM_ISCSI
5912 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); 5966 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5913 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); 5967 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5914 5968
5915 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 5969 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5916#endif 5970#endif
5917 /* Port DQ comes here */
5918 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 5971 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5919 5972
5920 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 5973 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -5941,15 +5994,11 @@ static int bnx2x_init_port(struct bnx2x *bp)
5941 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 5994 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5942 5995
5943 5996
5944 /* Port PRS comes here */
5945 bnx2x_init_block(bp, PRS_BLOCK, init_stage); 5997 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5946 /* Port TSDM comes here */ 5998
5947 bnx2x_init_block(bp, TSDM_BLOCK, init_stage); 5999 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5948 /* Port CSDM comes here */
5949 bnx2x_init_block(bp, CSDM_BLOCK, init_stage); 6000 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5950 /* Port USDM comes here */
5951 bnx2x_init_block(bp, USDM_BLOCK, init_stage); 6001 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5952 /* Port XSDM comes here */
5953 bnx2x_init_block(bp, XSDM_BLOCK, init_stage); 6002 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5954 6003
5955 bnx2x_init_block(bp, TSEM_BLOCK, init_stage); 6004 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
@@ -5957,9 +6006,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
5957 bnx2x_init_block(bp, CSEM_BLOCK, init_stage); 6006 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5958 bnx2x_init_block(bp, XSEM_BLOCK, init_stage); 6007 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5959 6008
5960 /* Port UPB comes here */
5961 bnx2x_init_block(bp, UPB_BLOCK, init_stage); 6009 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5962 /* Port XPB comes here */
5963 bnx2x_init_block(bp, XPB_BLOCK, init_stage); 6010 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5964 6011
5965 bnx2x_init_block(bp, PBF_BLOCK, init_stage); 6012 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
@@ -5989,11 +6036,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
5989 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2); 6036 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5990 6037
5991 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10); 6038 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5992 /* Port SRCH comes here */
5993#endif 6039#endif
5994 /* Port CDU comes here */
5995 bnx2x_init_block(bp, CDU_BLOCK, init_stage); 6040 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5996 /* Port CFC comes here */
5997 bnx2x_init_block(bp, CFC_BLOCK, init_stage); 6041 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5998 6042
5999 if (CHIP_IS_E1(bp)) { 6043 if (CHIP_IS_E1(bp)) {
@@ -6010,15 +6054,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
6010 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 6054 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6011 (IS_E1HMF(bp) ? 0xF7 : 0x7)); 6055 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6012 6056
6013 /* Port PXPCS comes here */
6014 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); 6057 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6015 /* Port EMAC0 comes here */
6016 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); 6058 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6017 /* Port EMAC1 comes here */
6018 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); 6059 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6019 /* Port DBU comes here */
6020 bnx2x_init_block(bp, DBU_BLOCK, init_stage); 6060 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6021 /* Port DBG comes here */
6022 bnx2x_init_block(bp, DBG_BLOCK, init_stage); 6061 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6023 6062
6024 bnx2x_init_block(bp, NIG_BLOCK, init_stage); 6063 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
@@ -6040,9 +6079,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6040 } 6079 }
6041 } 6080 }
6042 6081
6043 /* Port MCP comes here */
6044 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 6082 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6045 /* Port DMAE comes here */
6046 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 6083 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6047 6084
6048 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 6085 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
@@ -6302,8 +6339,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6302 /* status blocks */ 6339 /* status blocks */
6303 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), 6340 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6304 bnx2x_fp(bp, i, status_blk_mapping), 6341 bnx2x_fp(bp, i, status_blk_mapping),
6305 sizeof(struct host_status_block) + 6342 sizeof(struct host_status_block));
6306 sizeof(struct eth_tx_db_data));
6307 } 6343 }
6308 /* Rx */ 6344 /* Rx */
6309 for_each_rx_queue(bp, i) { 6345 for_each_rx_queue(bp, i) {
@@ -6332,7 +6368,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6332 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 6368 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6333 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring), 6369 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6334 bnx2x_fp(bp, i, tx_desc_mapping), 6370 bnx2x_fp(bp, i, tx_desc_mapping),
6335 sizeof(struct eth_tx_bd) * NUM_TX_BD); 6371 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6336 } 6372 }
6337 /* end of fastpath */ 6373 /* end of fastpath */
6338 6374
@@ -6383,8 +6419,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6383 /* status blocks */ 6419 /* status blocks */
6384 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), 6420 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6385 &bnx2x_fp(bp, i, status_blk_mapping), 6421 &bnx2x_fp(bp, i, status_blk_mapping),
6386 sizeof(struct host_status_block) + 6422 sizeof(struct host_status_block));
6387 sizeof(struct eth_tx_db_data));
6388 } 6423 }
6389 /* Rx */ 6424 /* Rx */
6390 for_each_rx_queue(bp, i) { 6425 for_each_rx_queue(bp, i) {
@@ -6411,19 +6446,12 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6411 /* Tx */ 6446 /* Tx */
6412 for_each_tx_queue(bp, i) { 6447 for_each_tx_queue(bp, i) {
6413 6448
6414 bnx2x_fp(bp, i, hw_tx_prods) =
6415 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6416
6417 bnx2x_fp(bp, i, tx_prods_mapping) =
6418 bnx2x_fp(bp, i, status_blk_mapping) +
6419 sizeof(struct host_status_block);
6420
6421 /* fastpath tx rings: tx_buf tx_desc */ 6449 /* fastpath tx rings: tx_buf tx_desc */
6422 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), 6450 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6423 sizeof(struct sw_tx_bd) * NUM_TX_BD); 6451 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6424 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring), 6452 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6425 &bnx2x_fp(bp, i, tx_desc_mapping), 6453 &bnx2x_fp(bp, i, tx_desc_mapping),
6426 sizeof(struct eth_tx_bd) * NUM_TX_BD); 6454 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6427 } 6455 }
6428 /* end of fastpath */ 6456 /* end of fastpath */
6429 6457
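
Both the free and alloc paths above now size the Tx ring in units of union eth_tx_bd_types instead of struct eth_tx_bd, and the per-queue hw_tx_prods area that used to be appended to the status block allocation is gone entirely. A minimal sketch of the idea behind the union (field lists abbreviated and assumed; the real layouts live in bnx2x_hsi.h):

	/* Sketch only -- abbreviated stand-ins for the bnx2x_hsi.h types. */
	struct eth_tx_start_bd { __le32 addr_hi, addr_lo; __le16 nbd, nbytes, vlan; /* ... */ };
	struct eth_tx_parse_bd { u8 global_data; /* csum/TSO parsing info ... */ };
	struct eth_tx_bd { __le32 addr_hi, addr_lo; __le16 total_pkt_bytes, nbytes; /* ... */ };

	union eth_tx_bd_types {
		struct eth_tx_start_bd start_bd;	/* first BD of every packet */
		struct eth_tx_bd reg_bd;		/* plain data BD            */
		struct eth_tx_parse_bd parse_bd;	/* parsing info BD          */
	};

	/* Every ring slot has the same width, so one allocation fits all
	 * three BD kinds: sizeof(union eth_tx_bd_types) * NUM_TX_BD. */
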
@@ -6600,7 +6628,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6600 for_each_queue(bp, i) { 6628 for_each_queue(bp, i) {
6601 struct bnx2x_fastpath *fp = &bp->fp[i]; 6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6602 6630
6603 sprintf(fp->name, "%s.fp%d", bp->dev->name, i); 6631 if (i < bp->num_rx_queues)
6632 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6633 else
6634 sprintf(fp->name, "%s-tx-%d",
6635 bp->dev->name, i - bp->num_rx_queues);
6636
6604 rc = request_irq(bp->msix_table[i + offset].vector, 6637 rc = request_irq(bp->msix_table[i + offset].vector,
6605 bnx2x_msix_fp_int, 0, fp->name, fp); 6638 bnx2x_msix_fp_int, 0, fp->name, fp);
6606 if (rc) { 6639 if (rc) {
@@ -6613,16 +6646,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6613 } 6646 }
6614 6647
6615 i = BNX2X_NUM_QUEUES(bp); 6648 i = BNX2X_NUM_QUEUES(bp);
6616 if (is_multi(bp)) 6649 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6617 printk(KERN_INFO PFX 6650 " ... fp[%d] %d\n",
6618 "%s: using MSI-X IRQs: sp %d fp %d - %d\n", 6651 bp->dev->name, bp->msix_table[0].vector,
6619 bp->dev->name, bp->msix_table[0].vector, 6652 0, bp->msix_table[offset].vector,
6620 bp->msix_table[offset].vector, 6653 i - 1, bp->msix_table[offset + i - 1].vector);
6621 bp->msix_table[offset + i - 1].vector);
6622 else
6623 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6624 bp->dev->name, bp->msix_table[0].vector,
6625 bp->msix_table[offset + i - 1].vector);
6626 6654
6627 return 0; 6655 return 0;
6628} 6656}
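
The naming above reflects the new fastpath layout: with separate MSI-X vectors for Rx and Tx, fp[0] through fp[num_rx_queues - 1] are Rx queues and the Tx queues follow them, which is why later hunks address a Tx fastpath as fp[i + bp->num_rx_queues] and derive its doorbell index as fp->index - bp->num_rx_queues. A hedged sketch of the mapping (the helper names are hypothetical):

	/* Hypothetical helpers spelling out the fp[] layout this patch assumes:
	 *   fp[0 .. num_rx_queues-1]            Rx queues  ("%s-rx-%d")
	 *   fp[num_rx_queues .. num_queues-1]   Tx queues  ("%s-tx-%d")
	 */
	static inline int bnx2x_fp_is_rx(struct bnx2x *bp, int i)
	{
		return i < bp->num_rx_queues;
	}

	static inline int bnx2x_fp_tx_idx(struct bnx2x *bp, int i)
	{
		return i - bp->num_rx_queues;	/* Tx queue / doorbell index */
	}
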
@@ -6730,7 +6758,8 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6730 config->config_table[0].target_table_entry.flags = 0; 6758 config->config_table[0].target_table_entry.flags = 0;
6731 else 6759 else
6732 CAM_INVALIDATE(config->config_table[0]); 6760 CAM_INVALIDATE(config->config_table[0]);
6733 config->config_table[0].target_table_entry.client_id = 0; 6761 config->config_table[0].target_table_entry.clients_bit_vector =
6762 cpu_to_le32(1 << BP_L_ID(bp));
6734 config->config_table[0].target_table_entry.vlan_id = 0; 6763 config->config_table[0].target_table_entry.vlan_id = 0;
6735 6764
6736 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 6765 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
@@ -6749,7 +6778,8 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6749 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6778 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6750 else 6779 else
6751 CAM_INVALIDATE(config->config_table[1]); 6780 CAM_INVALIDATE(config->config_table[1]);
6752 config->config_table[1].target_table_entry.client_id = 0; 6781 config->config_table[1].target_table_entry.clients_bit_vector =
6782 cpu_to_le32(1 << BP_L_ID(bp));
6753 config->config_table[1].target_table_entry.vlan_id = 0; 6783 config->config_table[1].target_table_entry.vlan_id = 0;
6754 6784
6755 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 6785 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
@@ -6762,11 +6792,6 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6762 struct mac_configuration_cmd_e1h *config = 6792 struct mac_configuration_cmd_e1h *config =
6763 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6793 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6764 6794
6765 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6766 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6767 return;
6768 }
6769
6770 /* CAM allocation for E1H 6795 /* CAM allocation for E1H
6771 * unicasts: by func number 6796 * unicasts: by func number
6772 * multicast: 20+FUNC*20, 20 each 6797 * multicast: 20+FUNC*20, 20 each
@@ -6783,7 +6808,8 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6783 swab16(*(u16 *)&bp->dev->dev_addr[2]); 6808 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6784 config->config_table[0].lsb_mac_addr = 6809 config->config_table[0].lsb_mac_addr =
6785 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6810 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6786 config->config_table[0].client_id = BP_L_ID(bp); 6811 config->config_table[0].clients_bit_vector =
6812 cpu_to_le32(1 << BP_L_ID(bp));
6787 config->config_table[0].vlan_id = 0; 6813 config->config_table[0].vlan_id = 0;
6788 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6814 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6789 if (set) 6815 if (set)
@@ -6880,49 +6906,94 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6880 6906
6881static int bnx2x_poll(struct napi_struct *napi, int budget); 6907static int bnx2x_poll(struct napi_struct *napi, int budget);
6882 6908
6883static void bnx2x_set_int_mode(struct bnx2x *bp) 6909static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
6910 int *num_tx_queues_out)
6911{
6912 int _num_rx_queues = 0, _num_tx_queues = 0;
6913
6914 switch (bp->multi_mode) {
6915 case ETH_RSS_MODE_DISABLED:
6916 _num_rx_queues = 1;
6917 _num_tx_queues = 1;
6918 break;
6919
6920 case ETH_RSS_MODE_REGULAR:
6921 if (num_rx_queues)
6922 _num_rx_queues = min_t(u32, num_rx_queues,
6923 BNX2X_MAX_QUEUES(bp));
6924 else
6925 _num_rx_queues = min_t(u32, num_online_cpus(),
6926 BNX2X_MAX_QUEUES(bp));
6927
6928 if (num_tx_queues)
6929 _num_tx_queues = min_t(u32, num_tx_queues,
6930 BNX2X_MAX_QUEUES(bp));
6931 else
6932 _num_tx_queues = min_t(u32, num_online_cpus(),
6933 BNX2X_MAX_QUEUES(bp));
6934
 6935 /* There must not be more Tx queues than Rx queues */
6936 if (_num_tx_queues > _num_rx_queues) {
6937 BNX2X_ERR("number of tx queues (%d) > "
6938 "number of rx queues (%d)"
6939 " defaulting to %d\n",
6940 _num_tx_queues, _num_rx_queues,
6941 _num_rx_queues);
6942 _num_tx_queues = _num_rx_queues;
6943 }
6944 break;
6945
6946
6947 default:
6948 _num_rx_queues = 1;
6949 _num_tx_queues = 1;
6950 break;
6951 }
6952
6953 *num_rx_queues_out = _num_rx_queues;
6954 *num_tx_queues_out = _num_tx_queues;
6955}
6956
6957static int bnx2x_set_int_mode(struct bnx2x *bp)
6884{ 6958{
6885 int num_queues; 6959 int rc = 0;
6886 6960
6887 switch (int_mode) { 6961 switch (int_mode) {
6888 case INT_MODE_INTx: 6962 case INT_MODE_INTx:
6889 case INT_MODE_MSI: 6963 case INT_MODE_MSI:
6890 num_queues = 1; 6964 bp->num_rx_queues = 1;
6891 bp->num_rx_queues = num_queues; 6965 bp->num_tx_queues = 1;
6892 bp->num_tx_queues = num_queues; 6966 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6893 DP(NETIF_MSG_IFUP,
6894 "set number of queues to %d\n", num_queues);
6895 break; 6967 break;
6896 6968
6897 case INT_MODE_MSIX: 6969 case INT_MODE_MSIX:
6898 default: 6970 default:
6899 if (bp->multi_mode == ETH_RSS_MODE_REGULAR) 6971 /* Set interrupt mode according to bp->multi_mode value */
6900 num_queues = min_t(u32, num_online_cpus(), 6972 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
6901 BNX2X_MAX_QUEUES(bp)); 6973 &bp->num_tx_queues);
6902 else 6974
6903 num_queues = 1; 6975 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
6904 bp->num_rx_queues = num_queues;
6905 bp->num_tx_queues = num_queues;
6906 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6907 " number of tx queues to %d\n",
6908 bp->num_rx_queues, bp->num_tx_queues); 6976 bp->num_rx_queues, bp->num_tx_queues);
6977
6909 /* if we can't use MSI-X we only need one fp, 6978 /* if we can't use MSI-X we only need one fp,
6910 * so try to enable MSI-X with the requested number of fp's 6979 * so try to enable MSI-X with the requested number of fp's
6911 * and fallback to MSI or legacy INTx with one fp 6980 * and fallback to MSI or legacy INTx with one fp
6912 */ 6981 */
6913 if (bnx2x_enable_msix(bp)) { 6982 rc = bnx2x_enable_msix(bp);
6983 if (rc) {
6914 /* failed to enable MSI-X */ 6984 /* failed to enable MSI-X */
6915 num_queues = 1;
6916 bp->num_rx_queues = num_queues;
6917 bp->num_tx_queues = num_queues;
6918 if (bp->multi_mode) 6985 if (bp->multi_mode)
6919 BNX2X_ERR("Multi requested but failed to " 6986 BNX2X_ERR("Multi requested but failed to "
6920 "enable MSI-X set number of " 6987 "enable MSI-X (rx %d tx %d), "
6921 "queues to %d\n", num_queues); 6988 "set number of queues to 1\n",
6989 bp->num_rx_queues, bp->num_tx_queues);
6990 bp->num_rx_queues = 1;
6991 bp->num_tx_queues = 1;
6922 } 6992 }
6923 break; 6993 break;
6924 } 6994 }
6925 bp->dev->real_num_tx_queues = bp->num_tx_queues; 6995 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6996 return rc;
6926} 6997}
6927 6998
6928static void bnx2x_set_rx_mode(struct net_device *dev); 6999static void bnx2x_set_rx_mode(struct net_device *dev);
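
The clamping rules in bnx2x_set_int_mode_msix() are easiest to see with numbers. A worked example, assuming BNX2X_MAX_QUEUES(bp) is 16 and eight CPUs are online:

	/* modparams: num_rx_queues = 0, num_tx_queues = 24
	 *   rx: parameter is 0, so use min(num_online_cpus(), 16) = 8
	 *   tx: min(24, 16) = 16
	 *   16 Tx > 8 Rx, so Tx is forced down: rx = 8, tx = 8
	 */
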
@@ -6931,16 +7002,16 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6931static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 7002static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6932{ 7003{
6933 u32 load_code; 7004 u32 load_code;
6934 int i, rc = 0; 7005 int i, rc;
7006
6935#ifdef BNX2X_STOP_ON_ERROR 7007#ifdef BNX2X_STOP_ON_ERROR
6936 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6937 if (unlikely(bp->panic)) 7008 if (unlikely(bp->panic))
6938 return -EPERM; 7009 return -EPERM;
6939#endif 7010#endif
6940 7011
6941 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7012 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6942 7013
6943 bnx2x_set_int_mode(bp); 7014 rc = bnx2x_set_int_mode(bp);
6944 7015
6945 if (bnx2x_alloc_mem(bp)) 7016 if (bnx2x_alloc_mem(bp))
6946 return -ENOMEM; 7017 return -ENOMEM;
@@ -6953,17 +7024,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6953 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 7024 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6954 bnx2x_poll, 128); 7025 bnx2x_poll, 128);
6955 7026
6956#ifdef BNX2X_STOP_ON_ERROR
6957 for_each_rx_queue(bp, i) {
6958 struct bnx2x_fastpath *fp = &bp->fp[i];
6959
6960 fp->poll_no_work = 0;
6961 fp->poll_calls = 0;
6962 fp->poll_max_calls = 0;
6963 fp->poll_complete = 0;
6964 fp->poll_exit = 0;
6965 }
6966#endif
6967 bnx2x_napi_enable(bp); 7027 bnx2x_napi_enable(bp);
6968 7028
6969 if (bp->flags & USING_MSIX_FLAG) { 7029 if (bp->flags & USING_MSIX_FLAG) {
@@ -6973,6 +7033,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6973 goto load_error1; 7033 goto load_error1;
6974 } 7034 }
6975 } else { 7035 } else {
 7036 /* Fall back to INTx if MSI-X could not be enabled due to lack
 7037 of memory (in bnx2x_set_int_mode()) */
6976 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) 7038 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6977 bnx2x_enable_msi(bp); 7039 bnx2x_enable_msi(bp);
6978 bnx2x_ack_int(bp); 7040 bnx2x_ack_int(bp);
@@ -7065,17 +7127,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7065 bp->state = BNX2X_STATE_DISABLED; 7127 bp->state = BNX2X_STATE_DISABLED;
7066 } 7128 }
7067 7129
7068 if (bp->state == BNX2X_STATE_OPEN) 7130 if (bp->state == BNX2X_STATE_OPEN) {
7069 for_each_nondefault_queue(bp, i) { 7131 for_each_nondefault_queue(bp, i) {
7070 rc = bnx2x_setup_multi(bp, i); 7132 rc = bnx2x_setup_multi(bp, i);
7071 if (rc) 7133 if (rc)
7072 goto load_error3; 7134 goto load_error3;
7073 } 7135 }
7074 7136
7075 if (CHIP_IS_E1(bp)) 7137 if (CHIP_IS_E1(bp))
7076 bnx2x_set_mac_addr_e1(bp, 1); 7138 bnx2x_set_mac_addr_e1(bp, 1);
7077 else 7139 else
7078 bnx2x_set_mac_addr_e1h(bp, 1); 7140 bnx2x_set_mac_addr_e1h(bp, 1);
7141 }
7079 7142
7080 if (bp->port.pmf) 7143 if (bp->port.pmf)
7081 bnx2x_initial_phy_init(bp, load_mode); 7144 bnx2x_initial_phy_init(bp, load_mode);
@@ -7083,14 +7146,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7083 /* Start fast path */ 7146 /* Start fast path */
7084 switch (load_mode) { 7147 switch (load_mode) {
7085 case LOAD_NORMAL: 7148 case LOAD_NORMAL:
7086 /* Tx queue should be only reenabled */ 7149 if (bp->state == BNX2X_STATE_OPEN) {
7087 netif_tx_wake_all_queues(bp->dev); 7150 /* Tx queue should be only reenabled */
7151 netif_tx_wake_all_queues(bp->dev);
7152 }
7088 /* Initialize the receive filter. */ 7153 /* Initialize the receive filter. */
7089 bnx2x_set_rx_mode(bp->dev); 7154 bnx2x_set_rx_mode(bp->dev);
7090 break; 7155 break;
7091 7156
7092 case LOAD_OPEN: 7157 case LOAD_OPEN:
7093 netif_tx_start_all_queues(bp->dev); 7158 netif_tx_start_all_queues(bp->dev);
7159 if (bp->state != BNX2X_STATE_OPEN)
7160 netif_tx_disable(bp->dev);
7094 /* Initialize the receive filter. */ 7161 /* Initialize the receive filter. */
7095 bnx2x_set_rx_mode(bp->dev); 7162 bnx2x_set_rx_mode(bp->dev);
7096 break; 7163 break;
@@ -9184,18 +9251,19 @@ static int bnx2x_get_coalesce(struct net_device *dev,
9184 return 0; 9251 return 0;
9185} 9252}
9186 9253
 9254#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximum coalescing timeout in us */
9187static int bnx2x_set_coalesce(struct net_device *dev, 9255static int bnx2x_set_coalesce(struct net_device *dev,
9188 struct ethtool_coalesce *coal) 9256 struct ethtool_coalesce *coal)
9189{ 9257{
9190 struct bnx2x *bp = netdev_priv(dev); 9258 struct bnx2x *bp = netdev_priv(dev);
9191 9259
9192 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 9260 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9193 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) 9261 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9194 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; 9262 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9195 9263
9196 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 9264 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9197 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) 9265 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9198 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; 9266 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9199 9267
9200 if (netif_running(dev)) 9268 if (netif_running(dev))
9201 bnx2x_update_coalesce(bp); 9269 bnx2x_update_coalesce(bp);
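
For reference, the new bound works out to 0xf0 * 12 = 240 * 12 = 2880 microseconds, so a request such as ethtool -C ethX rx-usecs 5000 is silently clamped to 2880 before bnx2x_update_coalesce() reprograms the timers.
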
@@ -9554,12 +9622,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9554 unsigned int pkt_size, num_pkts, i; 9622 unsigned int pkt_size, num_pkts, i;
9555 struct sk_buff *skb; 9623 struct sk_buff *skb;
9556 unsigned char *packet; 9624 unsigned char *packet;
9557 struct bnx2x_fastpath *fp = &bp->fp[0]; 9625 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9626 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9558 u16 tx_start_idx, tx_idx; 9627 u16 tx_start_idx, tx_idx;
9559 u16 rx_start_idx, rx_idx; 9628 u16 rx_start_idx, rx_idx;
9560 u16 pkt_prod; 9629 u16 pkt_prod, bd_prod;
9561 struct sw_tx_bd *tx_buf; 9630 struct sw_tx_bd *tx_buf;
9562 struct eth_tx_bd *tx_bd; 9631 struct eth_tx_start_bd *tx_start_bd;
9632 struct eth_tx_parse_bd *pbd = NULL;
9563 dma_addr_t mapping; 9633 dma_addr_t mapping;
9564 union eth_rx_cqe *cqe; 9634 union eth_rx_cqe *cqe;
9565 u8 cqe_fp_flags; 9635 u8 cqe_fp_flags;
@@ -9591,57 +9661,64 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9591 } 9661 }
9592 packet = skb_put(skb, pkt_size); 9662 packet = skb_put(skb, pkt_size);
9593 memcpy(packet, bp->dev->dev_addr, ETH_ALEN); 9663 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9594 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN)); 9664 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9665 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9595 for (i = ETH_HLEN; i < pkt_size; i++) 9666 for (i = ETH_HLEN; i < pkt_size; i++)
9596 packet[i] = (unsigned char) (i & 0xff); 9667 packet[i] = (unsigned char) (i & 0xff);
9597 9668
9598 /* send the loopback packet */ 9669 /* send the loopback packet */
9599 num_pkts = 0; 9670 num_pkts = 0;
9600 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb); 9671 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9601 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb); 9672 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9602 9673
9603 pkt_prod = fp->tx_pkt_prod++; 9674 pkt_prod = fp_tx->tx_pkt_prod++;
9604 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; 9675 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9605 tx_buf->first_bd = fp->tx_bd_prod; 9676 tx_buf->first_bd = fp_tx->tx_bd_prod;
9606 tx_buf->skb = skb; 9677 tx_buf->skb = skb;
9678 tx_buf->flags = 0;
9607 9679
9608 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)]; 9680 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9681 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9609 mapping = pci_map_single(bp->pdev, skb->data, 9682 mapping = pci_map_single(bp->pdev, skb->data,
9610 skb_headlen(skb), PCI_DMA_TODEVICE); 9683 skb_headlen(skb), PCI_DMA_TODEVICE);
9611 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 9684 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9612 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 9685 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9613 tx_bd->nbd = cpu_to_le16(1); 9686 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9614 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 9687 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9615 tx_bd->vlan = cpu_to_le16(pkt_prod); 9688 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9616 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD | 9689 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9617 ETH_TX_BD_FLAGS_END_BD); 9690 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9618 tx_bd->general_data = ((UNICAST_ADDRESS << 9691 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9619 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); 9692
9693 /* turn on parsing and get a BD */
9694 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9695 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9696
9697 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9620 9698
9621 wmb(); 9699 wmb();
9622 9700
9623 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1); 9701 fp_tx->tx_db.data.prod += 2;
9624 mb(); /* FW restriction: must not reorder writing nbd and packets */ 9702 barrier();
9625 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1); 9703 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
9626 DOORBELL(bp, fp->index, 0);
9627 9704
9628 mmiowb(); 9705 mmiowb();
9629 9706
9630 num_pkts++; 9707 num_pkts++;
9631 fp->tx_bd_prod++; 9708 fp_tx->tx_bd_prod += 2; /* start + pbd */
9632 bp->dev->trans_start = jiffies; 9709 bp->dev->trans_start = jiffies;
9633 9710
9634 udelay(100); 9711 udelay(100);
9635 9712
9636 tx_idx = le16_to_cpu(*fp->tx_cons_sb); 9713 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9637 if (tx_idx != tx_start_idx + num_pkts) 9714 if (tx_idx != tx_start_idx + num_pkts)
9638 goto test_loopback_exit; 9715 goto test_loopback_exit;
9639 9716
9640 rx_idx = le16_to_cpu(*fp->rx_cons_sb); 9717 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9641 if (rx_idx != rx_start_idx + num_pkts) 9718 if (rx_idx != rx_start_idx + num_pkts)
9642 goto test_loopback_exit; 9719 goto test_loopback_exit;
9643 9720
9644 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)]; 9721 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9645 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 9722 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9646 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) 9723 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9647 goto test_loopback_rx_exit; 9724 goto test_loopback_rx_exit;
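
The loopback test now rings the Tx doorbell the same way the fast path does: accumulate the BD count in the structured half of fp->tx_db and write the raw 32-bit view in a single MMIO access, replacing the old two-step bds_prod/packets_prod update and its extra barrier. A hedged sketch of the sequence as a helper (the wrapper itself is hypothetical; the calls are the ones used above):

	/* Hypothetical wrapper around the doorbell sequence used above.
	 * The caller has already wmb()'d the BD writes. */
	static inline void bnx2x_tx_ring_db(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp, int nbd)
	{
		fp->tx_db.data.prod += nbd;	/* producer, in BD units       */
		barrier();			/* no compiler reordering      */
		DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
		mmiowb();			/* order MMIO vs. later unlock */
	}
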
@@ -9650,7 +9727,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9650 if (len != pkt_size) 9727 if (len != pkt_size)
9651 goto test_loopback_rx_exit; 9728 goto test_loopback_rx_exit;
9652 9729
9653 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)]; 9730 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9654 skb = rx_buf->skb; 9731 skb = rx_buf->skb;
9655 skb_reserve(skb, cqe->fast_path_cqe.placement_offset); 9732 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9656 for (i = ETH_HLEN; i < pkt_size; i++) 9733 for (i = ETH_HLEN; i < pkt_size; i++)
@@ -9661,14 +9738,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9661 9738
9662test_loopback_rx_exit: 9739test_loopback_rx_exit:
9663 9740
9664 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons); 9741 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9665 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod); 9742 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9666 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons); 9743 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9667 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod); 9744 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9668 9745
9669 /* Update producers */ 9746 /* Update producers */
9670 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, 9747 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9671 fp->rx_sge_prod); 9748 fp_rx->rx_sge_prod);
9672 9749
9673test_loopback_exit: 9750test_loopback_exit:
9674 bp->link_params.loopback_mode = LOOPBACK_NONE; 9751 bp->link_params.loopback_mode = LOOPBACK_NONE;
@@ -10001,7 +10078,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10001 case ETH_SS_STATS: 10078 case ETH_SS_STATS:
10002 if (is_multi(bp)) { 10079 if (is_multi(bp)) {
10003 k = 0; 10080 k = 0;
10004 for_each_queue(bp, i) { 10081 for_each_rx_queue(bp, i) {
10005 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 10082 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10006 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 10083 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10007 bnx2x_q_stats_arr[j].string, i); 10084 bnx2x_q_stats_arr[j].string, i);
@@ -10035,7 +10112,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
10035 int i, num_stats; 10112 int i, num_stats;
10036 10113
10037 if (is_multi(bp)) { 10114 if (is_multi(bp)) {
10038 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp); 10115 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10039 if (!IS_E1HMF_MODE_STAT(bp)) 10116 if (!IS_E1HMF_MODE_STAT(bp))
10040 num_stats += BNX2X_NUM_STATS; 10117 num_stats += BNX2X_NUM_STATS;
10041 } else { 10118 } else {
@@ -10060,7 +10137,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10060 10137
10061 if (is_multi(bp)) { 10138 if (is_multi(bp)) {
10062 k = 0; 10139 k = 0;
10063 for_each_queue(bp, i) { 10140 for_each_rx_queue(bp, i) {
10064 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 10141 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10065 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 10142 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10066 if (bnx2x_q_stats_arr[j].size == 0) { 10143 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10273,15 +10350,11 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
10273 goto poll_panic; 10350 goto poll_panic;
10274#endif 10351#endif
10275 10352
10276 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10277 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); 10353 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10278 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); 10354 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10279 10355
10280 bnx2x_update_fpsb_idx(fp); 10356 bnx2x_update_fpsb_idx(fp);
10281 10357
10282 if (bnx2x_has_tx_work(fp))
10283 bnx2x_tx_int(fp);
10284
10285 if (bnx2x_has_rx_work(fp)) { 10358 if (bnx2x_has_rx_work(fp)) {
10286 work_done = bnx2x_rx_int(fp, budget); 10359 work_done = bnx2x_rx_int(fp, budget);
10287 10360
@@ -10290,11 +10363,11 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
10290 goto poll_again; 10363 goto poll_again;
10291 } 10364 }
10292 10365
10293 /* BNX2X_HAS_WORK() reads the status block, thus we need to 10366 /* bnx2x_has_rx_work() reads the status block, thus we need to
10294 * ensure that status block indices have been actually read 10367 * ensure that status block indices have been actually read
10295 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK) 10368 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10296 * so that we won't write the "newer" value of the status block to IGU 10369 * so that we won't write the "newer" value of the status block to IGU
10297 * (if there was a DMA right after BNX2X_HAS_WORK and 10370 * (if there was a DMA right after bnx2x_has_rx_work and
10298 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx) 10371 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10299 * may be postponed to right before bnx2x_ack_sb). In this case 10372 * may be postponed to right before bnx2x_ack_sb). In this case
10300 * there will never be another interrupt until there is another update 10373 * there will never be another interrupt until there is another update
@@ -10302,7 +10375,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
10302 */ 10375 */
10303 rmb(); 10376 rmb();
10304 10377
10305 if (!BNX2X_HAS_WORK(fp)) { 10378 if (!bnx2x_has_rx_work(fp)) {
10306#ifdef BNX2X_STOP_ON_ERROR 10379#ifdef BNX2X_STOP_ON_ERROR
10307poll_panic: 10380poll_panic:
10308#endif 10381#endif
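
The comment block above compresses to a small ordering contract between reading the status block and acking it; if the rmb() were missing, the emptiness check could run against a stale index, the stale value would be written back to the IGU, and the queue would never receive another interrupt. A condensed sketch of the required ordering:

	bnx2x_update_fpsb_idx(fp);	/* 1. read SB indices from memory   */
	/* ... consume Rx work ... */
	rmb();				/* 2. the reads really happened     */
	if (!bnx2x_has_rx_work(fp)) {	/* 3. re-check with fresh indices   */
		napi_complete(napi);
		/* 4. ack exactly the index value step 3 tested, so the
		 *    IGU interrupts again on the next SB update */
	}
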
@@ -10327,10 +10400,11 @@ poll_again:
10327 */ 10400 */
10328static noinline u16 bnx2x_tx_split(struct bnx2x *bp, 10401static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10329 struct bnx2x_fastpath *fp, 10402 struct bnx2x_fastpath *fp,
10330 struct eth_tx_bd **tx_bd, u16 hlen, 10403 struct sw_tx_bd *tx_buf,
10404 struct eth_tx_start_bd **tx_bd, u16 hlen,
10331 u16 bd_prod, int nbd) 10405 u16 bd_prod, int nbd)
10332{ 10406{
10333 struct eth_tx_bd *h_tx_bd = *tx_bd; 10407 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10334 struct eth_tx_bd *d_tx_bd; 10408 struct eth_tx_bd *d_tx_bd;
10335 dma_addr_t mapping; 10409 dma_addr_t mapping;
10336 int old_len = le16_to_cpu(h_tx_bd->nbytes); 10410 int old_len = le16_to_cpu(h_tx_bd->nbytes);
@@ -10346,7 +10420,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10346 /* now get a new data BD 10420 /* now get a new data BD
10347 * (after the pbd) and fill it */ 10421 * (after the pbd) and fill it */
10348 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 10422 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10349 d_tx_bd = &fp->tx_desc_ring[bd_prod]; 10423 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10350 10424
10351 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), 10425 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10352 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; 10426 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
@@ -10354,17 +10428,16 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10354 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 10428 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10355 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 10429 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10356 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); 10430 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10357 d_tx_bd->vlan = 0; 10431
10358 /* this marks the BD as one that has no individual mapping 10432 /* this marks the BD as one that has no individual mapping */
10359 * the FW ignores this flag in a BD not marked start 10433 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10360 */ 10434
10361 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10362 DP(NETIF_MSG_TX_QUEUED, 10435 DP(NETIF_MSG_TX_QUEUED,
10363 "TSO split data size is %d (%x:%x)\n", 10436 "TSO split data size is %d (%x:%x)\n",
10364 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); 10437 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10365 10438
10366 /* update tx_bd for marking the last BD flag */ 10439 /* update tx_bd */
10367 *tx_bd = d_tx_bd; 10440 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10368 10441
10369 return bd_prod; 10442 return bd_prod;
10370} 10443}
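
The split data BD shares the head skb's DMA mapping, which is why the marker moved from a hardware BD flag (ETH_TX_BD_FLAGS_SW_LSO on the data BD) to BNX2X_TSO_SPLIT_BD on the software ring entry: the completion path must not unmap that BD separately. A worked example with hypothetical sizes:

	/* skb_headlen = 200 bytes, parsed headers hlen = 66 bytes:
	 *   head BD:  addr = mapping,      nbytes = 66   (headers only)
	 *   split BD: addr = mapping + 66, nbytes = 134  (rest of the head)
	 * Only the head BD owns the mapping; BNX2X_TSO_SPLIT_BD on the
	 * sw_tx_bd tells the completion path to unmap just once.
	 */
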
@@ -10499,18 +10572,19 @@ exit_lbl:
10499static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 10572static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10500{ 10573{
10501 struct bnx2x *bp = netdev_priv(dev); 10574 struct bnx2x *bp = netdev_priv(dev);
10502 struct bnx2x_fastpath *fp; 10575 struct bnx2x_fastpath *fp, *fp_stat;
10503 struct netdev_queue *txq; 10576 struct netdev_queue *txq;
10504 struct sw_tx_bd *tx_buf; 10577 struct sw_tx_bd *tx_buf;
10505 struct eth_tx_bd *tx_bd; 10578 struct eth_tx_start_bd *tx_start_bd;
10579 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10506 struct eth_tx_parse_bd *pbd = NULL; 10580 struct eth_tx_parse_bd *pbd = NULL;
10507 u16 pkt_prod, bd_prod; 10581 u16 pkt_prod, bd_prod;
10508 int nbd, fp_index; 10582 int nbd, fp_index;
10509 dma_addr_t mapping; 10583 dma_addr_t mapping;
10510 u32 xmit_type = bnx2x_xmit_type(bp, skb); 10584 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10511 int vlan_off = (bp->e1hov ? 4 : 0);
10512 int i; 10585 int i;
10513 u8 hlen = 0; 10586 u8 hlen = 0;
10587 __le16 pkt_size = 0;
10514 10588
10515#ifdef BNX2X_STOP_ON_ERROR 10589#ifdef BNX2X_STOP_ON_ERROR
10516 if (unlikely(bp->panic)) 10590 if (unlikely(bp->panic))
@@ -10520,10 +10594,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10520 fp_index = skb_get_queue_mapping(skb); 10594 fp_index = skb_get_queue_mapping(skb);
10521 txq = netdev_get_tx_queue(dev, fp_index); 10595 txq = netdev_get_tx_queue(dev, fp_index);
10522 10596
10523 fp = &bp->fp[fp_index]; 10597 fp = &bp->fp[fp_index + bp->num_rx_queues];
10598 fp_stat = &bp->fp[fp_index];
10524 10599
10525 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { 10600 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10526 fp->eth_q_stats.driver_xoff++, 10601 fp_stat->eth_q_stats.driver_xoff++;
10527 netif_tx_stop_queue(txq); 10602 netif_tx_stop_queue(txq);
10528 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 10603 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10529 return NETDEV_TX_BUSY; 10604 return NETDEV_TX_BUSY;
@@ -10552,7 +10627,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10552 10627
10553 /* 10628 /*
10554 Please read carefully. First we use one BD which we mark as start, 10629 Please read carefully. First we use one BD which we mark as start,
10555 then for TSO or xsum we have a parsing info BD, 10630 then we have a parsing info BD (used for TSO or xsum),
10556 and only then we have the rest of the TSO BDs. 10631 and only then we have the rest of the TSO BDs.
10557 (don't forget to mark the last one as last, 10632 (don't forget to mark the last one as last,
10558 and to unmap only AFTER you write to the BD ...) 10633 and to unmap only AFTER you write to the BD ...)
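
Since the parsing BD is now unconditional, the smallest packet occupies two ring slots, and nbd is later seeded as nr_frags + 2. A sketch of the chain for an skb with two fragments (slot indices advance via NEXT_TX_IDX):

	/* slot p+0: start_bd  -- ETH_TX_BD_FLAGS_START_BD, nbd = 4, headlen */
	/* slot p+1: parse_bd  -- csum/TSO parsing info (always present now) */
	/* slot p+2: reg_bd    -- frag 0; also carries total_pkt_bytes       */
	/* slot p+3: reg_bd    -- frag 1                                     */
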
@@ -10564,42 +10639,40 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10564 10639
10565 /* get a tx_buf and first BD */ 10640 /* get a tx_buf and first BD */
10566 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; 10641 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10567 tx_bd = &fp->tx_desc_ring[bd_prod]; 10642 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10568 10643
10569 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 10644 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10570 tx_bd->general_data = (UNICAST_ADDRESS << 10645 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10571 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 10646 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10572 /* header nbd */ 10647 /* header nbd */
10573 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT); 10648 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10574 10649
10575 /* remember the first BD of the packet */ 10650 /* remember the first BD of the packet */
10576 tx_buf->first_bd = fp->tx_bd_prod; 10651 tx_buf->first_bd = fp->tx_bd_prod;
10577 tx_buf->skb = skb; 10652 tx_buf->skb = skb;
10653 tx_buf->flags = 0;
10578 10654
10579 DP(NETIF_MSG_TX_QUEUED, 10655 DP(NETIF_MSG_TX_QUEUED,
10580 "sending pkt %u @%p next_idx %u bd %u @%p\n", 10656 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10581 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd); 10657 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10582 10658
10583#ifdef BCM_VLAN 10659#ifdef BCM_VLAN
10584 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && 10660 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10585 (bp->flags & HW_VLAN_TX_FLAG)) { 10661 (bp->flags & HW_VLAN_TX_FLAG)) {
10586 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 10662 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10587 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 10663 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10588 vlan_off += 4;
10589 } else 10664 } else
10590#endif 10665#endif
10591 tx_bd->vlan = cpu_to_le16(pkt_prod); 10666 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10592 10667
10593 if (xmit_type) { 10668 /* turn on parsing and get a BD */
10594 /* turn on parsing and get a BD */ 10669 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10595 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 10670 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10596 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10597 10671
10598 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 10672 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10599 }
10600 10673
10601 if (xmit_type & XMIT_CSUM) { 10674 if (xmit_type & XMIT_CSUM) {
10602 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2; 10675 hlen = (skb_network_header(skb) - skb->data) / 2;
10603 10676
10604 /* for now NS flag is not used in Linux */ 10677 /* for now NS flag is not used in Linux */
10605 pbd->global_data = 10678 pbd->global_data =
@@ -10612,15 +10685,16 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10612 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; 10685 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10613 10686
10614 pbd->total_hlen = cpu_to_le16(hlen); 10687 pbd->total_hlen = cpu_to_le16(hlen);
10615 hlen = hlen*2 - vlan_off; 10688 hlen = hlen*2;
10616 10689
10617 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM; 10690 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
10618 10691
10619 if (xmit_type & XMIT_CSUM_V4) 10692 if (xmit_type & XMIT_CSUM_V4)
10620 tx_bd->bd_flags.as_bitfield |= 10693 tx_start_bd->bd_flags.as_bitfield |=
10621 ETH_TX_BD_FLAGS_IP_CSUM; 10694 ETH_TX_BD_FLAGS_IP_CSUM;
10622 else 10695 else
10623 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; 10696 tx_start_bd->bd_flags.as_bitfield |=
10697 ETH_TX_BD_FLAGS_IPV6;
10624 10698
10625 if (xmit_type & XMIT_CSUM_TCP) { 10699 if (xmit_type & XMIT_CSUM_TCP) {
10626 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 10700 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
@@ -10628,13 +10702,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10628 } else { 10702 } else {
10629 s8 fix = SKB_CS_OFF(skb); /* signed! */ 10703 s8 fix = SKB_CS_OFF(skb); /* signed! */
10630 10704
10631 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG; 10705 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
10632 pbd->cs_offset = fix / 2;
10633 10706
10634 DP(NETIF_MSG_TX_QUEUED, 10707 DP(NETIF_MSG_TX_QUEUED,
10635 "hlen %d offset %d fix %d csum before fix %x\n", 10708 "hlen %d fix %d csum before fix %x\n",
10636 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix, 10709 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
10637 SKB_CS(skb));
10638 10710
10639 /* HW bug: fixup the CSUM */ 10711 /* HW bug: fixup the CSUM */
10640 pbd->tcp_pseudo_csum = 10712 pbd->tcp_pseudo_csum =
@@ -10649,17 +10721,18 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10649 mapping = pci_map_single(bp->pdev, skb->data, 10721 mapping = pci_map_single(bp->pdev, skb->data,
10650 skb_headlen(skb), PCI_DMA_TODEVICE); 10722 skb_headlen(skb), PCI_DMA_TODEVICE);
10651 10723
10652 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 10724 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10653 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 10725 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10654 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2); 10726 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10655 tx_bd->nbd = cpu_to_le16(nbd); 10727 tx_start_bd->nbd = cpu_to_le16(nbd);
10656 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 10728 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10729 pkt_size = tx_start_bd->nbytes;
10657 10730
10658 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" 10731 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10659 " nbytes %d flags %x vlan %x\n", 10732 " nbytes %d flags %x vlan %x\n",
10660 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd), 10733 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10661 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield, 10734 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10662 le16_to_cpu(tx_bd->vlan)); 10735 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
10663 10736
10664 if (xmit_type & XMIT_GSO) { 10737 if (xmit_type & XMIT_GSO) {
10665 10738
@@ -10668,11 +10741,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10668 skb->len, hlen, skb_headlen(skb), 10741 skb->len, hlen, skb_headlen(skb),
10669 skb_shinfo(skb)->gso_size); 10742 skb_shinfo(skb)->gso_size);
10670 10743
10671 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 10744 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10672 10745
10673 if (unlikely(skb_headlen(skb) > hlen)) 10746 if (unlikely(skb_headlen(skb) > hlen))
10674 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen, 10747 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10675 bd_prod, ++nbd); 10748 hlen, bd_prod, ++nbd);
10676 10749
10677 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 10750 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10678 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 10751 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
@@ -10693,33 +10766,31 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10693 10766
10694 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; 10767 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10695 } 10768 }
10769 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
10696 10770
10697 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 10771 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 10772 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10699 10773
10700 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 10774 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10701 tx_bd = &fp->tx_desc_ring[bd_prod]; 10775 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10776 if (total_pkt_bd == NULL)
10777 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10702 10778
10703 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 10779 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10704 frag->size, PCI_DMA_TODEVICE); 10780 frag->size, PCI_DMA_TODEVICE);
10705 10781
10706 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 10782 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10707 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 10783 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10708 tx_bd->nbytes = cpu_to_le16(frag->size); 10784 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10709 tx_bd->vlan = cpu_to_le16(pkt_prod); 10785 le16_add_cpu(&pkt_size, frag->size);
10710 tx_bd->bd_flags.as_bitfield = 0;
10711 10786
10712 DP(NETIF_MSG_TX_QUEUED, 10787 DP(NETIF_MSG_TX_QUEUED,
10713 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n", 10788 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10714 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, 10789 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10715 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield); 10790 le16_to_cpu(tx_data_bd->nbytes));
10716 } 10791 }
10717 10792
10718 /* now at last mark the BD as the last BD */ 10793 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
10719 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10720
10721 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10722 tx_bd, tx_bd->bd_flags.as_bitfield);
10723 10794
10724 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 10795 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10725 10796
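
pkt_size stays in little-endian wire order for its whole life (it is seeded from tx_start_bd->nbytes and finally stored into total_pkt_bytes), so the per-fragment sizes are folded in with le16_add_cpu rather than a plain +=. That helper is equivalent to:

	static inline void le16_add_cpu(__le16 *var, u16 val)
	{
		*var = cpu_to_le16(le16_to_cpu(*var) + val);
	}
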
@@ -10729,6 +10800,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10729 if (TX_BD_POFF(bd_prod) < nbd) 10800 if (TX_BD_POFF(bd_prod) < nbd)
10730 nbd++; 10801 nbd++;
10731 10802
10803 if (total_pkt_bd != NULL)
10804 total_pkt_bd->total_pkt_bytes = pkt_size;
10805
10732 if (pbd) 10806 if (pbd)
10733 DP(NETIF_MSG_TX_QUEUED, 10807 DP(NETIF_MSG_TX_QUEUED,
10734 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 10808 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
@@ -10748,25 +10822,24 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10748 */ 10822 */
10749 wmb(); 10823 wmb();
10750 10824
10751 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd); 10825 fp->tx_db.data.prod += nbd;
10752 mb(); /* FW restriction: must not reorder writing nbd and packets */ 10826 barrier();
10753 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1); 10827 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
10754 DOORBELL(bp, fp->index, 0);
10755 10828
10756 mmiowb(); 10829 mmiowb();
10757 10830
10758 fp->tx_bd_prod += nbd; 10831 fp->tx_bd_prod += nbd;
10759 10832
10760 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 10833 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10834 netif_tx_stop_queue(txq);
10761 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 10835 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10762 if we put Tx into XOFF state. */ 10836 if we put Tx into XOFF state. */
10763 smp_mb(); 10837 smp_mb();
10764 netif_tx_stop_queue(txq); 10838 fp_stat->eth_q_stats.driver_xoff++;
10765 fp->eth_q_stats.driver_xoff++;
10766 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 10839 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10767 netif_tx_wake_queue(txq); 10840 netif_tx_wake_queue(txq);
10768 } 10841 }
10769 fp->tx_pkt++; 10842 fp_stat->tx_pkt++;
10770 10843
10771 return NETDEV_TX_OK; 10844 return NETDEV_TX_OK;
10772} 10845}
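
Note the reordering in the XOFF path: the queue is now stopped first, then smp_mb(), then availability is re-checked. This is the standard lock-free handshake with the completion side; without the barrier, a concurrent bnx2x_tx_int() could free descriptors and test the stopped bit before the stop became visible, leaving the queue off forever. A condensed sketch of both halves (the completion half is an assumed shape, not quoted from the patch):

	/* producer (bnx2x_start_xmit), as in the hunk above: */
	netif_tx_stop_queue(txq);
	smp_mb();			/* stop visible before the re-check */
	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
		netif_tx_wake_queue(txq);	/* completion raced us: undo */

	/* consumer (bnx2x_tx_int), assumed shape: */
	fp->tx_pkt_cons = new_cons;	/* descriptors freed                */
	smp_mb();			/* frees visible before the test    */
	if (netif_tx_queue_stopped(txq) &&
	    bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
		netif_tx_wake_queue(txq);
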
@@ -10842,8 +10915,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
10842 cpu_to_le16(port); 10915 cpu_to_le16(port);
10843 config->config_table[i]. 10916 config->config_table[i].
10844 target_table_entry.flags = 0; 10917 target_table_entry.flags = 0;
10845 config->config_table[i]. 10918 config->config_table[i].target_table_entry.
10846 target_table_entry.client_id = 0; 10919 clients_bit_vector =
10920 cpu_to_le32(1 << BP_L_ID(bp));
10847 config->config_table[i]. 10921 config->config_table[i].
10848 target_table_entry.vlan_id = 0; 10922 target_table_entry.vlan_id = 0;
10849 10923
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 8e9e7a24f2fc..25639e2df52b 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -370,7 +370,6 @@
370#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 370#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
371/* [RW 8] The event id for aggregated interrupt 0 */ 371/* [RW 8] The event id for aggregated interrupt 0 */
372#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 372#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
373#define CSDM_REG_AGG_INT_EVENT_1 0xc203c
374#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 373#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
375#define CSDM_REG_AGG_INT_EVENT_11 0xc2064 374#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
376#define CSDM_REG_AGG_INT_EVENT_12 0xc2068 375#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
@@ -378,37 +377,27 @@
378#define CSDM_REG_AGG_INT_EVENT_14 0xc2070 377#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
379#define CSDM_REG_AGG_INT_EVENT_15 0xc2074 378#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
380#define CSDM_REG_AGG_INT_EVENT_16 0xc2078 379#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
381#define CSDM_REG_AGG_INT_EVENT_17 0xc207c
382#define CSDM_REG_AGG_INT_EVENT_18 0xc2080
383#define CSDM_REG_AGG_INT_EVENT_19 0xc2084
384#define CSDM_REG_AGG_INT_EVENT_2 0xc2040 380#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
385#define CSDM_REG_AGG_INT_EVENT_20 0xc2088
386#define CSDM_REG_AGG_INT_EVENT_21 0xc208c
387#define CSDM_REG_AGG_INT_EVENT_22 0xc2090
388#define CSDM_REG_AGG_INT_EVENT_23 0xc2094
389#define CSDM_REG_AGG_INT_EVENT_24 0xc2098
390#define CSDM_REG_AGG_INT_EVENT_25 0xc209c
391#define CSDM_REG_AGG_INT_EVENT_26 0xc20a0
392#define CSDM_REG_AGG_INT_EVENT_27 0xc20a4
393#define CSDM_REG_AGG_INT_EVENT_28 0xc20a8
394#define CSDM_REG_AGG_INT_EVENT_29 0xc20ac
395#define CSDM_REG_AGG_INT_EVENT_3 0xc2044 381#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
396#define CSDM_REG_AGG_INT_EVENT_30 0xc20b0
397#define CSDM_REG_AGG_INT_EVENT_31 0xc20b4
398#define CSDM_REG_AGG_INT_EVENT_4 0xc2048 382#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
399/* [RW 1] The T bit for aggregated interrupt 0 */ 383#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
400#define CSDM_REG_AGG_INT_T_0 0xc20b8 384#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
401#define CSDM_REG_AGG_INT_T_1 0xc20bc 385#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
402#define CSDM_REG_AGG_INT_T_10 0xc20e0 386#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
403#define CSDM_REG_AGG_INT_T_11 0xc20e4 387#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
404#define CSDM_REG_AGG_INT_T_12 0xc20e8 388/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
405#define CSDM_REG_AGG_INT_T_13 0xc20ec 389 or auto-mask-mode (1) */
406#define CSDM_REG_AGG_INT_T_14 0xc20f0 390#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
407#define CSDM_REG_AGG_INT_T_15 0xc20f4 391#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
408#define CSDM_REG_AGG_INT_T_16 0xc20f8 392#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
409#define CSDM_REG_AGG_INT_T_17 0xc20fc 393#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
410#define CSDM_REG_AGG_INT_T_18 0xc2100 394#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
411#define CSDM_REG_AGG_INT_T_19 0xc2104 395#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
396#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
397#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
398#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
399#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
400#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
412/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 401/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
413#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 402#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
414/* [RW 16] The maximum value of the competion counter #0 */ 403/* [RW 16] The maximum value of the competion counter #0 */
@@ -1421,6 +1410,8 @@
1421/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 1410/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
1422 only. */ 1411 only. */
1423#define MISC_REG_E1HMF_MODE 0xa5f8 1412#define MISC_REG_E1HMF_MODE 0xa5f8
1413/* [RW 32] Debug only: spare RW register reset by core reset */
1414#define MISC_REG_GENERIC_CR_0 0xa460
1424/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1415/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
1425 these bits is written as a '1'; the corresponding SPIO bit will turn off 1416 these bits is written as a '1'; the corresponding SPIO bit will turn off
1426 it's drivers and become an input. This is the reset state of all GPIO 1417 it's drivers and become an input. This is the reset state of all GPIO
@@ -1729,6 +1720,7 @@
1729/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1- 1720/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
1730 tsdm enable; b2- usdm enable */ 1721 tsdm enable; b2- usdm enable */
1731#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070 1722#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
1723#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
1732/* [RW 1] SAFC enable for port0. This register may get 1 only when 1724/* [RW 1] SAFC enable for port0. This register may get 1 only when
1733 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same 1725 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
1734 port */ 1726 port */
@@ -2079,6 +2071,7 @@
2079#define PXP2_REG_PGL_ADDR_94_F0 0x120540 2071#define PXP2_REG_PGL_ADDR_94_F0 0x120540
2080#define PXP2_REG_PGL_CONTROL0 0x120490 2072#define PXP2_REG_PGL_CONTROL0 0x120490
2081#define PXP2_REG_PGL_CONTROL1 0x120514 2073#define PXP2_REG_PGL_CONTROL1 0x120514
2074#define PXP2_REG_PGL_DEBUG 0x120520
2082/* [RW 32] third dword data of expansion rom request. this register is 2075/* [RW 32] third dword data of expansion rom request. this register is
2083 special. reading from it provides a vector outstanding read requests. if 2076 special. reading from it provides a vector outstanding read requests. if
2084 a bit is zero it means that a read request on the corresponding tag did 2077 a bit is zero it means that a read request on the corresponding tag did
@@ -2239,6 +2232,9 @@
2239 allocated for vq22 */ 2232 allocated for vq22 */
2240#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0 2233#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
2241/* [RW 8] The maximum number of blocks in Tetris Buffer that can be 2234/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2235 allocated for vq25 */
2236#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
2237/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2242 allocated for vq6 */ 2238 allocated for vq6 */
2243#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390 2239#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
2244/* [RW 8] The maximum number of blocks in Tetris Buffer that can be 2240/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
@@ -3835,6 +3831,7 @@
3835#define TM_REG_LIN0_PHY_ADDR 0x164270 3831#define TM_REG_LIN0_PHY_ADDR 0x164270
3836/* [RW 1] Linear0 physical address valid. */ 3832/* [RW 1] Linear0 physical address valid. */
3837#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248 3833#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
3834#define TM_REG_LIN0_SCAN_ON 0x1640d0
3838/* [RW 24] Linear0 array scan timeout. */ 3835/* [RW 24] Linear0 array scan timeout. */
3839#define TM_REG_LIN0_SCAN_TIME 0x16403c 3836#define TM_REG_LIN0_SCAN_TIME 0x16403c
3840/* [RW 32] Linear1 logic address. */ 3837/* [RW 32] Linear1 logic address. */
@@ -4363,6 +4360,7 @@
4363#define USDM_REG_AGG_INT_EVENT_31 0xc40b4 4360#define USDM_REG_AGG_INT_EVENT_31 0xc40b4
4364#define USDM_REG_AGG_INT_EVENT_4 0xc4048 4361#define USDM_REG_AGG_INT_EVENT_4 0xc4048
4365#define USDM_REG_AGG_INT_EVENT_5 0xc404c 4362#define USDM_REG_AGG_INT_EVENT_5 0xc404c
4363#define USDM_REG_AGG_INT_EVENT_6 0xc4050
4366/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) 4364/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
4367 or auto-mask-mode (1) */ 4365 or auto-mask-mode (1) */
4368#define USDM_REG_AGG_INT_MODE_0 0xc41b8 4366#define USDM_REG_AGG_INT_MODE_0 0xc41b8
@@ -4379,6 +4377,10 @@
4379#define USDM_REG_AGG_INT_MODE_19 0xc4204 4377#define USDM_REG_AGG_INT_MODE_19 0xc4204
4380#define USDM_REG_AGG_INT_MODE_4 0xc41c8 4378#define USDM_REG_AGG_INT_MODE_4 0xc41c8
4381#define USDM_REG_AGG_INT_MODE_5 0xc41cc 4379#define USDM_REG_AGG_INT_MODE_5 0xc41cc
4380#define USDM_REG_AGG_INT_MODE_6 0xc41d0
4381/* [RW 1] The T bit for aggregated interrupt 5 */
4382#define USDM_REG_AGG_INT_T_5 0xc40cc
4383#define USDM_REG_AGG_INT_T_6 0xc40d0
4382/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 4384/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4383#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 4385#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
4384/* [RW 16] The maximum value of the competion counter #0 */ 4386/* [RW 16] The maximum value of the competion counter #0 */