 drivers/net/bnx2x/bnx2x.h         |  54
 drivers/net/bnx2x/bnx2x_cmn.c     |  60
 drivers/net/bnx2x/bnx2x_cmn.h     | 145
 drivers/net/bnx2x/bnx2x_ethtool.c |   5
 drivers/net/bnx2x/bnx2x_main.c    | 275
 drivers/net/bnx2x/bnx2x_stats.c   |   4
 drivers/net/bnx2x/bnx2x_stats.h   |   8
 7 files changed, 318 insertions(+), 233 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index d80809f5ffc9..6fc77a4a5de6 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -180,13 +180,14 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)
 #define MF_CFG_ADDR(bp, field)		(bp->common.mf_cfg_base + \
					 offsetof(struct mf_cfg, field))
 #define MF2_CFG_ADDR(bp, field)	(bp->common.mf2_cfg_base + \
					 offsetof(struct mf2_cfg, field))
 
 #define MF_CFG_RD(bp, field)		REG_RD(bp, MF_CFG_ADDR(bp, field))
 #define MF_CFG_WR(bp, field, val)	REG_WR(bp,\
					 MF_CFG_ADDR(bp, field), (val))
 #define MF2_CFG_RD(bp, field)		REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
 #define SHMEM2_HAS(bp, field)		((bp)->common.shmem2_base && \
					 (SHMEM2_RD((bp), size) > \
					 offsetof(struct shmem2_region, field)))
@@ -310,7 +311,7 @@ struct bnx2x_fastpath {
 
 #define BNX2X_NAPI_WEIGHT	128
	struct napi_struct	napi;
	union host_hc_status_block	status_blk;
	/* chip independed shortcuts into sb structure */
	__le16			*sb_index_values;
	__le16			*sb_running_index;
@@ -349,8 +350,8 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_TERMINATING	0xd0000
 #define BNX2X_FP_STATE_TERMINATED	0xe0000
 
	u8			index;		/* number in fp array */
	u8			cl_id;		/* eth client id */
	u8			cl_qzone_id;
	u8			fw_sb_id;	/* status block number in FW */
	u8			igu_sb_id;	/* status block number in HW */
@@ -375,8 +376,6 @@ struct bnx2x_fastpath {
	u16			last_max_sge;
	__le16			*rx_cons_sb;
 
-
-
	unsigned long		tx_pkt,
				rx_pkt,
				rx_calls;
@@ -977,7 +976,7 @@ struct bnx2x {
	u32			mf2_config[E2_FUNC_MAX];
	u16			mf_ov;
	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
 
	u8			wol;
 
@@ -1302,21 +1301,35 @@ struct bnx2x_func_init_params {
	for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
 
 
+#define WAIT_RAMROD_POLL	0x01
+#define WAIT_RAMROD_COMMON	0x02
+int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+		      int *state_p, int flags);
+
+/* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32);
+void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+			       u32 addr, u32 len);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+		      bool with_comp, u8 comp_type);
+
 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-			       u32 addr, u32 len);
+
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common);
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
			   int wait)
 {
@@ -1333,6 +1346,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
	return val;
 }
+
 #define BNX2X_ILT_ZALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
@@ -1353,6 +1367,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 #define ILT_NUM_PAGE_ENTRIES	(3072)
 /* In 57710/11 we use whole table since we have 8 func
+ * In 57712 we have only 4 func, but use same size per func, then only half of
+ * the table in use
  */
 #define ILT_PER_FUNC		(ILT_NUM_PAGE_ENTRIES/8)
 
@@ -1366,14 +1382,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
 #define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
 
-
 /* load/unload mode */
 #define LOAD_NORMAL			0
 #define LOAD_OPEN			1
 #define LOAD_DIAG			2
 #define UNLOAD_NORMAL			0
 #define UNLOAD_CLOSE			1
 #define UNLOAD_RECOVERY			2
 
 
 /* DMAE command defines */
@@ -1447,7 +1462,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
					 E1HVN_MAX)
 
-
 /* PCIE link and speed */
 #define PCICFG_LINK_WIDTH		0x1f00000
 #define PCICFG_LINK_WIDTH_SHIFT		20
@@ -1596,6 +1610,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_SP_DSB_INDEX \
		(&bp->def_status_blk->sp_sb.\
		index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
 #define SET_FLAG(value, mask, flag) \
	do {\
		(value) &= ~(mask);\
@@ -1630,6 +1645,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #ifndef ETH_MAX_RX_CLIENTS_E2
 #define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
 #endif
+
 #define BNX2X_VPD_LEN			128
 #define VENDOR_ID_LEN			4
 
@@ -1649,20 +1665,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
 
-/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
-
 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
 
-void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
-u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
-u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
-u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
-		      bool with_comp, u8 comp_type);
-
-
-#define WAIT_RAMROD_POLL	0x01
-#define WAIT_RAMROD_COMMON	0x02
-
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-		      int *state_p, int flags);
 #endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 68181cdd2096..97ef674dcc34 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -15,7 +15,6 @@
  *
  */
 
-
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
@@ -136,7 +135,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
	 */
	smp_mb();
 
-	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
@@ -623,6 +621,7 @@ reuse_rx:
		bnx2x_set_skb_rxhash(bp, cqe, skb);
 
		skb_checksum_none_assert(skb);
+
		if (bp->rx_csum) {
			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -704,7 +703,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	return IRQ_HANDLED;
 }
 
-
 /* HW Lock for shared dual port PHYs */
 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
@@ -916,6 +914,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
		}
	}
 }
+
 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
	int i;
@@ -1185,6 +1184,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;
+
	default:
		bp->num_queues = 1;
		break;
@@ -1354,6 +1354,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 #endif
+
	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
@@ -1473,11 +1474,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
	/* Stop Tx */
	bnx2x_tx_disable(bp);
+
	del_timer_sync(&bp->timer);
+
	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
-	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
@@ -1514,6 +1517,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
	return 0;
 }
+
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 {
	u16 pmcsr;
@@ -1560,12 +1564,9 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
	return 0;
 }
 
-
-
 /*
  * net_device service functions
  */
-
 int bnx2x_poll(struct napi_struct *napi, int budget)
 {
	int work_done = 0;
@@ -1595,19 +1596,19 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();
 
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
@@ -1626,7 +1627,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
	return work_done;
 }
 
-
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
@@ -1842,6 +1842,7 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 
	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
 }
+
 /**
  *
  * @param skb
@@ -1914,6 +1915,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 
	return hlen;
 }
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -2003,13 +2005,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
 
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	SET_FLAG(tx_start_bd->general_data,
-		 ETH_TX_START_BD_ETH_ADDR_TYPE,
-		 mac_type);
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+		 mac_type);
+
	/* header nbd */
-	SET_FLAG(tx_start_bd->general_data,
-		 ETH_TX_START_BD_HDR_NBDS,
-		 1);
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 
	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
@@ -2065,9 +2065,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
	}
 
+	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
 
+	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -2101,6 +2103,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 
+	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -2165,6 +2168,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
	fp->tx_db.data.prod += nbd;
	barrier();
+
	DOORBELL(bp, fp->cid, fp->tx_db.raw);
 
	mmiowb();
@@ -2187,6 +2191,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
	return NETDEV_TX_OK;
 }
+
 /* called with rtnl_lock */
 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 {
@@ -2319,6 +2324,7 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
 }
 
 #endif
+
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 {
	struct net_device *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 1d9686ea6b66..7f52cec9bb99 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -64,6 +64,15 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
 void bnx2x__link_status_update(struct bnx2x *bp);
 
 /**
+ * Report link status to upper layer
+ *
+ * @param bp
+ *
+ * @return int
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/**
  * MSI-X slowpath interrupt handler
  *
  * @param irq
@@ -234,7 +243,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
 
 /**
  * Configure eth MAC address in the HW according to the value in
- * netdev->dev_addr for 57711
+ * netdev->dev_addr.
  *
  * @param bp driver handle
  * @param set
@@ -270,10 +279,11 @@ void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		  u8 vf_valid, int fw_sb_id, int igu_sb_id);
 
 /**
- * Reconfigure FW/HW according to dev->flags rx mode
+ * Set MAC filtering configurations.
  *
- * @param dev net_device
+ * @remarks called with netif_tx_lock from dev_mcast.c
  *
+ * @param dev net_device
  */
 void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -295,17 +305,17 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
  * Perform statistics handling according to event
  *
  * @param bp driver handle
- * @param even tbnx2x_stats_event
+ * @param event bnx2x_stats_event
  */
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
 
 /**
- * Handle sp events
+ * Handle ramrods completion
  *
  * @param fp fastpath handle for the event
  * @param rr_cqe eth_rx_cqe
  */
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
 
 /**
  * Init/halt function before/after sending
@@ -327,6 +337,46 @@ int bnx2x_func_stop(struct bnx2x *bp);
 void bnx2x_ilt_set_info(struct bnx2x *bp);
 
 /**
+ * Set power state to the requested value. Currently only D0 and
+ * D3hot are supported.
+ *
+ * @param bp
+ * @param state D0 or D3hot
+ *
+ * @return int
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Rx part */
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+
+/* suspend/resume callbacks */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+
+/**
  * Fill msix_table, request vectors, update num_queues according
  * to number of available vectors
  *
@@ -362,6 +412,51 @@ int bnx2x_setup_irqs(struct bnx2x *bp);
  * @return int
  */
 int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/**
+ * Allocate/release memories outsize main driver structure
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * Change mtu netdev callback
+ *
+ * @param dev
+ * @param new_mtu
+ *
+ * @return int
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+/**
+ * tx timeout netdev callback
+ *
+ * @param dev
+ * @param new_mtu
+ *
+ * @return int
+ */
+void bnx2x_tx_timeout(struct net_device *dev);
+
+#ifdef BCM_VLAN
+/**
+ * vlan rx register netdev callback
+ *
+ * @param dev
+ * @param new_mtu
+ *
+ * @return int
+ */
+void bnx2x_vlan_rx_register(struct net_device *dev,
+				   struct vlan_group *vlgrp);
+
+#endif
+
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
	barrier(); /* status block is written to by the chip */
@@ -558,9 +653,6 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
		return bnx2x_igu_ack_int(bp);
 }
 
-/*
- * fast path service functions
- */
 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
 {
	/* Tell compiler that consumer and producer can change */
@@ -611,6 +703,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
 }
+
 /**
  * disables tx from stack point of view
  *
@@ -731,6 +824,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 
	return 0;
 }
+
 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
 {
@@ -782,6 +876,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			 dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
 }
+
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
 {
@@ -846,6 +941,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
		fp->tx_pkt = 0;
	}
 }
+
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 {
	int i;
@@ -931,40 +1027,11 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 
	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
 }
+
 /* HW Lock for shared dual port PHYs */
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
-void bnx2x_link_report(struct bnx2x *bp);
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-int bnx2x_tx_int(struct bnx2x_fastpath *fp);
-void bnx2x_init_rx_rings(struct bnx2x *bp);
-netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-void bnx2x_tx_timeout(struct net_device *dev);
-void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
-void bnx2x_netif_start(struct bnx2x *bp);
-void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
-void bnx2x_free_irq(struct bnx2x *bp);
-int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
-int bnx2x_resume(struct pci_dev *pdev);
-void bnx2x_free_skbs(struct bnx2x *bp);
-int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
-int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
-int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
-
-/**
- * Allocate/release memories outsize main driver structure
- *
- * @param bp
- *
- * @return int
- */
-int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
-void bnx2x_free_mem_bp(struct bnx2x *bp);
-
 #define BNX2X_FW_IP_HDR_ALIGN_PAD	2 /* FW places hdr with this padding */
 
 #endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8fb00276dc41..54fe0615a8b9 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,7 +25,6 @@
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 
-
 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
	struct bnx2x *bp = netdev_priv(dev);
@@ -963,6 +962,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
 
	return rc;
 }
+
 static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
 {
@@ -1288,6 +1288,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
			save_val = REG_RD(bp, offset);
 
			REG_WR(bp, offset, (wr_val & mask));
+
			val = REG_RD(bp, offset);
 
			/* Restore the original register's value */
@@ -1471,6 +1472,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
	pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
	pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
 
@@ -1714,6 +1716,7 @@ static void bnx2x_self_test(struct net_device *dev,
		buf[1] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
+
	buf[2] = bnx2x_test_loopback(bp, link_up);
	if (buf[2] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index a686a4c15710..7a9556b5b55d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -56,7 +56,6 @@
56#include "bnx2x_init_ops.h" 56#include "bnx2x_init_ops.h"
57#include "bnx2x_cmn.h" 57#include "bnx2x_cmn.h"
58 58
59
60#include <linux/firmware.h> 59#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h" 60#include "bnx2x_fw_file_hdr.h"
62/* FW files */ 61/* FW files */
@@ -1325,7 +1324,6 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1325 return false; 1324 return false;
1326} 1325}
1327 1326
1328
1329#ifdef BCM_CNIC 1327#ifdef BCM_CNIC
1330static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 1328static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1331#endif 1329#endif
@@ -1754,12 +1752,12 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
1754 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 1753 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1756 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 1754 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1757 ADVERTISED_Pause); 1755 ADVERTISED_Pause);
1758 break; 1756 break;
1759 1757
1760 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 1758 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1761 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 1759 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1762 ADVERTISED_Pause); 1760 ADVERTISED_Pause);
1763 break; 1761 break;
1764 1762
1765 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 1763 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
@@ -1768,12 +1766,11 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
1768 1766
1769 default: 1767 default:
1770 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 1768 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1771 ADVERTISED_Pause); 1769 ADVERTISED_Pause);
1772 break; 1770 break;
1773 } 1771 }
1774} 1772}
1775 1773
1776
1777u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 1774u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1778{ 1775{
1779 if (!BP_NOMCP(bp)) { 1776 if (!BP_NOMCP(bp)) {
@@ -1952,6 +1949,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1949 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1950 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1954 } 1951 }
1952
1955 DP(NETIF_MSG_IFUP, 1953 DP(NETIF_MSG_IFUP,
1956 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 1954 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1957 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 1955 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
@@ -1991,6 +1989,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1991 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, 1989 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1992 ((u32 *)(&m_fair_vn))[i]); 1990 ((u32 *)(&m_fair_vn))[i]);
1993} 1991}
1992
1994static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 1993static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1995{ 1994{
1996 if (CHIP_REV_IS_SLOW(bp)) 1995 if (CHIP_REV_IS_SLOW(bp))
@@ -2625,13 +2624,13 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2625 wmb(); 2624 wmb();
2626 2625
2627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 2626 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2628 bp->spq_prod_idx); 2627 bp->spq_prod_idx);
2629 mmiowb(); 2628 mmiowb();
2630} 2629}
2631 2630
2632/* the slow path queue is odd since completions arrive on the fastpath ring */ 2631/* the slow path queue is odd since completions arrive on the fastpath ring */
2633int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2632int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2634 u32 data_hi, u32 data_lo, int common) 2633 u32 data_hi, u32 data_lo, int common)
2635{ 2634{
2636 struct eth_spe *spe; 2635 struct eth_spe *spe;
2637 u16 type; 2636 u16 type;
@@ -3055,6 +3054,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3055#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3054#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3055#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) 3056#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3057
3058/* 3058/*
3059 * should be run under rtnl lock 3059 * should be run under rtnl lock
3060 */ 3060 */
@@ -4376,7 +4376,6 @@ gunzip_nomem1:
4376static void bnx2x_gunzip_end(struct bnx2x *bp) 4376static void bnx2x_gunzip_end(struct bnx2x *bp)
4377{ 4377{
4378 kfree(bp->strm->workspace); 4378 kfree(bp->strm->workspace);
4379
4380 kfree(bp->strm); 4379 kfree(bp->strm);
4381 bp->strm = NULL; 4380 bp->strm = NULL;
4382 4381
@@ -4641,6 +4640,7 @@ static void enable_blocks_attention(struct bnx2x *bp)
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 4640 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4642/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 4641/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 4642/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4643
4644 if (CHIP_REV_IS_FPGA(bp)) 4644 if (CHIP_REV_IS_FPGA(bp))
4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4646 else if (CHIP_IS_E2(bp)) 4646 else if (CHIP_IS_E2(bp))
@@ -4672,29 +4672,29 @@ static const struct {
4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f}, 4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4673 {HC_REG_HC_PRTY_MASK, 0x7}, 4673 {HC_REG_HC_PRTY_MASK, 0x7},
4674 {MISC_REG_MISC_PRTY_MASK, 0x1}, 4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
4675 {QM_REG_QM_PRTY_MASK, 0x0}, 4675 {QM_REG_QM_PRTY_MASK, 0x0},
4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0}, 4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, 4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4678 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, 4678 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ 4679 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4680 {CDU_REG_CDU_PRTY_MASK, 0x0}, 4680 {CDU_REG_CDU_PRTY_MASK, 0x0},
4681 {CFC_REG_CFC_PRTY_MASK, 0x0}, 4681 {CFC_REG_CFC_PRTY_MASK, 0x0},
4682 {DBG_REG_DBG_PRTY_MASK, 0x0}, 4682 {DBG_REG_DBG_PRTY_MASK, 0x0},
4683 {DMAE_REG_DMAE_PRTY_MASK, 0x0}, 4683 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4684 {BRB1_REG_BRB1_PRTY_MASK, 0x0}, 4684 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4685 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ 4685 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4686 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */ 4686 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4687 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4687 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4688 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */ 4688 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4689 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4689 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4690 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, 4690 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4691 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, 4691 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4692 {USEM_REG_USEM_PRTY_MASK_0, 0x0}, 4692 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_1, 0x0}, 4693 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4694 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, 4694 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, 4695 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4696 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, 4696 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} 4697 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4698}; 4698};
4699 4699
4700static void enable_blocks_parity(struct bnx2x *bp) 4700static void enable_blocks_parity(struct bnx2x *bp)
@@ -4906,7 +4906,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4906 4906
4907 bnx2x_ilt_init_page_size(bp, INITOP_SET); 4907 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4908 4908
4909
4910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 4909 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 4910 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4912 4911
@@ -5003,6 +5002,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5003 5002
5004 if (CHIP_MODE_IS_4_PORT(bp)) 5003 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE); 5004 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5005
5006 /* QM queues pointers table */ 5006 /* QM queues pointers table */
5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5008 5008
@@ -5036,6 +5036,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5036#endif 5036#endif
5037 if (!CHIP_IS_E1(bp)) 5037 if (!CHIP_IS_E1(bp))
5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5039
5039 if (CHIP_IS_E2(bp)) { 5040 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the 5041 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */ 5042 basic Ethernet header */
@@ -5081,6 +5082,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5081 REG_WR(bp, SRC_REG_SOFT_RST, 1); 5082 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) 5083 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5083 REG_WR(bp, i, random32()); 5084 REG_WR(bp, i, random32());
5085
5084 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 5086 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5085#ifdef BCM_CNIC 5087#ifdef BCM_CNIC
5086 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 5088 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -5467,6 +5469,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
5467 set the size */ 5469 set the size */
5468 } 5470 }
5469 bnx2x_ilt_init_op(bp, INITOP_SET); 5471 bnx2x_ilt_init_op(bp, INITOP_SET);
5472
5470#ifdef BCM_CNIC 5473#ifdef BCM_CNIC
5471 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 5474 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5472 5475
@@ -5692,6 +5695,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
5692 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); 5695 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5693 5696
5694 bnx2x_phy_probe(&bp->link_params); 5697 bnx2x_phy_probe(&bp->link_params);
5698
5695 return 0; 5699 return 0;
5696} 5700}
5697 5701
@@ -5826,6 +5830,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
5826 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 5830 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5827 5831
5828 BNX2X_FREE(bp->ilt->lines); 5832 BNX2X_FREE(bp->ilt->lines);
5833
5829#ifdef BCM_CNIC 5834#ifdef BCM_CNIC
5830 if (CHIP_IS_E2(bp)) 5835 if (CHIP_IS_E2(bp))
5831 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 5836 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
@@ -5833,8 +5838,10 @@ void bnx2x_free_mem(struct bnx2x *bp)
5833 else 5838 else
5834 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 5839 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5835 sizeof(struct host_hc_status_block_e1x)); 5840 sizeof(struct host_hc_status_block_e1x));
5841
5836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 5842 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5837#endif 5843#endif
5844
5838 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5845 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5839 5846
5840 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 5847 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -5862,7 +5869,6 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5862 5869
5863int bnx2x_alloc_mem(struct bnx2x *bp) 5870int bnx2x_alloc_mem(struct bnx2x *bp)
5864{ 5871{
5865
5866#define BNX2X_PCI_ALLOC(x, y, size) \ 5872#define BNX2X_PCI_ALLOC(x, y, size) \
5867 do { \ 5873 do { \
5868 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 5874 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -5951,6 +5957,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5951 sizeof(struct bnx2x_slowpath)); 5957 sizeof(struct bnx2x_slowpath));
5952 5958
5953 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; 5959 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5960
5954 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, 5961 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5955 bp->context.size); 5962 bp->context.size);
5956 5963
@@ -5997,7 +6004,7 @@ int bnx2x_func_stop(struct bnx2x *bp)
5997} 6004}
5998 6005
5999/** 6006/**
6000 * Sets a MAC in a CAM for a few L2 Clients for E1x chip 6007 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6001 * 6008 *
6002 * @param bp driver descriptor 6009 * @param bp driver descriptor
6003 * @param set set or clear an entry (1 or 0) 6010 * @param set set or clear an entry (1 or 0)
@@ -6007,8 +6014,8 @@ int bnx2x_func_stop(struct bnx2x *bp)
6007 * @param is_bcast is the set MAC a broadcast address (for E1 only) 6014 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6008 */ 6015 */
6009static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac, 6016static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6010 u32 cl_bit_vec, u8 cam_offset, 6017 u32 cl_bit_vec, u8 cam_offset,
6011 u8 is_bcast) 6018 u8 is_bcast)
6012{ 6019{
6013 struct mac_configuration_cmd *config = 6020 struct mac_configuration_cmd *config =
6014 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config); 6021 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
@@ -6060,9 +6067,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6060 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); 6067 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6061} 6068}
6062 6069
6063
6064int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, 6070int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6065 int *state_p, int flags) 6071 int *state_p, int flags)
6066{ 6072{
6067 /* can take a while if any port is running */ 6073 /* can take a while if any port is running */
6068 int cnt = 5000; 6074 int cnt = 5000;
@@ -6220,7 +6226,6 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6220 6226
6221} 6227}
6222 6228
6223
6224#ifdef BCM_CNIC 6229#ifdef BCM_CNIC
6225/** 6230/**
6226 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 6231 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
@@ -6564,6 +6569,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
6564 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); 6569 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6565#endif 6570#endif
6566} 6571}
6572
6567int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, 6573int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6568 int is_leading) 6574 int is_leading)
6569{ 6575{
@@ -6949,7 +6955,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6949 } 6955 }
6950} 6956}
6951 6957
6952
6953/* Close gates #2, #3 and #4: */ 6958/* Close gates #2, #3 and #4: */
6954static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 6959static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6955{ 6960{
@@ -6995,15 +7000,13 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6995static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 7000static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6996{ 7001{
6997 /* Restore the `magic' bit value... */ 7002 /* Restore the `magic' bit value... */
6998 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6999 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
7000 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
7001 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 7003 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7002 MF_CFG_WR(bp, shared_mf_config.clp_mb, 7004 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7003 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 7005 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7004} 7006}
7005 7007
7006/* Prepares for MCP reset: takes care of CLP configurations. 7008/**
7009 * Prepares for MCP reset: takes care of CLP configurations.
7007 * 7010 *
7008 * @param bp 7011 * @param bp
7009 * @param magic_val Old value of 'magic' bit. 7012 * @param magic_val Old value of 'magic' bit.
@@ -7532,7 +7535,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7532 bp->fw_seq = 7535 bp->fw_seq =
7533 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 7536 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7534 DRV_MSG_SEQ_NUMBER_MASK); 7537 DRV_MSG_SEQ_NUMBER_MASK);
7535
7536 } else 7538 } else
7537 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7539 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7538 } 7540 }
@@ -7651,7 +7653,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7651 } 7653 }
7652 bp->link_params.feature_config_flags |= 7654 bp->link_params.feature_config_flags |=
7653 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 7655 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7654 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 7656 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7657
7655 bp->link_params.feature_config_flags |= 7658 bp->link_params.feature_config_flags |=
7656 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 7659 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7657 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 7660 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
@@ -7768,7 +7771,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7768 SHMEM_RD(bp, 7771 SHMEM_RD(bp,
7769 dev_info.port_hw_config[port].external_phy_config2)); 7772 dev_info.port_hw_config[port].external_phy_config2));
7770 return; 7773 return;
7771 } 7774 }
7772 7775
7773 switch (switch_cfg) { 7776 switch (switch_cfg) {
7774 case SWITCH_CFG_1G: 7777 case SWITCH_CFG_1G:
@@ -7781,7 +7784,6 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7781 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + 7784 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7782 port*0x18); 7785 port*0x18);
7783 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 7786 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7784
7785 break; 7787 break;
7786 7788
7787 default: 7789 default:
@@ -7810,7 +7812,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7810 if (!(bp->link_params.speed_cap_mask[idx] & 7812 if (!(bp->link_params.speed_cap_mask[idx] &
7811 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 7813 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7812 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 7814 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7813 SUPPORTED_1000baseT_Full); 7815 SUPPORTED_1000baseT_Full);
7814 7816
7815 if (!(bp->link_params.speed_cap_mask[idx] & 7817 if (!(bp->link_params.speed_cap_mask[idx] &
7816 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 7818 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -7844,41 +7846,41 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7844 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 7846 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7845 link_config = bp->port.link_config[idx]; 7847 link_config = bp->port.link_config[idx];
7846 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 7848 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7847 case PORT_FEATURE_LINK_SPEED_AUTO: 7849 case PORT_FEATURE_LINK_SPEED_AUTO:
7848 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 7850 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7849 bp->link_params.req_line_speed[idx] = 7851 bp->link_params.req_line_speed[idx] =
7850 SPEED_AUTO_NEG; 7852 SPEED_AUTO_NEG;
7851 bp->port.advertising[idx] |= 7853 bp->port.advertising[idx] |=
7852 bp->port.supported[idx]; 7854 bp->port.supported[idx];
7853 } else { 7855 } else {
7854 /* force 10G, no AN */ 7856 /* force 10G, no AN */
7855 bp->link_params.req_line_speed[idx] = 7857 bp->link_params.req_line_speed[idx] =
7856 SPEED_10000; 7858 SPEED_10000;
7857 bp->port.advertising[idx] |= 7859 bp->port.advertising[idx] |=
7858 (ADVERTISED_10000baseT_Full | 7860 (ADVERTISED_10000baseT_Full |
7859 ADVERTISED_FIBRE); 7861 ADVERTISED_FIBRE);
7860 continue; 7862 continue;
7861 } 7863 }
7862 break; 7864 break;
7863 7865
7864 case PORT_FEATURE_LINK_SPEED_10M_FULL: 7866 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7865 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 7867 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7866 bp->link_params.req_line_speed[idx] = 7868 bp->link_params.req_line_speed[idx] =
7867 SPEED_10; 7869 SPEED_10;
7868 bp->port.advertising[idx] |= 7870 bp->port.advertising[idx] |=
7869 (ADVERTISED_10baseT_Full | 7871 (ADVERTISED_10baseT_Full |
7870 ADVERTISED_TP); 7872 ADVERTISED_TP);
7871 } else { 7873 } else {
7872 BNX2X_ERROR("NVRAM config error. " 7874 BNX2X_ERROR("NVRAM config error. "
7873 "Invalid link_config 0x%x" 7875 "Invalid link_config 0x%x"
7874 " speed_cap_mask 0x%x\n", 7876 " speed_cap_mask 0x%x\n",
7875 link_config, 7877 link_config,
7876 bp->link_params.speed_cap_mask[idx]); 7878 bp->link_params.speed_cap_mask[idx]);
7877 return; 7879 return;
7878 } 7880 }
7879 break; 7881 break;
7880 7882
7881 case PORT_FEATURE_LINK_SPEED_10M_HALF: 7883 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7882 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 7884 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7883 bp->link_params.req_line_speed[idx] = 7885 bp->link_params.req_line_speed[idx] =
7884 SPEED_10; 7886 SPEED_10;
@@ -7886,70 +7888,74 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7886 DUPLEX_HALF; 7888 DUPLEX_HALF;
7887 bp->port.advertising[idx] |= 7889 bp->port.advertising[idx] |=
7888 (ADVERTISED_10baseT_Half | 7890 (ADVERTISED_10baseT_Half |
7889 ADVERTISED_TP); 7891 ADVERTISED_TP);
7890 } else { 7892 } else {
7891 BNX2X_ERROR("NVRAM config error. " 7893 BNX2X_ERROR("NVRAM config error. "
7892 "Invalid link_config 0x%x" 7894 "Invalid link_config 0x%x"
7893 " speed_cap_mask 0x%x\n", 7895 " speed_cap_mask 0x%x\n",
7894 link_config, 7896 link_config,
7895 bp->link_params.speed_cap_mask[idx]); 7897 bp->link_params.speed_cap_mask[idx]);
7896 return; 7898 return;
7897 } 7899 }
7898 break; 7900 break;
7899 7901
7900 case PORT_FEATURE_LINK_SPEED_100M_FULL: 7902 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7901 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) { 7903 if (bp->port.supported[idx] &
7904 SUPPORTED_100baseT_Full) {
7902 bp->link_params.req_line_speed[idx] = 7905 bp->link_params.req_line_speed[idx] =
7903 SPEED_100; 7906 SPEED_100;
7904 bp->port.advertising[idx] |= 7907 bp->port.advertising[idx] |=
7905 (ADVERTISED_100baseT_Full | 7908 (ADVERTISED_100baseT_Full |
7906 ADVERTISED_TP); 7909 ADVERTISED_TP);
7907 } else { 7910 } else {
7908 BNX2X_ERROR("NVRAM config error. " 7911 BNX2X_ERROR("NVRAM config error. "
7909 "Invalid link_config 0x%x" 7912 "Invalid link_config 0x%x"
7910 " speed_cap_mask 0x%x\n", 7913 " speed_cap_mask 0x%x\n",
7911 link_config, 7914 link_config,
7912 bp->link_params.speed_cap_mask[idx]); 7915 bp->link_params.speed_cap_mask[idx]);
7913 return; 7916 return;
7914 } 7917 }
7915 break; 7918 break;
7916 7919
7917 case PORT_FEATURE_LINK_SPEED_100M_HALF: 7920 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7918 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) { 7921 if (bp->port.supported[idx] &
7919 bp->link_params.req_line_speed[idx] = SPEED_100; 7922 SUPPORTED_100baseT_Half) {
7920 bp->link_params.req_duplex[idx] = DUPLEX_HALF; 7923 bp->link_params.req_line_speed[idx] =
7924 SPEED_100;
7925 bp->link_params.req_duplex[idx] =
7926 DUPLEX_HALF;
7921 bp->port.advertising[idx] |= 7927 bp->port.advertising[idx] |=
7922 (ADVERTISED_100baseT_Half | 7928 (ADVERTISED_100baseT_Half |
7923 ADVERTISED_TP); 7929 ADVERTISED_TP);
7924 } else { 7930 } else {
7925 BNX2X_ERROR("NVRAM config error. " 7931 BNX2X_ERROR("NVRAM config error. "
7926 "Invalid link_config 0x%x" 7932 "Invalid link_config 0x%x"
7927 " speed_cap_mask 0x%x\n", 7933 " speed_cap_mask 0x%x\n",
7928 link_config, 7934 link_config,
7929 bp->link_params.speed_cap_mask[idx]); 7935 bp->link_params.speed_cap_mask[idx]);
7930 return; 7936 return;
7931 } 7937 }
7932 break; 7938 break;
7933 7939
7934 case PORT_FEATURE_LINK_SPEED_1G: 7940 case PORT_FEATURE_LINK_SPEED_1G:
7935 if (bp->port.supported[idx] & 7941 if (bp->port.supported[idx] &
7936 SUPPORTED_1000baseT_Full) { 7942 SUPPORTED_1000baseT_Full) {
7937 bp->link_params.req_line_speed[idx] = 7943 bp->link_params.req_line_speed[idx] =
7938 SPEED_1000; 7944 SPEED_1000;
7939 bp->port.advertising[idx] |= 7945 bp->port.advertising[idx] |=
7940 (ADVERTISED_1000baseT_Full | 7946 (ADVERTISED_1000baseT_Full |
7941 ADVERTISED_TP); 7947 ADVERTISED_TP);
7942 } else { 7948 } else {
7943 BNX2X_ERROR("NVRAM config error. " 7949 BNX2X_ERROR("NVRAM config error. "
7944 "Invalid link_config 0x%x" 7950 "Invalid link_config 0x%x"
7945 " speed_cap_mask 0x%x\n", 7951 " speed_cap_mask 0x%x\n",
7946 link_config, 7952 link_config,
7947 bp->link_params.speed_cap_mask[idx]); 7953 bp->link_params.speed_cap_mask[idx]);
7948 return; 7954 return;
7949 } 7955 }
7950 break; 7956 break;
7951 7957
7952 case PORT_FEATURE_LINK_SPEED_2_5G: 7958 case PORT_FEATURE_LINK_SPEED_2_5G:
7953 if (bp->port.supported[idx] & 7959 if (bp->port.supported[idx] &
7954 SUPPORTED_2500baseX_Full) { 7960 SUPPORTED_2500baseX_Full) {
7955 				bp->link_params.req_line_speed[idx] = 7961 				bp->link_params.req_line_speed[idx] =
@@ -7957,19 +7963,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7957 bp->port.advertising[idx] |= 7963 bp->port.advertising[idx] |=
7958 (ADVERTISED_2500baseX_Full | 7964 (ADVERTISED_2500baseX_Full |
7959 ADVERTISED_TP); 7965 ADVERTISED_TP);
7960 } else { 7966 } else {
7961 BNX2X_ERROR("NVRAM config error. " 7967 BNX2X_ERROR("NVRAM config error. "
7962 "Invalid link_config 0x%x" 7968 "Invalid link_config 0x%x"
7963 " speed_cap_mask 0x%x\n", 7969 " speed_cap_mask 0x%x\n",
7964 link_config, 7970 link_config,
7965 bp->link_params.speed_cap_mask[idx]); 7971 bp->link_params.speed_cap_mask[idx]);
7966 return; 7972 return;
7967 } 7973 }
7968 break; 7974 break;
7969 7975
7970 case PORT_FEATURE_LINK_SPEED_10G_CX4: 7976 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7971 case PORT_FEATURE_LINK_SPEED_10G_KX4: 7977 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7972 case PORT_FEATURE_LINK_SPEED_10G_KR: 7978 case PORT_FEATURE_LINK_SPEED_10G_KR:
7973 if (bp->port.supported[idx] & 7979 if (bp->port.supported[idx] &
7974 SUPPORTED_10000baseT_Full) { 7980 SUPPORTED_10000baseT_Full) {
7975 				bp->link_params.req_line_speed[idx] = 7981 				bp->link_params.req_line_speed[idx] =
@@ -7977,24 +7983,26 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7977 bp->port.advertising[idx] |= 7983 bp->port.advertising[idx] |=
7978 (ADVERTISED_10000baseT_Full | 7984 (ADVERTISED_10000baseT_Full |
7979 ADVERTISED_FIBRE); 7985 ADVERTISED_FIBRE);
7980 } else { 7986 } else {
7981 BNX2X_ERROR("NVRAM config error. " 7987 BNX2X_ERROR("NVRAM config error. "
7982 "Invalid link_config 0x%x" 7988 "Invalid link_config 0x%x"
7983 " speed_cap_mask 0x%x\n", 7989 " speed_cap_mask 0x%x\n",
7984 link_config, 7990 link_config,
7985 bp->link_params.speed_cap_mask[idx]); 7991 bp->link_params.speed_cap_mask[idx]);
7986 return; 7992 return;
7987 } 7993 }
7988 break; 7994 break;
7989 7995
7990 default: 7996 default:
7991 BNX2X_ERROR("NVRAM config error. " 7997 BNX2X_ERROR("NVRAM config error. "
7992 "BAD link speed link_config 0x%x\n", 7998 "BAD link speed link_config 0x%x\n",
7993 link_config); 7999 link_config);
7994 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; 8000 bp->link_params.req_line_speed[idx] =
7995 bp->port.advertising[idx] = bp->port.supported[idx]; 8001 SPEED_AUTO_NEG;
7996 break; 8002 bp->port.advertising[idx] =
7997 } 8003 bp->port.supported[idx];
8004 break;
8005 }
7998 8006
7999 bp->link_params.req_flow_ctrl[idx] = (link_config & 8007 bp->link_params.req_flow_ctrl[idx] = (link_config &
8000 PORT_FEATURE_FLOW_CONTROL_MASK); 8008 PORT_FEATURE_FLOW_CONTROL_MASK);
@@ -8056,14 +8064,14 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8056 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 8064 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8057 (config & PORT_FEATURE_WOL_ENABLED)); 8065 (config & PORT_FEATURE_WOL_ENABLED));
8058 8066
8059 BNX2X_DEV_INFO("lane_config 0x%08x" 8067 BNX2X_DEV_INFO("lane_config 0x%08x "
8060 "speed_cap_mask0 0x%08x link_config0 0x%08x\n", 8068 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8061 bp->link_params.lane_config, 8069 bp->link_params.lane_config,
8062 bp->link_params.speed_cap_mask[0], 8070 bp->link_params.speed_cap_mask[0],
8063 bp->port.link_config[0]); 8071 bp->port.link_config[0]);
8064 8072
8065 bp->link_params.switch_cfg = (bp->port.link_config[0] & 8073 bp->link_params.switch_cfg = (bp->port.link_config[0] &
8066 PORT_FEATURE_CONNECTED_SWITCH_MASK); 8074 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8067 bnx2x_phy_probe(&bp->link_params); 8075 bnx2x_phy_probe(&bp->link_params);
8068 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 8076 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8069 8077
@@ -8458,12 +8466,10 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8458 8466
8459 if (dev->flags & IFF_PROMISC) 8467 if (dev->flags & IFF_PROMISC)
8460 rx_mode = BNX2X_RX_MODE_PROMISC; 8468 rx_mode = BNX2X_RX_MODE_PROMISC;
8461
8462 else if ((dev->flags & IFF_ALLMULTI) || 8469 else if ((dev->flags & IFF_ALLMULTI) ||
8463 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && 8470 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8464 CHIP_IS_E1(bp))) 8471 CHIP_IS_E1(bp)))
8465 rx_mode = BNX2X_RX_MODE_ALLMULTI; 8472 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8466
8467 else { /* some multicasts */ 8473 else { /* some multicasts */
8468 if (CHIP_IS_E1(bp)) { 8474 if (CHIP_IS_E1(bp)) {
8469 /* 8475 /*
@@ -8503,12 +8509,10 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8503 } 8509 }
8504 } 8510 }
8505 8511
8506
8507 bp->rx_mode = rx_mode; 8512 bp->rx_mode = rx_mode;
8508 bnx2x_set_storm_rx_mode(bp); 8513 bnx2x_set_storm_rx_mode(bp);
8509} 8514}
8510 8515
8511
8512/* called with rtnl_lock */ 8516/* called with rtnl_lock */
8513static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 8517static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8514 int devad, u16 addr) 8518 int devad, u16 addr)
@@ -8999,6 +9003,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8999#endif 9003#endif
9000 return roundup(cid_count, QM_CID_ROUND); 9004 return roundup(cid_count, QM_CID_ROUND);
9001} 9005}
9006
9002static int __devinit bnx2x_init_one(struct pci_dev *pdev, 9007static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9003 const struct pci_device_id *ent) 9008 const struct pci_device_id *ent)
9004{ 9009{
@@ -9026,6 +9031,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9026 } 9031 }
9027 9032
9028 cid_count += CNIC_CONTEXT_USE; 9033 cid_count += CNIC_CONTEXT_USE;
9034
9029 /* dev zeroed in init_etherdev */ 9035 /* dev zeroed in init_etherdev */
9030 dev = alloc_etherdev_mq(sizeof(*bp), cid_count); 9036 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9031 if (!dev) { 9037 if (!dev) {
@@ -9117,6 +9123,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9117 9123
9118 /* Disable MSI/MSI-X */ 9124 /* Disable MSI/MSI-X */
9119 bnx2x_disable_msi(bp); 9125 bnx2x_disable_msi(bp);
9126
9120 /* Make sure RESET task is not scheduled before continuing */ 9127 /* Make sure RESET task is not scheduled before continuing */
9121 cancel_delayed_work_sync(&bp->reset_task); 9128 cancel_delayed_work_sync(&bp->reset_task);
9122 9129
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index ad7aa55efb63..5644bddb3d19 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,8 +14,8 @@
14 * Statistics and Link management by Yitchak Gertner 14 * Statistics and Link management by Yitchak Gertner
15 * 15 *
16 */ 16 */
17 #include "bnx2x_cmn.h" 17#include "bnx2x_cmn.h"
18 #include "bnx2x_stats.h" 18#include "bnx2x_stats.h"
19 19
20/* Statistics */ 20/* Statistics */
21 21
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 38a4e908f4fb..afd15efa429a 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -9,6 +9,10 @@
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
12 */ 16 */
13 17
14#ifndef BNX2X_STATS_H 18#ifndef BNX2X_STATS_H
@@ -228,12 +232,8 @@ struct bnx2x_eth_stats {
228/* Forward declaration */ 232/* Forward declaration */
229struct bnx2x; 233struct bnx2x;
230 234
231
232void bnx2x_stats_init(struct bnx2x *bp); 235void bnx2x_stats_init(struct bnx2x *bp);
233 236
234extern const u32 dmae_reg_go_c[]; 237extern const u32 dmae_reg_go_c[];
235extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
236 u32 data_hi, u32 data_lo, int common);
237
238 238
239#endif /* BNX2X_STATS_H */ 239#endif /* BNX2X_STATS_H */