Diffstat (limited to 'drivers/net/bnx2x_main.c'):
 drivers/net/bnx2x_main.c | 1212 ++++++++++++++++++++++++++++----------------
 1 file changed, 686 insertions(+), 526 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd25953..3e7dc171cdf1 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -60,8 +60,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.6"
-#define DRV_MODULE_RELDATE	"2008/06/23"
+#define DRV_MODULE_VERSION	"1.45.17"
+#define DRV_MODULE_RELDATE	"2008/08/13"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +76,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
-static int disable_tpa;
-static int nomcp;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 static int use_multi;
 
+module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
 module_param(poll, int, 0);
 module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
@@ -237,17 +235,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 	while (*wb_comp != DMAE_COMP_VAL) {
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 
 	mutex_unlock(&bp->dmae_mutex);
@@ -310,17 +307,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
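Both DMAE completion loops above settle on the same test-then-wait shape: the retry budget is checked and decremented before the emulation/FPGA delay, so a loop that has exhausted its budget reports the timeout immediately instead of sleeping one extra interval first. A standalone sketch of that idiom, where read_completion_flag() and short_delay() are hypothetical stand-ins for the *wb_comp read and the msleep()/udelay() pair (this is not driver code):

#include <stdbool.h>
#include <stdio.h>

static int steps_left;				/* completion fires at zero */
static bool read_completion_flag(void) { return --steps_left <= 0; }
static void short_delay(void) { /* msleep()/udelay() stand-in */ }

static bool poll_for_completion(int budget)
{
	while (!read_completion_flag()) {
		if (!budget)
			return false;		/* timed out, no final sleep */
		budget--;
		short_delay();			/* wait only if retrying */
	}
	return true;
}

int main(void)
{
	steps_left = 3;
	printf("completed: %d\n", poll_for_completion(200));
	return 0;
}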
@@ -503,6 +499,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	int i;
 	u16 j, start, end;
 
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	for_each_queue(bp, i) {
@@ -513,17 +512,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
513 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
514 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
515 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
516 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" 515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
517 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" 516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
518 " rx_sge_prod(%x) last_max_sge(%x)\n", 517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
519 fp->rx_comp_prod, fp->rx_comp_cons, 518 fp->rx_bd_prod, fp->rx_bd_cons,
520 le16_to_cpu(*fp->rx_cons_sb), 519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
521 le16_to_cpu(*fp->rx_bd_cons_sb), 520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
522 fp->rx_sge_prod, fp->last_max_sge); 521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
523 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" 522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
524 " bd data(%x,%x) rx_alloc_failed(%lx)\n", 523 " *sb_u_idx(%x) bd data(%x,%x)\n",
525 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, 524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
526 hw_prods->bds_prod, fp->rx_alloc_failed); 525 fp->status_blk->c_status_block.status_block_index,
526 fp->fp_u_idx,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
527 529
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); 531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +555,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 			  j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
-		start = 0;
-		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
 		for (j = start; j < end; j++) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +584,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 }
 
 static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +683,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
 	igu_ack.status_block_index = index;
@@ -694,9 +694,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
 		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 
-	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
-	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 }
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +716,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
-	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
-	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
 
-	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
-	   result, BAR_IGU_INTMEM + igu_addr);
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
 
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
-	if (result == 0) {
-		BNX2X_ERR("read %x from IGU\n", result);
-		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
-	}
-#endif
 	return result;
 }
 
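The IGU-to-HC conversion in bnx2x_ack_sb() and bnx2x_ack_int() recurs throughout this patch: interrupt acks and attention masks now go through a per-port host-coalescing command window, computed as base + port*32 + per-command offset, instead of the old IGU_FUNC_BASE arithmetic. A sketch of the addressing alone; the register values below are illustrative placeholders, not the real bnx2x register map:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real ones come from the driver's register
 * headers (HC_REG_COMMAND_REG, COMMAND_REG_*). */
#define HC_REG_COMMAND_REG	0x108180
#define COMMAND_REG_INT_ACK	0x0
#define COMMAND_REG_SIMD_MASK	0x4

/* One 32-byte command window per port, one offset per command */
static uint32_t hc_command_addr(int port, uint32_t command)
{
	return HC_REG_COMMAND_REG + port * 32 + command;
}

int main(void)
{
	printf("port 0 INT_ACK:   0x%x\n", hc_command_addr(0, COMMAND_REG_INT_ACK));
	printf("port 1 SIMD_MASK: 0x%x\n", hc_command_addr(1, COMMAND_REG_SIMD_MASK));
	return 0;
}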
@@ -898,6 +877,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		netif_tx_lock(bp->dev);
 
 		if (netif_queue_stopped(bp->dev) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_wake_queue(bp->dev);
 
@@ -905,6 +885,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	}
 }
 
+
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
 {
@@ -960,6 +941,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1151,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	memset(fp->sge_mask, 0xff,
 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
 
-	/* Clear the two last indeces in the page to 1:
-	   these are the indeces that correspond to the "next" element,
+	/* Clear the two last indices in the page to 1:
+	   these are the indices that correspond to the "next" element,
 	   hence will never be indicated and should be removed from
 	   the calculations. */
 	bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1243,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   where we are and drop the whole packet */
 			err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 			if (unlikely(err)) {
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 				return err;
 			}
 
@@ -1297,14 +1279,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
 			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
-	/* if alloc failed drop the packet and keep the buffer in the bin */
 	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
-		/* else fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > bp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1334,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		fp->tpa_pool[queue].skb = new_skb;
 
 	} else {
+		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		fp->rx_alloc_failed++;
+		bp->eth_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1372,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 	int rx_pkt = 0;
-	u16 queue;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1456,7 +1437,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if ((!fp->disable_tpa) &&
 			    (TPA_TYPE(cqe_fp_flags) !=
 			     (TPA_TYPE_START | TPA_TYPE_END))) {
-				queue = cqe->fast_path_cqe.queue_index;
+				u16 queue = cqe->fast_path_cqe.queue_index;
 
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
 					DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1484,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			/* is this an error packet? */
 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-			/* do we sometimes forward error packets anyway? */
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR flags %x rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				/* TBD make sure MC counts this as a drop */
+				bp->eth_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1524,7 +1504,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR packet dropped "
 					   "because of alloc failure\n");
-					fp->rx_alloc_failed++;
+					bp->eth_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1550,7 +1530,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR packet dropped because "
 				   "of alloc failure\n");
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1559,10 +1539,12 @@ reuse_rx:
 		skb->protocol = eth_type_trans(skb, bp->dev);
 
 		skb->ip_summed = CHECKSUM_NONE;
-		if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-		/* TBD do we pass bad csum packets in promisc */
+		if (bp->rx_csum) {
+			if (likely(BNX2X_RX_CSUM_OK(cqe)))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			else
+				bp->eth_stats.hw_csum_err++;
+		}
 	}
 
 #ifdef BCM_VLAN
@@ -1615,6 +1597,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1636,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
 
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
 	mask = 0x2 << bp->fp[0].sb_id;
 	if (status & mask) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1687,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  * General service functions
  */
 
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 	int cnt;
 
 	/* Validating that the resource is within range */
@@ -1714,8 +1703,15 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (lock_status & resource_bit) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
@@ -1725,9 +1721,8 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	/* Try for 1 second every 5ms */
 	for (cnt = 0; cnt < 200; cnt++) {
 		/* Try to acquire the lock */
-		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
-		       resource_bit);
-		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
 		if (lock_status & resource_bit)
 			return 0;
 
@@ -1737,11 +1732,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	return -EAGAIN;
 }
 
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1747,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (!(lock_status & resource_bit)) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EFAULT;
 	}
 
-	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
 	return 0;
 }
 
 /* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
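The acquire/release pair above also changes its register selection from per-port to per-function: functions 0-5 map onto the DRIVER_CONTROL_1 bank and functions 6-7 onto DRIVER_CONTROL_7, each slot 8 bytes apart within its bank. A sketch of just that mapping; the register values are made-up placeholders, not the real register map:

#include <stdint.h>
#include <stdio.h>

#define MISC_REG_DRIVER_CONTROL_1	0xa580	/* placeholder value */
#define MISC_REG_DRIVER_CONTROL_7	0xa3c8	/* placeholder value */

static uint32_t hw_lock_control_reg(int func)
{
	if (func <= 5)
		return MISC_REG_DRIVER_CONTROL_1 + func * 8;
	return MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
}

int main(void)
{
	for (int func = 0; func < 8; func++)
		printf("func %d -> 0x%x\n", func, hw_lock_control_reg(func));
	return 0;
}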
@@ -1772,25 +1775,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 }
 
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 	int gpio_shift = gpio_num +
 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 	u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1804,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 	/* read GPIO and mask except the float bits */
 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
@@ -1822,7 +1825,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 		break;
 
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 		   gpio_num, gpio_shift);
 		/* set FLOAT */
@@ -1834,7 +1837,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 
 	return 0;
 }
@@ -1850,19 +1853,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 	/* read SPIO and mask except the float bits */
 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
 	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 		/* clear FLOAT and set CLR */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 		break;
 
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 		/* clear FLOAT and set SET */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1883,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
 	return 0;
 }
@@ -1940,46 +1943,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
 
 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 {
-	u8 rc;
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
 
-	/* Initialize link parameters structure variables */
-	bp->link_params.mtu = bp->dev->mtu;
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (IS_E1HMF(bp))
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+		else if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
 
-	bnx2x_phy_hw_lock(bp);
-	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	if (bp->link_vars.link_up)
-		bnx2x_link_report(bp);
+		if (bp->link_vars.link_up)
+			bnx2x_link_report(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
 
-	return rc;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	return -EINVAL;
 }
 
 static void bnx2x_link_set(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not setting link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not resetting link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
 	u8 rc;
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	return rc;
 }
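The new req_fc_auto_adv logic advertises TX-only pause when a single-function port runs with an MTU above 5000, and both directions otherwise, per the comment about turning off RX flow control for jumbo frames. Reduced to a pure function; the FLOW_CTRL_* values here are stand-ins for the driver's link flags:

#include <stdio.h>

enum flow_ctrl {
	FLOW_CTRL_TX   = 1,
	FLOW_CTRL_RX   = 2,
	FLOW_CTRL_BOTH = FLOW_CTRL_TX | FLOW_CTRL_RX,
};

static enum flow_ctrl fc_auto_adv(int is_e1hmf, int mtu)
{
	if (is_e1hmf)
		return FLOW_CTRL_BOTH;
	if (mtu > 5000)
		return FLOW_CTRL_TX;	/* RX FC off for jumbo frames */
	return FLOW_CTRL_BOTH;
}

int main(void)
{
	printf("mf, mtu 9000: %d\n", fc_auto_adv(1, 9000));
	printf("sf, mtu 9000: %d\n", fc_auto_adv(0, 9000));
	printf("sf, mtu 1500: %d\n", fc_auto_adv(0, 1500));
	return 0;
}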
@@ -1991,7 +2011,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
       sum of vn_min_rates
         or
       0 - if all the min_rates are 0.
-      In the later case fainess algorithm should be deactivated.
+      In the later case fairness algorithm should be deactivated.
       If not all min_rates are zero then those that are zeroes will
       be set to 1.
  */
@@ -2114,7 +2134,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 		/* If FAIRNESS is enabled (not all min rates are zeroes) and
 		   if current min rate is zero - set it to 1.
-		   This is a requirment of the algorithm. */
+		   This is a requirement of the algorithm. */
 		if ((vn_min_rate == 0) && wsum)
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2223,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2357,7 +2377,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 }
 
 /* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
 	u32 i, j, val;
 	int rc = 0;
@@ -2374,15 +2394,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
 		msleep(5);
 	}
 	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire nvram interface\n");
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 		rc = -EBUSY;
 	}
 
 	return rc;
 }
 
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
 	u32 val = 0;
 
@@ -2395,7 +2415,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 		rc |= 1;
@@ -2426,26 +2445,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 {
 	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
 
-	if (~bp->aeu_mask & (asserted & 0xff))
-		BNX2X_ERR("IGU ERROR\n");
 	if (bp->attn_state & asserted)
 		BNX2X_ERR("IGU ERROR\n");
 
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
-	   bp->aeu_mask, asserted);
-	bp->aeu_mask &= ~(asserted & 0xff);
-	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	REG_WR(bp, aeu_addr, bp->aeu_mask);
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2524,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	} /* if hardwired */
 
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
-	   asserted, BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
 	if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2554,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure attention */
 
-		/* The PHY reset is controled by GPIO 1 */
+		/* The PHY reset is controlled by GPIO 1 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
-		/* Low power mode is controled by GPIO 2 */
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+		/* Low power mode is controlled by GPIO 2 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 		/* mark the failure */
 		bp->link_params.ext_phy_config &=
 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	int index;
 	u32 reg_addr;
 	u32 val;
+	u32 aeu_mask;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
-	bnx2x_lock_alr(bp);
+	bnx2x_acquire_alr(bp);
 
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 				  HW_PRTY_ASSERT_SET_1) ||
 				 (attn.sig[2] & group_mask.sig[2] &
 				  HW_PRTY_ASSERT_SET_2))
 				BNX2X_ERR("FATAL HW block parity attention\n");
 		}
 	}
 
-	bnx2x_unlock_alr(bp);
+	bnx2x_release_alr(bp);
 
-	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
 	val = ~deasserted;
-/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
-	   val, BAR_IGU_INTMEM + reg_addr); */
-	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
 
-	if (bp->aeu_mask & (deasserted & 0xff))
-		BNX2X_ERR("IGU BUG!\n");
 	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU BUG!\n");
+		BNX2X_ERR("IGU ERROR\n");
 
 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
-	bp->aeu_mask |= (deasserted & 0xff);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
-	REG_WR(bp, reg_addr, bp->aeu_mask);
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state &= ~deasserted;
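In both attention paths the cached bp->aeu_mask shadow is gone: the mask is now read from the live register, modified, and written back while holding the per-port attention hardware lock, so the MCP or the other port cannot interleave a conflicting update. The shape of that pattern, with a pthread mutex standing in for the hardware lock and a plain variable for the register (illustrative only, not driver code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t attn_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t aeu_mask_reg = 0xff;	/* stands in for the AEU mask register */

static void mask_asserted_bits(uint32_t asserted)
{
	pthread_mutex_lock(&attn_lock);	/* bnx2x_acquire_hw_lock() stand-in */
	uint32_t mask = aeu_mask_reg;	/* always read live state, no shadow */
	mask &= ~(asserted & 0xff);
	aeu_mask_reg = mask;		/* REG_WR() stand-in */
	pthread_mutex_unlock(&attn_lock);
}

int main(void)
{
	mask_asserted_bits(0x03);
	printf("mask now 0x%02x\n", aeu_mask_reg);	/* prints 0xfc */
	return 0;
}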
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return;
 	}
 
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
 	/* HW attentions */
 	if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 		/* underflow */ \
 		d_hi = m_hi - s_hi; \
 		if (d_hi > 0) { \
-		/* we can 'loan' 1 */ \
-		d_hi--; \
-		d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			/* we can 'loan' 1 */ \
+			d_hi--; \
+			d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
 		} else { \
-		/* m_hi <= s_hi */ \
-		d_hi = 0; \
-		d_lo = 0; \
+			/* m_hi <= s_hi */ \
+			d_hi = 0; \
+			d_lo = 0; \
 		} \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 			d_hi = 0; \
 			d_lo = 0; \
 		} else { \
-		/* m_hi >= s_hi */ \
+			/* m_hi >= s_hi */ \
 			d_hi = m_hi - s_hi; \
 			d_lo = m_lo - s_lo; \
 		} \
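The DIFF_64 macro above computes new-minus-old across a 64-bit counter stored as 32-bit hi/lo halves, borrowing one from the high half when the low half underflows and clamping to zero when the minuend is not actually larger. The same branch structure rewritten as a plain function (a sketch; it assumes monotonic counters, as the driver's statistics are):

#include <stdint.h>
#include <stdio.h>

static void diff_64(uint32_t m_hi, uint32_t m_lo, uint32_t s_hi, uint32_t s_lo,
		    uint32_t *d_hi, uint32_t *d_lo)
{
	if (m_lo < s_lo) {
		/* low half underflows */
		*d_hi = m_hi - s_hi;
		if (*d_hi > 0) {
			/* we can 'loan' 1 from the high half */
			(*d_hi)--;
			*d_lo = m_lo + (UINT32_MAX - s_lo) + 1;
		} else {
			/* m_hi <= s_hi: clamp to zero */
			*d_hi = 0;
			*d_lo = 0;
		}
	} else if (m_hi < s_hi) {
		*d_hi = 0;
		*d_lo = 0;
	} else {
		/* m_hi >= s_hi, no borrow needed */
		*d_hi = m_hi - s_hi;
		*d_lo = m_lo - s_lo;
	}
}

int main(void)
{
	uint32_t hi, lo;
	diff_64(3, 5, 2, 10, &hi, &lo);	/* 0x3_00000005 - 0x2_0000000a */
	printf("hi=%u lo=0x%x\n", hi, lo);	/* hi=0 lo=0xfffffffb */
	return 0;
}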
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
  * Init service functions
  */
 
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 
 	might_sleep();
 	while (*stats_comp != DMAE_COMP_VAL) {
-		msleep(1);
 		if (!cnt) {
 			BNX2X_ERR("timeout waiting for stats finished\n");
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	return 1;
 }
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-	UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
 		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
 
 	UPDATE_STAT64_NIG(egress_mac_pkt0,
 					etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->rx_length_errors =
 				estats->rx_stat_etherstatsundersizepkts_lo +
 				estats->jabber_packets_received;
-	nstats->rx_over_errors = estats->brb_drop_lo +
-				 estats->brb_truncate_discard;
+	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
 	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
 	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
 	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 				  bp->fp->rx_comp_cons),
 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
 		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
-		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
 		       estats->driver_xoff, estats->brb_drop_lo);
 		printk(KERN_DEBUG "tstats: checksum_discard %u "
 			"packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
 
 	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct ustorm_def_status_block)/4);
+			sizeof(struct ustorm_status_block)/4);
 	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct cstorm_def_status_block)/4);
+			sizeof(struct cstorm_status_block)/4);
 }
 
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
-			  struct host_status_block *sb, dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id)
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    atten_status_block);
 	def_sb->atten_status_block.status_block_id = sb_id;
 
-	bp->def_att_idx = 0;
 	bp->attn_state = 0;
 
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 			       reg_offset + 0xc + 0x10*index);
 	}
 
-	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-				   MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
 	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 			     HC_REG_ATTN_MSG0_ADDR_L);
 
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    u_def_status_block);
 	def_sb->u_def_status_block.status_block_id = sb_id;
 
-	bp->def_u_idx = 0;
-
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    c_def_status_block);
 	def_sb->c_def_status_block.status_block_id = sb_id;
 
-	bp->def_c_idx = 0;
-
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    t_def_status_block);
 	def_sb->t_def_status_block.status_block_id = sb_id;
 
-	bp->def_t_idx = 0;
-
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
 		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    x_def_status_block);
 	def_sb->x_def_status_block.status_block_id = sb_id;
 
-	bp->def_x_idx = 0;
-
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
 		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_XSTRORM_INTMEM +
 			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
 
 	bp->stats_pending = 0;
+	bp->set_mac_pending = 0;
 
 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4192 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4193 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4201 bp->rx_ticks ? 0 : 1);
4218 4202
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4206 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4207 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4210 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4211 bp->tx_ticks ? 0 : 1);
4228 } 4212 }
4229} 4213}
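
The coalescing hunk above renames the status-block indices (U_SB_ETH_RX_CQ_INDEX, C_SB_ETH_TX_CQ_INDEX) and additionally programs the rx BD index, but the arithmetic is unchanged: the timeout is written in host-coalescing units (the value is divided by 12, so a 12us granularity is inferred here), and a zero tick value disables the index. A stand-alone C sketch of that conversion; hc_program() is invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Convert a microsecond coalescing value into HC units (12us each,
     * inferred from the /12 in the driver) and derive the per-index
     * disable flag -- the same arithmetic as the REG_WR8()/REG_WR16()
     * pairs above. */
    static void hc_program(uint32_t ticks_us, uint8_t *timeout, uint16_t *disable)
    {
        *timeout = ticks_us / 12;      /* ..._SB_HC_TIMEOUT_OFFSET value */
        *disable = ticks_us ? 0 : 1;   /* ..._SB_HC_DISABLE_OFFSET value */
    }

    int main(void)
    {
        uint8_t timeout;
        uint16_t disable;

        hc_program(25, &timeout, &disable);   /* this driver's default rx_ticks */
        printf("timeout=%u disable=%u\n", timeout, disable);
        return 0;
    }
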
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4240static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4241{
4258 int func = BP_FUNC(bp); 4242 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4246 int i, j;
4261 4247
4262 bp->rx_buf_use_size = bp->dev->mtu; 4248 bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4270 bp->dev->mtu + ETH_OVREHEAD); 4256 bp->dev->mtu + ETH_OVREHEAD);
4271 4257
4272 for_each_queue(bp, j) { 4258 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4260
4261 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4262 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4264 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4339 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4342 fp->disable_tpa = 1;
4358 ring_prod = 0; 4343 ring_prod = 0;
4359 break; 4344 break;
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4348 fp->rx_sge_prod = ring_prod;
4364 4349
4365 /* Allocate BDs and initialize BD ring */ 4350 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4351 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4352 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4353 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4355 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4356 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4357 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4358 break;
4374 } 4359 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4360 ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4482 }
4498 4483
4499 context->cstorm_st_context.sb_index_number = 4484 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4485 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4486 context->cstorm_st_context.status_block_id = sb_id;
4502 4487
4503 context->xstorm_ag_context.cdu_reserved = 4488 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4520 int i;
4536 4521
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4524 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4526#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4564 int func = BP_FUNC(bp);
4580 int i; 4565 int i;
4581 4566
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4568
4584 switch (mode) { 4569 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4570 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,35 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4602 bnx2x_set_client_config(bp);
4618} 4603}
4619 4604
4620static void bnx2x_init_internal(struct bnx2x *bp) 4605static void bnx2x_init_internal_common(struct bnx2x *bp)
4606{
4607 int i;
4608
4609 /* Zero this manually as its initialization is
4610 currently missing in the initTool */
4611 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4614}
4615
4616static void bnx2x_init_internal_port(struct bnx2x *bp)
4617{
4618 int port = BP_PORT(bp);
4619
4620 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4624}
4625
4626static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4627{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4628 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4629 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4630 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4631 int func = BP_FUNC(bp);
4626 int i; 4632 int i;
4633 u16 max_agg_size;
4627 4634
4628 if (is_multi(bp)) { 4635 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4636 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4643,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4644 (*(u32 *)&tstorm_config));
4638 4645
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4646 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4647 bnx2x_set_storm_rx_mode(bp);
4644 4648
4649 /* reset xstorm per client statistics */
4650 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4653 i*4, 0);
4654 }
4655 /* reset tstorm per client statistics */
4656 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4659 i*4, 0);
4660 }
4661
4662 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4663 stats_flags.collect_eth = 1;
4646 4664
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4666 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4667 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4668 ((u32 *)&stats_flags)[1]);
4651 4669
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4670 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4671 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4672 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4673 ((u32 *)&stats_flags)[1]);
4656 4674
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4676 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4678 ((u32 *)&stats_flags)[1]);
4661 4679
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4680 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4681 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683 REG_WR(bp, BAR_XSTRORM_INTMEM +
4684 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686
4687 REG_WR(bp, BAR_TSTRORM_INTMEM +
4688 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690 REG_WR(bp, BAR_TSTRORM_INTMEM +
4691 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4693
4665 if (CHIP_IS_E1H(bp)) { 4694 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4695 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4705,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4705 bp->e1hov);
4677 } 4706 }
4678 4707
4679 /* Zero this manualy as its initialization is 4708 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4709 max_agg_size = min((u32)(bp->rx_buf_use_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4710 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4711 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4712 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4713 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4714
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4715 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4716 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4719,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4719 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4720 U64_HI(fp->rx_comp_mapping));
4695 4721
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4722 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4723 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4724 max_agg_size);
4702 } 4725 }
4703} 4726}
4704 4727
4705static void bnx2x_nic_init(struct bnx2x *bp) 4728static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4729{
4730 switch (load_code) {
4731 case FW_MSG_CODE_DRV_LOAD_COMMON:
4732 bnx2x_init_internal_common(bp);
4733 /* no break */
4734
4735 case FW_MSG_CODE_DRV_LOAD_PORT:
4736 bnx2x_init_internal_port(bp);
4737 /* no break */
4738
4739 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740 bnx2x_init_internal_func(bp);
4741 break;
4742
4743 default:
4744 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4745 break;
4746 }
4747}
4748
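
bnx2x_init_internal() above is split into common/port/function stages and dispatched with deliberate switch fall-through, so a COMMON load also performs the port and function stages and a PORT load also performs the function stage. A minimal sketch of the same cascade, with printf standing in for the real init calls:

    #include <stdio.h>

    enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

    /* Deliberate fall-through: COMMON runs all three stages, PORT runs
     * the last two, FUNCTION runs only its own -- the same dispatch as
     * bnx2x_init_internal() above. */
    static void init_internal(enum load_code code)
    {
        switch (code) {
        case LOAD_COMMON:
            printf("common init\n");
            /* no break */
        case LOAD_PORT:
            printf("port init\n");
            /* no break */
        case LOAD_FUNCTION:
            printf("function init\n");
            break;
        default:
            printf("unknown load code\n");
            break;
        }
    }

    int main(void)
    {
        init_internal(LOAD_PORT);   /* prints the port and function stages */
        return 0;
    }
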
4749static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4750{
4707 int i; 4751 int i;
4708 4752
@@ -4717,19 +4761,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4761 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4762 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4763 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4764 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4765 FP_SB_ID(fp));
4766 bnx2x_update_fpsb_idx(fp);
4722 } 4767 }
4723 4768
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4769 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4770 DEF_SB_ID);
4771 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4772 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4773 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4774 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4775 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4776 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4777 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4778 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4779 bnx2x_int_enable(bp);
4735} 4780}
@@ -4878,7 +4923,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4923 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4924 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4925 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4926 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4882 4927
4883 /* Write 0 to parser credits for CFC search request */ 4928 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4929 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4978,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4978 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4979 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4980 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4981 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 4982
4938 /* Write 0 to parser credits for CFC search request */ 4983 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4984 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5045,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 5045 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 5046 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 5047 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); 5048 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5004 5049
5005 DP(NETIF_MSG_HW, "done\n"); 5050 DP(NETIF_MSG_HW, "done\n");
5006 5051
@@ -5089,11 +5134,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5134 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5135#endif
5091 5136
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5138#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5203,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5203 }
5164 5204
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206 /* set NIC mode */
5207 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5208 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5210
@@ -5333,6 +5375,13 @@ static int bnx2x_init_common(struct bnx2x *bp)
5333 ((u32 *)&tmp)[1]); 5375 ((u32 *)&tmp)[1]);
5334 } 5376 }
5335 5377
5378 if (!BP_NOMCP(bp)) {
5379 bnx2x_acquire_phy_lock(bp);
5380 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381 bnx2x_release_phy_lock(bp);
5382 } else
5383 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5384
5336 return 0; 5385 return 0;
5337} 5386}
5338 5387
@@ -5638,18 +5687,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5638 int func = BP_FUNC(bp); 5687 int func = BP_FUNC(bp);
5639 u32 seq = ++bp->fw_seq; 5688 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0; 5689 u32 rc = 0;
5690 u32 cnt = 1;
5691 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5641 5692
5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 5693 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 5694 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5644 5695
5645 /* let the FW do it's magic ... */ 5696 do {
5646 msleep(100); /* TBD */ 5697 /* let the FW do it's magic ... */
5698 msleep(delay);
5647 5699
5648 if (CHIP_REV_IS_SLOW(bp)) 5700 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5649 msleep(900);
5650 5701
5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 5702 /* Give the FW up to 2 second (200*10ms) */
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 5703 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706 cnt*delay, rc, seq);
5653 5707
5654 /* is this a reply to our command? */ 5708 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 5709 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
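
The bnx2x_fw_command() rework above replaces the fixed msleep(100) (plus 900ms on slow chips) with a bounded poll: re-read the firmware mailbox every `delay` ms until the reply echoes our sequence number, giving up after 200 polls (about 2 seconds at 10ms). A self-contained sketch of the loop shape; read_fw_mb() is a made-up stand-in for SHMEM_RD() and the mask width is assumed:

    #include <stdio.h>
    #include <stdint.h>

    #define FW_MSG_SEQ_NUMBER_MASK 0xffff   /* assumed mask width */

    /* Stand-in for SHMEM_RD(func_mb.fw_mb_header): the "firmware"
     * answers with sequence 7 on the third poll. */
    static uint32_t read_fw_mb(int poll)
    {
        return (poll >= 3) ? 7 : 0;
    }

    /* Bounded poll: re-read the mailbox until the reply echoes our
     * sequence number, capped at max_polls iterations. */
    static int fw_command_wait(uint32_t seq, int max_polls)
    {
        uint32_t rc = 0;
        int cnt = 1;

        do {
            /* msleep(delay) would sit here in the driver */
            rc = read_fw_mb(cnt);
        } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < max_polls));

        return (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) ? 0 : -1;
    }

    int main(void)
    {
        printf("fw reply: %s\n", fw_command_wait(7, 200) ? "timeout" : "ok");
        return 0;
    }
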
@@ -5713,6 +5767,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5767 NUM_RCQ_BD);
5714 5768
5715 /* SGE ring */ 5769 /* SGE ring */
5770 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5771 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5772 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5773 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5945,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5890 dev_kfree_skb(skb); 5945 dev_kfree_skb(skb);
5891 } 5946 }
5892 if (!fp->disable_tpa) 5947 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5950 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5951 }
5896} 5952}
@@ -5976,8 +6032,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6032 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6033 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6034 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6035 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6036 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6037 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6038 return -EBUSY;
5983 } 6039 }
@@ -6004,7 +6060,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6004 * Init service functions 6060 * Init service functions
6005 */ 6061 */
6006 6062
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6063static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6064{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6065 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6066 int port = BP_PORT(bp);
@@ -6026,11 +6082,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6082 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6083 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6084 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6085 if (set)
6086 config->config_table[0].target_table_entry.flags = 0;
6087 else
6088 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6089 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6090 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6091
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6092 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6093 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6094 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6095 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6096 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6100,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6100 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6101 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6102 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6103 if (set)
6104 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6105 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6106 else
6107 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6108 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6109 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6110
@@ -6050,12 +6113,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6113 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6114}
6052 6115
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6116static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6117{
6055 struct mac_configuration_cmd_e1h *config = 6118 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6119 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6120
6058 if (bp->state != BNX2X_STATE_OPEN) { 6121 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6122 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6123 return;
6061 } 6124 }
@@ -6079,9 +6142,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6142 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6143 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6144 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6145 if (set)
6146 config->config_table[0].flags = BP_PORT(bp);
6147 else
6148 config->config_table[0].flags =
6149 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6150
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6151 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6152 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6153 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6154 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6155 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
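
Both MAC helpers now take a `set` argument so one function covers load (program the CAM entry) and unload (invalidate it, via CAM_INVALIDATE on E1 or the action-type flag on E1H). A toy version of the same set/clear parameterization; the struct and helper here are invented for the sketch:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct cam_entry {
        uint8_t mac[6];
        int     valid;
    };

    /* One helper for both directions, like the new
     * bnx2x_set_mac_addr_e1{,h}(bp, set): set != 0 programs the entry,
     * set == 0 invalidates it. */
    static void set_mac_addr(struct cam_entry *e, const uint8_t *mac, int set)
    {
        if (set) {
            memcpy(e->mac, mac, sizeof(e->mac));
            e->valid = 1;
        } else {
            memset(e, 0, sizeof(*e));   /* stand-in for CAM_INVALIDATE() */
        }
        printf("%s MAC entry\n", set ? "setting" : "clearing");
    }

    int main(void)
    {
        struct cam_entry e = {{0}, 0};
        const uint8_t mac[6] = {0x00, 0x10, 0x18, 0x01, 0x02, 0x03};

        set_mac_addr(&e, mac, 1);   /* load path */
        set_mac_addr(&e, mac, 0);   /* unload path */
        return 0;
    }
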
@@ -6106,13 +6174,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 bnx2x_rx_int(bp->fp, 10); 6174 bnx2x_rx_int(bp->fp, 10);
6107 /* if index is different from 0 6175 /* if index is different from 0
6108 * the reply for some commands will 6176 * the reply for some commands will
6109 * be on the none default queue 6177 * be on the non default queue
6110 */ 6178 */
6111 if (idx) 6179 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10); 6180 bnx2x_rx_int(&bp->fp[idx], 10);
6113 } 6181 }
6114 mb(); /* state is changed by bnx2x_sp_event() */
6115 6182
6183 mb(); /* state is changed by bnx2x_sp_event() */
6116 if (*state_p == state) 6184 if (*state_p == state)
6117 return 0; 6185 return 0;
6118 6186
@@ -6167,7 +6235,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6235{
6168 u32 load_code; 6236 u32 load_code;
6169 int i, rc; 6237 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6238#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6239 if (unlikely(bp->panic))
6173 return -EPERM; 6240 return -EPERM;
@@ -6183,22 +6250,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6250 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6251 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6252 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6253 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6254 return -EBUSY;
6188 } 6255 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6256 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6257 return -EBUSY; /* other port in diagnostic mode */
6191 6258
6192 } else { 6259 } else {
6260 int port = BP_PORT(bp);
6261
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6262 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6263 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6264 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6265 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6266 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6267 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6268 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6269 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6270 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6271 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6272 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6273 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6316,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6316 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6317 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6318
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6319 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6320 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6321 if (rc) {
@@ -6276,17 +6342,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 goto load_error; 6342 goto load_error;
6277 } 6343 }
6278 6344
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6345 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6346 bnx2x_nic_init(bp, load_code);
6284 6347
6285 /* Send LOAD_DONE command to MCP */ 6348 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6349 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6350 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6351 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6352 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6353 rc = -EBUSY;
6291 goto load_int_disable; 6354 goto load_int_disable;
6292 } 6355 }
@@ -6301,11 +6364,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 for_each_queue(bp, i) 6364 for_each_queue(bp, i)
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6365 napi_enable(&bnx2x_fp(bp, i, napi));
6303 6366
6367 /* Enable interrupt handling */
6368 atomic_set(&bp->intr_sem, 0);
6369
6304 rc = bnx2x_setup_leading(bp); 6370 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6371 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6372 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1;
6308#endif
6309 goto load_stop_netif; 6373 goto load_stop_netif;
6310 } 6374 }
6311 6375
@@ -6323,9 +6387,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6323 } 6387 }
6324 6388
6325 if (CHIP_IS_E1(bp)) 6389 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6390 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6391 else
6328 bnx2x_set_mac_addr_e1h(bp); 6392 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6393
6330 if (bp->port.pmf) 6394 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6395 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6403,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6403 break;
6340 6404
6341 case LOAD_OPEN: 6405 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6406 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6407 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6408 if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6441,7 @@ load_int_disable:
6378 /* Free SKBs, SGEs, TPA pool and driver internals */ 6441 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp); 6442 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i) 6443 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6444 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6445load_error:
6384 bnx2x_free_mem(bp); 6446 bnx2x_free_mem(bp);
6385 6447
@@ -6411,7 +6473,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6473 return rc;
6412} 6474}
6413 6475
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6476static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6477{
6416 u16 dsb_sp_prod_idx; 6478 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6479 /* if the other port is handling traffic,
@@ -6429,7 +6491,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6491 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6492 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6493 if (rc) /* timeout */
6432 return; 6494 return rc;
6433 6495
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6496 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6497
@@ -6441,20 +6503,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6441 so there is not much to do if this times out 6503 so there is not much to do if this times out
6442 */ 6504 */
6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { 6505 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6444 msleep(1);
6445 if (!cnt) { 6506 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " 6507 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", 6508 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx); 6509 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR 6510#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic(); 6511 bnx2x_panic();
6512#else
6513 rc = -EBUSY;
6451#endif 6514#endif
6452 break; 6515 break;
6453 } 6516 }
6454 cnt--; 6517 cnt--;
6518 msleep(1);
6455 } 6519 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6520 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6521 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6522
6523 return rc;
6458} 6524}
6459 6525
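
bnx2x_stop_leading() changes from void to int above, and its wait loop now checks the budget before sleeping; outside BNX2X_STOP_ON_ERROR builds a timeout is reported as -EBUSY instead of being swallowed. The skeleton of that wait, with the driver's msleep() elided:

    #include <stdio.h>

    /* Wait-loop shape of the reworked bnx2x_stop_leading(): check the
     * budget *before* sleeping, and report the timeout (the driver
     * returns -EBUSY unless it is built to panic instead). */
    static int wait_for_change(const volatile int *idx, int old, int budget)
    {
        int rc = 0;

        while (*idx == old) {
            if (!budget) {
                rc = -1;        /* -EBUSY in the driver */
                break;
            }
            budget--;
            /* msleep(1) in the driver */
        }
        return rc;
    }

    int main(void)
    {
        int idx = 5;

        printf("rc = %d\n", wait_for_change(&idx, 5, 3));   /* times out */
        return 0;
    }
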
6460static void bnx2x_reset_func(struct bnx2x *bp) 6526static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6562,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 6562 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val) 6563 if (val)
6498 DP(NETIF_MSG_IFDOWN, 6564 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val); 6565 "BRB1 is not empty %d blocks are occupied\n", val);
6500 6566
6501 /* TODO: Close Doorbell port? */ 6567 /* TODO: Close Doorbell port? */
6502} 6568}
@@ -6536,11 +6602,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6536 } 6602 }
6537} 6603}
6538 6604
6539/* msut be called with rtnl_lock */ 6605/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 6606static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6541{ 6607{
6608 int port = BP_PORT(bp);
6542 u32 reset_code = 0; 6609 u32 reset_code = 0;
6543 int i, cnt; 6610 int i, cnt, rc;
6544 6611
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6612 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6613
@@ -6557,22 +6624,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6624 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6625 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6626
6560 /* Wait until all fast path tasks complete */ 6627 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6628 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6630
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6631 cnt = 1000;
6573 smp_rmb(); 6632 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6633 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6634
6635 if (!netif_running(bp->dev))
6636 bnx2x_tx_int(fp, 1000);
6637
6576 if (!cnt) { 6638 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6639 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6640 i);
@@ -6584,14 +6646,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6646#endif
6585 } 6647 }
6586 cnt--; 6648 cnt--;
6649 msleep(1);
6587 smp_rmb(); 6650 smp_rmb();
6588 } 6651 }
6589 } 6652 }
6590 6653
6591 /* Wait until all slow path tasks complete */ 6654 /* Give HW time to discard old tx messages */
6592 cnt = 1000; 6655 msleep(1);
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595 6656
6596 for_each_queue(bp, i) 6657 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi)); 6658 napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6662,79 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6601 /* Release IRQs */ 6662 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6663 bnx2x_free_irq(bp);
6603 6664
6604 if (bp->flags & NO_WOL_FLAG) 6665 if (unload_mode == UNLOAD_NORMAL)
6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6670 if (CHIP_IS_E1H(bp))
6671 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6672
6607 else if (bp->wol) { 6673 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6674 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6675 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6676 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6677 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6678 preserve entry 0 which is used by the PMF */
6679 u8 entry = (BP_E1HVN(bp) + 1)*8;
6680
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6681 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6682 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6683
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6684 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6685 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6686 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6687
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6689
6624 } else 6690 } else
6625 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6691 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6626 6692
6693 if (CHIP_IS_E1(bp)) {
6694 struct mac_configuration_cmd *config =
6695 bnx2x_sp(bp, mcast_config);
6696
6697 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699 for (i = 0; i < config->hdr.length_6b; i++)
6700 CAM_INVALIDATE(config->config_table[i]);
6701
6702 config->hdr.length_6b = i;
6703 if (CHIP_REV_IS_SLOW(bp))
6704 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705 else
6706 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707 config->hdr.client_id = BP_CL_ID(bp);
6708 config->hdr.reserved1 = 0;
6709
6710 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714 } else { /* E1H */
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (CHIP_IS_E1H(bp))
6722 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6723
6627 /* Close multi and leading connections 6724 /* Close multi and leading connections
6628 Completions for ramrods are collected in a synchronous way */ 6725 Completions for ramrods are collected in a synchronous way */
6629 for_each_nondefault_queue(bp, i) 6726 for_each_nondefault_queue(bp, i)
6630 if (bnx2x_stop_multi(bp, i)) 6727 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6728 goto unload_error;
6632 6729
6633 if (CHIP_IS_E1H(bp)) 6730 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6731 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6732 BNX2X_ERR("Stop leading failed!\n");
6733#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6734 return -EBUSY;
6642 } 6735#else
6736 goto unload_error;
6643#endif 6737#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6738 }
6651 6739
6652unload_error: 6740unload_error:
@@ -6656,12 +6744,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6744 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6745 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6746 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6747 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6748 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6749 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6750 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6751 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6752 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6753 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6754 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6755 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6769,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6769 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6770 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6771 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6772 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6773 bnx2x_free_mem(bp);
6687 6774
6688 bp->state = BNX2X_STATE_CLOSED; 6775 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6820,93 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6733 /* Check if it is the UNDI driver 6820 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7 6821 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */ 6822 */
6823 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6824 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6737 if (val == 0x7) { 6825 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6826 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */ 6827 /* save our func */
6740 int func = BP_FUNC(bp); 6828 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq; 6829 u32 swap_en;
6830 u32 swap_val;
6742 6831
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6832 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744 6833
6745 /* try unload UNDI on port 0 */ 6834 /* try unload UNDI on port 0 */
6746 bp->func = 0; 6835 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp, 6836 bp->fw_seq =
6748 func_mb[bp->func].drv_mb_header) & 6837 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK); 6838 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code); 6839 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753 6840
6754 /* if UNDI is loaded on the other port */ 6841 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 6842 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756 6843
6844 /* send "DONE" for previous unload */
6845 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6846
6847 /* unload UNDI on port 1 */
6757 bp->func = 1; 6848 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp, 6849 bp->fw_seq =
6759 func_mb[bp->func].drv_mb_header) & 6850 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK); 6851 DRV_MSG_SEQ_NUMBER_MASK);
6761 6852 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 bnx2x_fw_command(bp, 6853
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); 6854 bnx2x_fw_command(bp, reset_code);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 } 6855 }
6771 6856
6857 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6858 HC_REG_CONFIG_0), 0x1000);
6859
6860 /* close input traffic and wait for it */
6861 /* Do not rcv packets to BRB */
6862 REG_WR(bp,
6863 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6864 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6865 /* Do not direct rcv packets that are not for MCP to
6866 * the BRB */
6867 REG_WR(bp,
6868 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6869 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6870 /* clear AEU */
6871 REG_WR(bp,
6872 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6873 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6874 msleep(10);
6875
6876 /* save NIG port swap info */
6877 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6878 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6772 /* reset device */ 6879 /* reset device */
6773 REG_WR(bp, 6880 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6881 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f); 6882 0xd3ffffff);
6776 REG_WR(bp, 6883 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6884 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403); 6885 0x1403);
6886 /* take the NIG out of reset and restore swap values */
6887 REG_WR(bp,
6888 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6889 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6890 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6891 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6892
6893 /* send unload done to the MCP */
6894 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6895
6896 /* restore our func and fw_seq */
6897 bp->func = func;
6898 bp->fw_seq =
6899 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6900 DRV_MSG_SEQ_NUMBER_MASK);
6779 } 6901 }
6902 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6780 } 6903 }
6781} 6904}
6782 6905
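
The widened UNDI reset above (0xd3ffff7f -> 0xd3ffffff) now resets the NIG as well, so the port-swap strap registers are read first and written back once the NIG is taken out of reset. A toy save/reset/restore bracket showing the ordering; the two-element register array is of course a stand-in for real register access:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t regs[2];    /* NIG_REG_PORT_SWAP / NIG_REG_STRAP_OVERRIDE */

    static uint32_t reg_rd(int r)             { return regs[r]; }
    static void     reg_wr(int r, uint32_t v) { regs[r] = v; }

    /* Save the swap straps, do the destructive reset, restore them --
     * the bracket the UNDI path above puts around the 0xd3ffffff reset
     * now that the NIG is included. */
    static void reset_with_swap_restore(void)
    {
        uint32_t swap_val = reg_rd(0);
        uint32_t swap_en  = reg_rd(1);

        regs[0] = regs[1] = 0;      /* the reset clears the straps */

        reg_wr(0, swap_val);        /* after taking the NIG out of reset */
        reg_wr(1, swap_en);
    }

    int main(void)
    {
        regs[0] = regs[1] = 1;      /* pretend the board is port-swapped */
        reset_with_swap_restore();
        printf("swap=%u override=%u\n", regs[0], regs[1]);
        return 0;
    }
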
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 6906static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{ 6907{
6785 u32 val, val2, val3, val4, id; 6908 u32 val, val2, val3, val4, id;
6909 u16 pmc;
6786 6910
6787 /* Get the chip revision id and number. */ 6911 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6912 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6964,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X," 6964 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val); 6965 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 } 6966 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n", 6967
6844 (bp->flags & NO_WOL_FLAG)? "Not " : ""); 6968 if (BP_E1HVN(bp) == 0) {
6969 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6970 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6971 } else {
6972 /* no WOL capability for E1HVN != 0 */
6973 bp->flags |= NO_WOL_FLAG;
6974 }
6975 BNX2X_DEV_INFO("%sWoL capable\n",
6976 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6845 6977
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 6978 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 6979 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
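
WoL capability is now derived from the PCI power-management capability word rather than assumed: the PME_D3cold bit must be set, and only virtual-network function 0 of an E1H may wake the system. A compilable sketch of that decision; pmc is a canned value here instead of a real pci_read_config_word() result:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_PM_CAP_PME_D3cold 0x8000   /* PME# assertable from D3cold */

    /* WoL decision from the PM capability word, as the hunk above does
     * after reading bp->pm_cap + PCI_PM_PMC from config space. */
    static int wol_capable(uint16_t pmc, int vn)
    {
        if (vn != 0)
            return 0;               /* no WoL for E1HVN != 0 */
        return (pmc & PCI_PM_CAP_PME_D3cold) != 0;
    }

    int main(void)
    {
        printf("%sWoL capable\n", wol_capable(0x8000, 0) ? "" : "Not ");
        return 0;
    }
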
@@ -7274,9 +7406,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7274 bp->mf_config = 7406 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7407 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7276 7408
7277 val = 7409 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & 7410 FUNC_MF_CFG_E1HOV_TAG_MASK);
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7411 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7281 7412
7282 bp->e1hov = val; 7413 bp->e1hov = val;
@@ -7324,7 +7455,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7324 7455
7325 if (BP_NOMCP(bp)) { 7456 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */ 7457 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n"); 7458 BNX2X_ERR("warning random MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr); 7459 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 7460 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 } 7461 }
@@ -7337,8 +7468,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7337 int func = BP_FUNC(bp); 7468 int func = BP_FUNC(bp);
7338 int rc; 7469 int rc;
7339 7470
7340 if (nomcp) 7471 /* Disable interrupt handling until HW is initialized */
7341 bp->flags |= NO_MCP_FLAG; 7472 atomic_set(&bp->intr_sem, 1);
7342 7473
7343 mutex_init(&bp->port.phy_mutex); 7474 mutex_init(&bp->port.phy_mutex);
7344 7475
@@ -7377,8 +7508,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7377 bp->tx_ticks = 50; 7508 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25; 7509 bp->rx_ticks = 25;
7379 7510
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7511 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval); 7512 bp->current_interval = (poll ? poll : bp->timer_interval);
7384 7513
@@ -7628,25 +7757,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info) 7757 struct ethtool_drvinfo *info)
7629{ 7758{
7630 struct bnx2x *bp = netdev_priv(dev); 7759 struct bnx2x *bp = netdev_priv(dev);
7631 char phy_fw_ver[PHY_FW_VER_LEN]; 7760 u8 phy_fw_ver[PHY_FW_VER_LEN];
7632 7761
7633 strcpy(info->driver, DRV_MODULE_NAME); 7762 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION); 7763 strcpy(info->version, DRV_MODULE_VERSION);
7635 7764
7636 phy_fw_ver[0] = '\0'; 7765 phy_fw_ver[0] = '\0';
7637 if (bp->port.pmf) { 7766 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp); 7767 bnx2x_acquire_phy_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params, 7768 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED), 7769 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN); 7770 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp); 7771 bnx2x_release_phy_lock(bp);
7643 } 7772 }
7644 7773
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", 7774 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, 7775 (bp->common.bc_ver & 0xff0000) >> 16,
7647 BCM_5710_FW_REVISION_VERSION, 7776 (bp->common.bc_ver & 0xff00) >> 8,
7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, 7777 (bp->common.bc_ver & 0xff),
7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); 7778 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7650 strcpy(info->bus_info, pci_name(bp->pdev)); 7779 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS; 7780 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS; 7781 info->testinfo_len = BNX2X_NUM_TESTS;
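
ethtool's fw_version string now reports the bootcode version decoded from the packed 32-bit bc_ver, one byte per component, instead of the driver's compiled-in firmware constants. The same byte extraction in isolation, using this driver's BNX2X_BC_VER value as sample input:

    #include <stdio.h>

    int main(void)
    {
        unsigned bc_ver = 0x040200;    /* BNX2X_BC_VER from this driver */
        char buf[32];

        /* Same byte extraction as the new snprintf() above */
        snprintf(buf, sizeof(buf), "BC:%u.%u.%u",
                 (bc_ver & 0xff0000) >> 16,
                 (bc_ver & 0xff00) >> 8,
                 (bc_ver & 0xff));
        printf("%s\n", buf);           /* BC:4.2.0 */
        return 0;
    }
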
@@ -8097,7 +8226,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8097 if (eeprom->magic == 0x00504859) 8226 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) { 8227 if (bp->port.pmf) {
8099 8228
8100 bnx2x_phy_hw_lock(bp); 8229 bnx2x_acquire_phy_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp), 8230 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config, 8231 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED), 8232 (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8238,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8109 rc |= bnx2x_phy_init(&bp->link_params, 8238 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars); 8239 &bp->link_vars);
8111 } 8240 }
8112 bnx2x_phy_hw_unlock(bp); 8241 bnx2x_release_phy_lock(bp);
8113 8242
8114 } else /* Only the PMF can access the PHY */ 8243 } else /* Only the PMF can access the PHY */
8115 return -EINVAL; 8244 return -EINVAL;
@@ -8128,7 +8257,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
8128 8257
8129 coal->rx_coalesce_usecs = bp->rx_ticks; 8258 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks; 8259 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132 8260
8133 return 0; 8261 return 0;
8134} 8262}
@@ -8146,44 +8274,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
8146 if (bp->tx_ticks > 0x3000) 8274 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000; 8275 bp->tx_ticks = 0x3000;
8148 8276
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
8154 if (netif_running(dev)) 8277 if (netif_running(dev))
8155 bnx2x_update_coalesce(bp); 8278 bnx2x_update_coalesce(bp);
8156 8279
8157 return 0; 8280 return 0;
8158} 8281}
8159 8282
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
8187static void bnx2x_get_ringparam(struct net_device *dev, 8283static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering) 8284 struct ethtool_ringparam *ering)
8189{ 8285{
@@ -8266,7 +8362,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8266 8362
8267 if (epause->autoneg) { 8363 if (epause->autoneg) {
8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8364 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 8365 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8270 return -EINVAL; 8366 return -EINVAL;
8271 } 8367 }
8272 8368
@@ -8285,6 +8381,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8285 return 0; 8381 return 0;
8286} 8382}
8287 8383
8384static int bnx2x_set_flags(struct net_device *dev, u32 data)
8385{
8386 struct bnx2x *bp = netdev_priv(dev);
8387 int changed = 0;
8388 int rc = 0;
8389
8390 /* TPA requires Rx CSUM offloading */
8391 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8392 if (!(dev->features & NETIF_F_LRO)) {
8393 dev->features |= NETIF_F_LRO;
8394 bp->flags |= TPA_ENABLE_FLAG;
8395 changed = 1;
8396 }
8397
8398 } else if (dev->features & NETIF_F_LRO) {
8399 dev->features &= ~NETIF_F_LRO;
8400 bp->flags &= ~TPA_ENABLE_FLAG;
8401 changed = 1;
8402 }
8403
8404 if (changed && netif_running(dev)) {
8405 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8406 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8407 }
8408
8409 return rc;
8410}
8411
8288static u32 bnx2x_get_rx_csum(struct net_device *dev) 8412static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{ 8413{
8290 struct bnx2x *bp = netdev_priv(dev); 8414 struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8419,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) 8419static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{ 8420{
8297 struct bnx2x *bp = netdev_priv(dev); 8421 struct bnx2x *bp = netdev_priv(dev);
8422 int rc = 0;
8298 8423
8299 bp->rx_csum = data; 8424 bp->rx_csum = data;
8300 return 0; 8425
8426 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8427 TPA'ed packets will be discarded due to wrong TCP CSUM */
8428 if (!data) {
8429 u32 flags = ethtool_op_get_flags(dev);
8430
8431 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8432 }
8433
8434 return rc;
8301} 8435}
8302 8436
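
The relocated bnx2x_set_flags() and the reworked bnx2x_set_rx_csum() above encode one invariant: TPA aggregates TCP frames based on the hardware checksum result, so LRO may only be enabled while Rx checksumming is on, and clearing rx_csum drops the LRO flag. A small model of the coupled setters; the flag bit and struct are invented for the sketch:

    #include <stdio.h>

    #define FLAG_LRO 0x1

    struct dev_state {
        int      rx_csum;
        unsigned flags;
    };

    /* LRO (TPA) is only honoured while Rx checksumming is on. */
    static void set_flags(struct dev_state *d, unsigned flags)
    {
        if ((flags & FLAG_LRO) && d->rx_csum)
            d->flags |= FLAG_LRO;
        else
            d->flags &= ~FLAG_LRO;
    }

    /* Clearing rx_csum also drops LRO, as bnx2x_set_rx_csum() now does
     * through bnx2x_set_flags(). */
    static void set_rx_csum(struct dev_state *d, int on)
    {
        d->rx_csum = on;
        if (!on)
            set_flags(d, d->flags & ~FLAG_LRO);
    }

    int main(void)
    {
        struct dev_state d = { 1, FLAG_LRO };

        set_rx_csum(&d, 0);
        printf("lro=%u\n", d.flags & FLAG_LRO);   /* 0: TPA disabled */
        return 0;
    }
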
8303static int bnx2x_set_tso(struct net_device *dev, u32 data) 8437static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8469,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 {
 	int idx, i, rc = -ENODEV;
 	u32 wr_val = 0;
+	int port = BP_PORT(bp);
 	static const struct {
 		u32 offset0;
 		u32 offset1;
@@ -8400,7 +8535,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 
 	for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 		u32 offset, mask, save_val, val;
-		int port = BP_PORT(bp);
 
 		offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
 		mask = reg_tbl[i].mask;
@@ -8446,16 +8580,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	static const struct {
 		char *name;
 		u32 offset;
-		u32 mask;
+		u32 e1_mask;
+		u32 e1h_mask;
 	} prty_tbl[] = {
-		{ "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
-		{ "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
-		{ "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
-		{ "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
-		{ "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
-		{ "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
+		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
+		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
 
-		{ NULL, 0xffffffff, 0 }
+		{ NULL, 0xffffffff, 0, 0 }
 	};
 
 	if (!netif_running(bp->dev))
@@ -8469,7 +8604,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	/* Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if (val & ~(prty_tbl[i].mask)) {
+		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
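Each parity-status register now carries a per-chip-revision "don't care" mask rather than a single one. The compound condition above reads as: select the mask for the running chip, then flag any unmasked parity bit. An equivalent reformulation (sketch only, not part of the patch):

	static inline int prty_failed(struct bnx2x *bp, u32 val,
				      u32 e1_mask, u32 e1h_mask)
	{
		u32 mask = CHIP_IS_E1(bp) ? e1_mask : e1h_mask;

		return (val & ~mask) != 0;
	}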
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_BMAC;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 
 	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, link_up);
 
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
 	if (!netif_running(dev))
 		return;
 
-	/* offline tests are not suppoerted in MF mode */
+	/* offline tests are not supported in MF mode */
 	if (IS_E1HMF(bp))
 		etest->flags &= ~ETH_TEST_FL_OFFLINE;
 
@@ -8827,76 +8963,99 @@ static const struct {
 	long offset;
 	int size;
 	u32 flags;
-	char string[ETH_GSTRING_LEN];
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+	u8 string[ETH_GSTRING_LEN];
 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
-	{ STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
-	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
-	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
+/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_FUNC, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
-				8, 1, "rx_ucast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
-				8, 1, "rx_mcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
-				8, 1, "rx_bcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
 	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-				8, 1, "tx_packets" },
+				8, STATS_FLAGS_FUNC, "tx_packets" },
 	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-				8, 0, "tx_mac_errors" },
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-				8, 0, "tx_carrier_errors" },
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
-				8, 0, "rx_crc_errors" },
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
-				8, 0, "rx_align_errors" },
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
-				8, 0, "tx_single_collisions" },
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
-				8, 0, "tx_multi_collisions" },
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
-				8, 0, "tx_deferred" },
+				8, STATS_FLAGS_PORT, "tx_deferred" },
 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
-				8, 0, "tx_excess_collisions" },
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
-				8, 0, "tx_late_collisions" },
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
-				8, 0, "tx_total_collisions" },
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
 	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-				8, 0, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
 	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-				8, 0, "rx_undersize_packets" },
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
 	{ STATS_OFFSET32(jabber_packets_received),
-				4, 1, "rx_oversize_packets" },
+				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
-				8, 0, "tx_64_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
-				8, 0, "tx_65_to_127_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
-				8, 0, "tx_128_to_255_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
-				8, 0, "tx_256_to_511_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
-				8, 0, "tx_512_to_1023_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
-				8, 0, "tx_1024_to_1522_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
-				8, 0, "tx_1523_to_9022_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
-				8, 0, "rx_xon_frames" },
+				8, STATS_FLAGS_PORT, "rx_xon_frames" },
 	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
-				8, 0, "rx_xoff_frames" },
-	{ STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
-	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
+				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
+	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xon_frames" },
+	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
 	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-				8, 0, "rx_mac_ctrl_frames" },
-	{ STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
-	{ STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
-	{ STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
-	{ STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
-/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(no_buff_discard),
+				4, STATS_FLAGS_FUNC, "rx_discards" },
+	{ STATS_OFFSET32(xxoverflow_discard),
+				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "brb_truncate" },
+/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
+/* 42 */{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
 };
 
+#define IS_NOT_E1HMF_STAT(bp, i) \
+		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
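Most entries above have size 8: the hardware exports these counters as two consecutive u32 words, high word first, which is why every offset names a *_hi field. A sketch of how one such counter would be assembled on readout (helper name illustrative, layout as assumed above):

	/* size == 8 entry: hi word at offset32, lo word right behind it */
	static inline u64 stat64(const u32 *hw_stats, long offset32)
	{
		return ((u64)hw_stats[offset32] << 32) | hw_stats[offset32 + 1];
	}

The new IS_NOT_E1HMF_STAT() macro then filters the table: in E1H multi-function mode the physical port is shared between functions, so per-port (STATS_FLAGS_PORT) counters would mix in other functions' traffic and are skipped, as the next three hunks show.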
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+			if (IS_NOT_E1HMF_STAT(bp, i))
 				continue;
 			strcpy(buf + j*ETH_GSTRING_LEN,
 			       bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
 	int i, num_stats = 0;
 
 	for (i = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 		num_stats++;
 	}
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 	int i, j;
 
 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 
 		if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 						      PCI_PM_CTRL_PME_STATUS));
 
 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-		/* delay required during transition out of D3hot */
+			/* delay required during transition out of D3hot */
 			msleep(20);
 		break;
 
@@ -9104,17 +9263,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 
 	bnx2x_update_fpsb_idx(fp);
 
-	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
+	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
-	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
-	rmb(); /* bnx2x_has_work() reads the status block */
+	rmb(); /* BNX2X_HAS_WORK() reads the status block */
 
 	/* must not complete if we consumed full budget */
-	if ((work_done < budget) && !bnx2x_has_work(fp)) {
+	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 poll_panic:
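The open-coded producer/consumer tests removed above move behind macros. Judging from the deleted lines, their definitions in bnx2x.h are presumably along these lines (a reconstruction, not quoted from the header):

	#define BNX2X_HAS_TX_WORK(fp) \
		((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
		 (fp->tx_pkt_prod != fp->tx_pkt_cons))

	#define BNX2X_HAS_RX_WORK(fp) \
		(le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)

	#define BNX2X_HAS_WORK(fp) \
		(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))

The rmb() keeps its purpose: the macros read status-block fields the chip updates via DMA, so those reads must stay ordered after the Tx/Rx processing above.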
@@ -9131,7 +9289,7 @@ poll_panic:
 
 
 /* we split the first BD into headers and data BDs
- * to ease the pain of our fellow micocode engineers
+ * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
  * So far this has only been observed to happen
  * in Other Operating Systems(TM)
@@ -9238,7 +9396,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 	/* Check if LSO packet needs to be copied:
 	   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
 	int wnd_size = MAX_FETCH_BD - 3;
-	/* Number of widnows to check */
+	/* Number of windows to check */
 	int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 	int wnd_idx = 0;
 	int frag_idx = 0;
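For orientation, the check this function implements: the FW can fetch at most MAX_FETCH_BD BDs per MSS-sized chunk, three of which are reserved, so the skb must be linearized if any wnd_size consecutive frags carry less than one MSS of payload. A simplified, standalone version of that sliding-window test (names illustrative; the real code also folds in the linear part and header lengths):

	static int window_too_small(const int *frag_sz, int nr_frags,
				    int wnd_size, int lso_mss)
	{
		int wnd_sum = 0, i;

		for (i = 0; i < nr_frags; i++) {
			wnd_sum += frag_sz[i];
			if (i >= wnd_size)		/* slide the window */
				wnd_sum -= frag_sz[i - wnd_size];
			if ((i >= wnd_size - 1) && (wnd_sum < lso_mss))
				return 1;	/* linearization required */
		}
		return 0;
	}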
@@ -9340,7 +9498,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 
-	/* First, check if we need to linearaize the skb
+	/* First, check if we need to linearize the skb
 	   (due to FW restrictions) */
 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
 		/* Statistics of linearization */
@@ -9349,7 +9507,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 			   "silently dropping this SKB\n");
 			dev_kfree_skb_any(skb);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 	}
 
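NETDEV_TX_OK equals 0, so this hunk changes nothing at runtime; it documents the ndo_start_xmit contract: NETDEV_TX_OK means the skb was consumed (sent, or deliberately dropped and freed, as above), while NETDEV_TX_BUSY asks the stack to requeue an skb the driver must not have freed. The two shapes side by side (hypothetical fragment):

	if (ring_full) {			/* hypothetical condition */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;		/* skb untouched, requeued */
	}
	dev_kfree_skb_any(skb);			/* consumed: dropped on purpose */
	return NETDEV_TX_OK;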
@@ -9372,7 +9530,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 	tx_bd->general_data = (UNICAST_ADDRESS <<
 			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
-	tx_bd->general_data |= 1; /* header nbd */
+	/* header nbd */
+	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = fp->tx_bd_prod;
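The bare "|= 1" was correct only while the header-BD count happened to occupy the lowest bits of general_data; shifting by ETH_TX_BD_HDR_NBDS_SHIFT states the field position explicitly. A generic illustration of the pattern with made-up positions:

	#define HDR_NBDS_SHIFT		0	/* hypothetical layout */
	#define ADDR_TYPE_SHIFT		6	/* hypothetical layout */
	#define ADDR_TYPE_UNICAST	0	/* hypothetical value  */

	/* pack "one header BD" and the address type into one byte */
	u8 general_data = (ADDR_TYPE_UNICAST << ADDR_TYPE_SHIFT) |
			  (1 << HDR_NBDS_SHIFT);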
@@ -9451,7 +9610,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
 	tx_bd->nbd = cpu_to_le16(nbd);
 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 
@@ -9721,9 +9880,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev)) {
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp);
+			bnx2x_set_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp);
+			bnx2x_set_mac_addr_e1h(bp, 1);
 	}
 
 	return 0;
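The added second argument presumably selects between programming (1) and clearing (0) the MAC filter, letting the same helpers serve both setup and teardown. Under that assumption, an unload-side caller would look like:

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 0);	/* assumed: remove the filter */
	else
		bnx2x_set_mac_addr_e1h(bp, 0);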
@@ -9734,6 +9893,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
 	int err;
 
 	switch (cmd) {
@@ -9749,7 +9909,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
 				      DEFAULT_PHY_DEV_ADDR,
 				      (data->reg_num & 0x1f), &mii_regval);
 		data->val_out = mii_regval;
@@ -9765,7 +9925,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
 				       DEFAULT_PHY_DEV_ADDR,
 				       (data->reg_num & 0x1f), data->val_in);
 		mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10301,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(dev);
 
-	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 
 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
@@ -10174,7 +10334,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	bnx2x_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 
-	rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 
 	rtnl_unlock();
 
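Suspend and resume now use the UNLOAD_CLOSE/LOAD_OPEN pair rather than the NORMAL modes, presumably so a power transition looks to the device and management firmware like an administrative close followed by an open instead of a reset-path unload. The assumed pairing, in brief:

	/* sketch of the assumed pairing, not driver code */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);	/* .suspend - like ndo_stop */
	rc = bnx2x_nic_load(bp, LOAD_OPEN);	/* .resume  - like ndo_open */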