Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	1215
1 file changed, 687 insertions(+), 528 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index af251a5df844..971576b43687 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -44,7 +44,6 @@
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
-#include <linux/version.h>
 #include <net/ip6_checksum.h>
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
@@ -60,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.6"
-#define DRV_MODULE_RELDATE	"2008/06/23"
+#define DRV_MODULE_VERSION	"1.45.17"
+#define DRV_MODULE_RELDATE	"2008/08/13"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +75,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
-static int disable_tpa;
-static int nomcp;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 static int use_multi;
 
+module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
 module_param(poll, int, 0);
 module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
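The parameter block above is the standard kernel idiom: module_param(name, type, perm) registers a load-time knob and MODULE_PARM_DESC supplies the help text that modinfo prints. A minimal sketch of the same pattern, assuming a hypothetical out-of-tree module (demo_init/demo_exit are illustrative names, not bnx2x code):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int disable_tpa;	/* 0 = TPA (LRO) enabled, non-zero = disabled */
	module_param(disable_tpa, int, 0);	/* perm 0: no sysfs entry */
	MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");

	static int __init demo_init(void)
	{
		printk(KERN_INFO "demo: disable_tpa=%d\n", disable_tpa);
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Loading it with "insmod demo.ko disable_tpa=1" overrides the default of 0.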
@@ -237,17 +234,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 	while (*wb_comp != DMAE_COMP_VAL) {
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 
 	mutex_unlock(&bp->dmae_mutex);
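The two DMAE hunks here (and the stats-completion hunk later in this patch) apply one refactor: check the remaining poll budget before delaying, so the iteration that declares a timeout no longer pays for a final sleep. A stand-alone sketch of the reordered loop, with hypothetical names (delay stands in for the msleep(100)/udelay(5) pair):

	/* Returns 0 once *done becomes non-zero, -1 after 'budget' delays. */
	static int poll_for_completion(volatile int *done, int budget,
				       void (*delay)(void))
	{
		while (!*done) {
			if (!budget)
				return -1;	/* time out before sleeping again */
			budget--;
			delay();	/* e.g. msleep(100) on FPGA, udelay(5) on silicon */
		}
		return 0;
	}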
@@ -310,17 +306,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +498,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	int i;
 	u16 j, start, end;
 
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	for_each_queue(bp, i) {
@@ -513,17 +511,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
513 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
514 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
515 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
516 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" 514 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
517 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" 515 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
518 " rx_sge_prod(%x) last_max_sge(%x)\n", 516 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
519 fp->rx_comp_prod, fp->rx_comp_cons, 517 fp->rx_bd_prod, fp->rx_bd_cons,
520 le16_to_cpu(*fp->rx_cons_sb), 518 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
521 le16_to_cpu(*fp->rx_bd_cons_sb), 519 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
522 fp->rx_sge_prod, fp->last_max_sge); 520 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
523 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" 521 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
524 " bd data(%x,%x) rx_alloc_failed(%lx)\n", 522 " *sb_u_idx(%x) bd data(%x,%x)\n",
525 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, 523 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
526 hw_prods->bds_prod, fp->rx_alloc_failed); 524 fp->status_blk->c_status_block.status_block_index,
525 fp->fp_u_idx,
526 fp->status_blk->u_status_block.status_block_index,
527 hw_prods->packets_prod, hw_prods->bds_prod);
527 528
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 529 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); 530 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +554,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
-		start = 0;
-		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
 		for (j = start; j < end; j++) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +583,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 }
 
 static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +682,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
 	igu_ack.status_block_index = index;
@@ -694,9 +693,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
 		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 
-	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
-	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 }
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +715,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
-	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
-	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
 
-	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
-	   result, BAR_IGU_INTMEM + igu_addr);
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
 
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
-	if (result == 0) {
-		BNX2X_ERR("read %x from IGU\n", result);
-		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
-	}
-#endif
 	return result;
 }
 
@@ -898,6 +876,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		netif_tx_lock(bp->dev);
 
 		if (netif_queue_stopped(bp->dev) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_wake_queue(bp->dev);
 
@@ -905,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	}
 }
 
+
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
 {
@@ -960,6 +940,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1150,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	memset(fp->sge_mask, 0xff,
 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
 
-	/* Clear the two last indeces in the page to 1:
-	   these are the indeces that correspond to the "next" element,
+	/* Clear the two last indices in the page to 1:
+	   these are the indices that correspond to the "next" element,
 	   hence will never be indicated and should be removed from
 	   the calculations. */
 	bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1242,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		   where we are and drop the whole packet */
 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 		if (unlikely(err)) {
-			fp->rx_alloc_failed++;
+			bp->eth_stats.rx_skb_alloc_failed++;
 			return err;
 		}
 
@@ -1297,14 +1278,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
 			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
-	/* if alloc failed drop the packet and keep the buffer in the bin */
 	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
-	/* else fix ip xsum and give it to the stack */
-	/* (no need to map the new skb) */
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > bp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1333,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		fp->tpa_pool[queue].skb = new_skb;
 
 	} else {
+		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		fp->rx_alloc_failed++;
+		bp->eth_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1371,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 	int rx_pkt = 0;
-	u16 queue;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1456,7 +1436,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if ((!fp->disable_tpa) &&
 			    (TPA_TYPE(cqe_fp_flags) !=
 			     (TPA_TYPE_START | TPA_TYPE_END))) {
-				queue = cqe->fast_path_cqe.queue_index;
+				u16 queue = cqe->fast_path_cqe.queue_index;
 
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
 					DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1483,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			/* is this an error packet? */
 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-			/* do we sometimes forward error packets anyway? */
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR flags %x rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				/* TBD make sure MC counts this as a drop */
+				bp->eth_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1524,7 +1503,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR packet dropped "
 					   "because of alloc failure\n");
-					fp->rx_alloc_failed++;
+					bp->eth_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1550,7 +1529,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR packet dropped because "
 				   "of alloc failure\n");
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1559,10 +1538,12 @@ reuse_rx:
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
 			skb->ip_summed = CHECKSUM_NONE;
-			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-			/* TBD do we pass bad csum packets in promisc */
+			if (bp->rx_csum) {
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					bp->eth_stats.hw_csum_err++;
+			}
 		}
 
 #ifdef BCM_VLAN
@@ -1615,6 +1596,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1635,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
 
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
 	mask = 0x2 << bp->fp[0].sb_id;
 	if (status & mask) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1686,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  * General service functions
  */
 
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 	int cnt;
 
 	/* Validating that the resource is within range */
@@ -1714,8 +1702,15 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (lock_status & resource_bit) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
@@ -1725,9 +1720,8 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	/* Try for 1 second every 5ms */
 	for (cnt = 0; cnt < 200; cnt++) {
 		/* Try to acquire the lock */
-		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
-		       resource_bit);
-		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
 		if (lock_status & resource_bit)
 			return 0;
 
@@ -1737,11 +1731,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	return -EAGAIN;
 }
 
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1746,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (!(lock_status & resource_bit)) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EFAULT;
 	}
 
-	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
 	return 0;
 }
 
 /* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
@@ -1772,25 +1774,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 }
 
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 	int gpio_shift = gpio_num +
 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 	u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1803,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 	/* read GPIO and mask except the float bits */
 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
@@ -1822,7 +1824,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 		break;
 
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 		   gpio_num, gpio_shift);
 		/* set FLOAT */
@@ -1834,7 +1836,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 
 	return 0;
 }
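The acquire/release helpers renamed throughout this patch drive a register-backed hardware semaphore: writing the resource bit to the control register plus 4 requests the lock, reading the control register back confirms ownership, and writing the bit to the control register itself releases it. A minimal sketch under those assumptions, with hypothetical reg_read/reg_write accessors standing in for REG_RD/REG_WR:

	/* Poll a set-on-write hardware semaphore; mirrors the loop in
	 * bnx2x_acquire_hw_lock() above (simplified, no range checks). */
	static int hw_sem_acquire(u32 base, u32 resource_bit)
	{
		int cnt;

		/* Try for 1 second every 5ms */
		for (cnt = 0; cnt < 200; cnt++) {
			reg_write(base + 4, resource_bit);	/* request */
			if (reg_read(base) & resource_bit)	/* granted? */
				return 0;
			msleep(5);
		}
		return -EAGAIN;
	}

	static void hw_sem_release(u32 base, u32 resource_bit)
	{
		reg_write(base, resource_bit);	/* clear our ownership bit */
	}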
@@ -1850,19 +1852,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 	/* read SPIO and mask except the float bits */
 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
 	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 		/* clear FLOAT and set CLR */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 		break;
 
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 		/* clear FLOAT and set SET */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1882,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
 	return 0;
 }
@@ -1940,46 +1942,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
 
 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 {
-	u8 rc;
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
 
-	/* Initialize link parameters structure variables */
-	bp->link_params.mtu = bp->dev->mtu;
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (IS_E1HMF(bp))
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+		else if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
 
-	bnx2x_phy_hw_lock(bp);
-	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	if (bp->link_vars.link_up)
-		bnx2x_link_report(bp);
+		if (bp->link_vars.link_up)
+			bnx2x_link_report(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
 
-	return rc;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	return -EINVAL;
 }
 
 static void bnx2x_link_set(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not setting link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not resetting link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
 	u8 rc;
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	return rc;
 }
@@ -1991,7 +2010,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
       sum of vn_min_rates
         or
       0 - if all the min_rates are 0.
-      In the later case fainess algorithm should be deactivated.
+      In the later case fairness algorithm should be deactivated.
       If not all min_rates are zero then those that are zeroes will
       be set to 1.
  */
@@ -2114,7 +2133,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 						FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 		/* If FAIRNESS is enabled (not all min rates are zeroes) and
 		   if current min rate is zero - set it to 1.
-		   This is a requirment of the algorithm. */
+		   This is a requirement of the algorithm. */
 		if ((vn_min_rate == 0) && wsum)
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2222,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2357,7 +2376,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 }
 
 /* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
 	u32 i, j, val;
 	int rc = 0;
@@ -2374,15 +2393,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
 		msleep(5);
 	}
 	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire nvram interface\n");
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 		rc = -EBUSY;
 	}
 
 	return rc;
 }
 
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
 	u32 val = 0;
 
@@ -2395,7 +2414,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 		rc |= 1;
@@ -2426,26 +2444,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 {
 	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
 
-	if (~bp->aeu_mask & (asserted & 0xff))
-		BNX2X_ERR("IGU ERROR\n");
 	if (bp->attn_state & asserted)
 		BNX2X_ERR("IGU ERROR\n");
 
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
-	   bp->aeu_mask, asserted);
-	bp->aeu_mask &= ~(asserted & 0xff);
-	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	REG_WR(bp, aeu_addr, bp->aeu_mask);
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2523,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	} /* if hardwired */
 
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
-	   asserted, BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
 	if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2553,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure attention */
 
-		/* The PHY reset is controled by GPIO 1 */
+		/* The PHY reset is controlled by GPIO 1 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
-		/* Low power mode is controled by GPIO 2 */
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+		/* Low power mode is controlled by GPIO 2 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 		/* mark the failure */
 		bp->link_params.ext_phy_config &=
 				~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2722,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	int index;
 	u32 reg_addr;
 	u32 val;
+	u32 aeu_mask;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
-	bnx2x_lock_alr(bp);
+	bnx2x_acquire_alr(bp);
 
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2758,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 			     HW_PRTY_ASSERT_SET_1) ||
 			    (attn.sig[2] & group_mask.sig[2] &
 			     HW_PRTY_ASSERT_SET_2))
 				BNX2X_ERR("FATAL HW block parity attention\n");
 		}
 	}
 
-	bnx2x_unlock_alr(bp);
+	bnx2x_release_alr(bp);
 
-	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
 	val = ~deasserted;
-/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
-	   val, BAR_IGU_INTMEM + reg_addr); */
-	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
 
-	if (bp->aeu_mask & (deasserted & 0xff))
-		BNX2X_ERR("IGU BUG!\n");
 	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU BUG!\n");
+		BNX2X_ERR("IGU ERROR\n");
 
 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
-	bp->aeu_mask |= (deasserted & 0xff);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
-	REG_WR(bp, reg_addr, bp->aeu_mask);
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state &= ~deasserted;
@@ -2800,7 +2827,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return;
 	}
 
@@ -2808,7 +2835,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 /* if (status == 0)				     */
 /*	BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
 	/* HW attentions */
 	if (status & 0x1)
@@ -2838,7 +2865,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
@@ -2876,11 +2903,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 			/* underflow */ \
 			d_hi = m_hi - s_hi; \
 			if (d_hi > 0) { \
 				/* we can 'loan' 1 */ \
 				d_hi--; \
 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
 			} else { \
 				/* m_hi <= s_hi */ \
 				d_hi = 0; \
 				d_lo = 0; \
 			} \
@@ -2890,7 +2917,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 			d_hi = 0; \
 			d_lo = 0; \
 		} else { \
 			/* m_hi >= s_hi */ \
 			d_hi = m_hi - s_hi; \
 			d_lo = m_lo - s_lo; \
 		} \
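The macro body shown in context above is the driver's 64-bit statistics subtraction: each counter is kept as hi/lo 32-bit halves, the low words are subtracted with a borrow from the high word, and the result clamps to zero when the minuend is smaller. The same arithmetic as a plain C function (a hypothetical helper, not the driver macro):

	#include <stdint.h>

	static void sub64(uint32_t m_hi, uint32_t m_lo,
			  uint32_t s_hi, uint32_t s_lo,
			  uint32_t *d_hi, uint32_t *d_lo)
	{
		if (m_lo < s_lo) {
			/* low word underflows: borrow 1 from the high word */
			if (m_hi > s_hi) {
				*d_hi = m_hi - s_hi - 1;
				*d_lo = m_lo + (UINT32_MAX - s_lo) + 1;
			} else {
				/* minuend < subtrahend: clamp to zero */
				*d_hi = *d_lo = 0;
			}
		} else if (m_hi < s_hi) {
			*d_hi = *d_lo = 0;
		} else {
			/* no borrow needed */
			*d_hi = m_hi - s_hi;
			*d_lo = m_lo - s_lo;
		}
	}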
@@ -2963,37 +2990,6 @@ static inline long bnx2x_hilo(u32 *hiref)
  * Init service functions
  */
 
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
@@ -3032,6 +3028,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3099,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 
 	might_sleep();
 	while (*stats_comp != DMAE_COMP_VAL) {
-		msleep(1);
 		if (!cnt) {
 			BNX2X_ERR("timeout waiting for stats finished\n");
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	return 1;
 }
@@ -3451,8 +3449,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-	UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3533,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
 		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
 
 	UPDATE_STAT64_NIG(egress_mac_pkt0,
 			  etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3712,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->rx_length_errors =
 			estats->rx_stat_etherstatsundersizepkts_lo +
 			estats->jabber_packets_received;
-	nstats->rx_over_errors = estats->brb_drop_lo +
-				 estats->brb_truncate_discard;
+	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
 	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
 	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
 	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3781,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 				       bp->fp->rx_comp_cons),
 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
 		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
-		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
 		       estats->driver_xoff, estats->brb_drop_lo);
 		printk(KERN_DEBUG "tstats: checksum_discard %u "
 		       "packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3992,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
 
 	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct ustorm_def_status_block)/4);
+			sizeof(struct ustorm_status_block)/4);
 	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct cstorm_def_status_block)/4);
+			sizeof(struct cstorm_status_block)/4);
 }
 
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
-			  struct host_status_block *sb, dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id)
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
@@ -4077,7 +4075,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    atten_status_block);
 	def_sb->atten_status_block.status_block_id = sb_id;
 
-	bp->def_att_idx = 0;
 	bp->attn_state = 0;
 
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4091,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 		       reg_offset + 0xc + 0x10*index);
 	}
 
-	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-				   MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
 	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 			     HC_REG_ATTN_MSG0_ADDR_L);
 
@@ -4114,17 +4108,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    u_def_status_block);
 	def_sb->u_def_status_block.status_block_id = sb_id;
 
-	bp->def_u_idx = 0;
-
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4125,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    c_def_status_block);
 	def_sb->c_def_status_block.status_block_id = sb_id;
 
-	bp->def_c_idx = 0;
-
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4142,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    t_def_status_block);
 	def_sb->t_def_status_block.status_block_id = sb_id;
 
-	bp->def_t_idx = 0;
-
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
 		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4159,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    x_def_status_block);
 	def_sb->x_def_status_block.status_block_id = sb_id;
 
-	bp->def_x_idx = 0;
-
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
 		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_XSTRORM_INTMEM +
 			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
 
 	bp->stats_pending = 0;
+	bp->set_mac_pending = 0;
 
 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
@@ -4209,21 +4188,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4188 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4189 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4190 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4191 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4192 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4193 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4194 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4195 U_SB_ETH_RX_CQ_INDEX),
4196 bp->rx_ticks ? 0 : 1);
4197 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4200 bp->rx_ticks ? 0 : 1);
4218 4201
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4202 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4203 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4204 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4205 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4206 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4207 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4208 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4209 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4210 bp->tx_ticks ? 0 : 1);
4228 } 4211 }
4229} 4212}
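
For reference, a minimal standalone sketch (userspace C) of the coalescing setup this hunk reworks: a tick value of zero disables host coalescing for that status-block index, anything else is scaled into the HC timeout register. The divide-by-12 is copied from the ticks/12 above and is assumed to be the HC timer resolution in microseconds; the struct and function names are invented for illustration.

        #include <stdint.h>
        #include <stdio.h>

        struct hc_index_cfg {
                uint8_t  timeout;       /* value for the SB_HC_TIMEOUT register */
                uint16_t disable;       /* value for the SB_HC_DISABLE register */
        };

        static struct hc_index_cfg hc_cfg_from_usec(uint32_t ticks_usec)
        {
                struct hc_index_cfg cfg = {
                        .timeout = (uint8_t)(ticks_usec / 12),
                        .disable = ticks_usec ? 0 : 1,  /* mirrors rx/tx_ticks ? 0 : 1 */
                };
                return cfg;
        }

        int main(void)
        {
                struct hc_index_cfg rx = hc_cfg_from_usec(25);  /* bp->rx_ticks default */
                struct hc_index_cfg tx = hc_cfg_from_usec(50);  /* bp->tx_ticks default */

                printf("rx: timeout=%u disable=%u\n", rx.timeout, rx.disable);
                printf("tx: timeout=%u disable=%u\n", tx.timeout, tx.disable);
                return 0;
        }
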
@@ -4256,7 +4239,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4239static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4240{
4258 int func = BP_FUNC(bp); 4241 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4242 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243 ETH_MAX_AGGREGATION_QUEUES_E1H;
4244 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4245 int i, j;
4261 4246
4262 bp->rx_buf_use_size = bp->dev->mtu; 4247 bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4255,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4270 bp->dev->mtu + ETH_OVREHEAD); 4255 bp->dev->mtu + ETH_OVREHEAD);
4271 4256
4272 for_each_queue(bp, j) { 4257 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4258 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4259
4260 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4261 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4262 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4263 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4337,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4337 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4338 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4339 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4340 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4341 fp->disable_tpa = 1;
4358 ring_prod = 0; 4342 ring_prod = 0;
4359 break; 4343 break;
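
The two hunks above make the TPA pool size chip-dependent and keep the graceful fallback: if the pool cannot be fully allocated, everything allocated so far is freed and the queue runs with TPA disabled instead of failing the whole load. A userspace sketch of that pattern, with malloc() standing in for netdev_alloc_skb() and the queue counts (32/64) assumed for illustration:

        #include <stdio.h>
        #include <stdlib.h>

        #define MAX_AGG_QUEUES_E1       32      /* assumed values */
        #define MAX_AGG_QUEUES_E1H      64

        struct toy_queue {
                void *tpa_pool[MAX_AGG_QUEUES_E1H];
                int disable_tpa;
        };

        static int init_tpa_pool(struct toy_queue *q, int is_e1)
        {
                int max = is_e1 ? MAX_AGG_QUEUES_E1 : MAX_AGG_QUEUES_E1H;
                int i;

                for (i = 0; i < max; i++) {
                        q->tpa_pool[i] = malloc(1500);
                        if (!q->tpa_pool[i]) {
                                /* cleanup already allocated elements,
                                   then run without TPA */
                                while (i--)
                                        free(q->tpa_pool[i]);
                                q->disable_tpa = 1;
                                return -1;
                        }
                }
                return 0;
        }

        int main(void)
        {
                struct toy_queue q = { .disable_tpa = 0 };

                init_tpa_pool(&q, 1);
                printf("tpa %s\n", q.disable_tpa ? "disabled" : "enabled");
                return 0;
        }
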
@@ -4363,13 +4347,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4347 fp->rx_sge_prod = ring_prod;
4364 4348
4365 /* Allocate BDs and initialize BD ring */ 4349 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4350 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4351 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4352 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4353 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4354 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4355 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4356 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4357 break;
4374 } 4358 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4359 ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4481,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4481 }
4498 4482
4499 context->cstorm_st_context.sb_index_number = 4483 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4484 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4485 context->cstorm_st_context.status_block_id = sb_id;
4502 4486
4503 context->xstorm_ag_context.cdu_reserved = 4487 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4519,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4519 int i;
4536 4520
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4521 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4522 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4523 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4524 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4525#ifdef BCM_VLAN
@@ -4579,7 +4563,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4563 int func = BP_FUNC(bp);
4580 int i; 4564 int i;
4581 4565
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4566 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4567
4584 switch (mode) { 4568 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4569 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4601,35 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4601 bnx2x_set_client_config(bp);
4618} 4602}
4619 4603
4620static void bnx2x_init_internal(struct bnx2x *bp) 4604static void bnx2x_init_internal_common(struct bnx2x *bp)
4605{
4606 int i;
4607
4608 /* Zero this manually as its initialization is
4609 currently missing in the initTool */
4610 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4611 REG_WR(bp, BAR_USTRORM_INTMEM +
4612 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4613}
4614
4615static void bnx2x_init_internal_port(struct bnx2x *bp)
4616{
4617 int port = BP_PORT(bp);
4618
4619 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4620 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623}
4624
4625static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4626{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4627 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4628 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4629 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4630 int func = BP_FUNC(bp);
4626 int i; 4631 int i;
4632 u16 max_agg_size;
4627 4633
4628 if (is_multi(bp)) { 4634 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4635 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4642,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4642 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4643 (*(u32 *)&tstorm_config));
4638 4644
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4645 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4646 bnx2x_set_storm_rx_mode(bp);
4644 4647
4648 /* reset xstorm per client statistics */
4649 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4650 REG_WR(bp, BAR_XSTRORM_INTMEM +
4651 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4652 i*4, 0);
4653 }
4654 /* reset tstorm per client statistics */
4655 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4656 REG_WR(bp, BAR_TSTRORM_INTMEM +
4657 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658 i*4, 0);
4659 }
4660
4661 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4662 stats_flags.collect_eth = 1;
4646 4663
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4664 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4665 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4666 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4667 ((u32 *)&stats_flags)[1]);
4651 4668
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4669 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4670 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4671 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4672 ((u32 *)&stats_flags)[1]);
4656 4673
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4674 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4675 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4676 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4677 ((u32 *)&stats_flags)[1]);
4661 4678
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4679 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4680 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682 REG_WR(bp, BAR_XSTRORM_INTMEM +
4683 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686 REG_WR(bp, BAR_TSTRORM_INTMEM +
4687 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4688 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4689 REG_WR(bp, BAR_TSTRORM_INTMEM +
4690 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4691 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4692
4665 if (CHIP_IS_E1H(bp)) { 4693 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4694 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4704,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4704 bp->e1hov);
4677 } 4705 }
4678 4706
4679 /* Zero this manualy as its initialization is 4707 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4708 max_agg_size = min((u32)(bp->rx_buf_use_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4709 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4710 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4711 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4712 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4713
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4714 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4715 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4718,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4718 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4719 U64_HI(fp->rx_comp_mapping));
4695 4720
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4721 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4722 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4723 max_agg_size);
4702 } 4724 }
4703} 4725}
4704 4726
4705static void bnx2x_nic_init(struct bnx2x *bp) 4727static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4728{
4729 switch (load_code) {
4730 case FW_MSG_CODE_DRV_LOAD_COMMON:
4731 bnx2x_init_internal_common(bp);
4732 /* no break */
4733
4734 case FW_MSG_CODE_DRV_LOAD_PORT:
4735 bnx2x_init_internal_port(bp);
4736 /* no break */
4737
4738 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4739 bnx2x_init_internal_func(bp);
4740 break;
4741
4742 default:
4743 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4744 break;
4745 }
4746}
4747
4748static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4749{
4707 int i; 4750 int i;
4708 4751
@@ -4717,19 +4760,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4760 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4761 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4762 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4763 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4764 FP_SB_ID(fp));
4765 bnx2x_update_fpsb_idx(fp);
4722 } 4766 }
4723 4767
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4768 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4769 DEF_SB_ID);
4770 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4771 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4772 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4773 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4774 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4775 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4776 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4777 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4778 bnx2x_int_enable(bp);
4735} 4779}
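
The switch introduced in bnx2x_init_internal() above relies on deliberate fall-through: the first function loaded on the chip (COMMON) also runs the PORT and FUNCTION stages, the first function on a port runs PORT and FUNCTION, and everyone else runs only FUNCTION. A compilable sketch of that cascade, with enum names invented for illustration:

        #include <stdio.h>

        enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

        static void init_internal(enum load_code code)
        {
                switch (code) {
                case LOAD_COMMON:
                        printf("common init\n");
                        /* no break - falls through on purpose */
                case LOAD_PORT:
                        printf("port init\n");
                        /* no break */
                case LOAD_FUNCTION:
                        printf("function init\n");
                        break;
                default:
                        fprintf(stderr, "unknown load code %d\n", code);
                        break;
                }
        }

        int main(void)
        {
                init_internal(LOAD_COMMON);     /* all three stages */
                init_internal(LOAD_FUNCTION);   /* only the last stage */
                return 0;
        }
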
@@ -4878,7 +4922,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4922 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4923 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4924 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4925 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4882 4926
4883 /* Write 0 to parser credits for CFC search request */ 4927 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4928 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4977,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4977 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4978 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4979 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4980 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 4981
4938 /* Write 0 to parser credits for CFC search request */ 4982 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4983 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5044,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 5044 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 5045 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 5046 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); 5047 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5004 5048
5005 DP(NETIF_MSG_HW, "done\n"); 5049 DP(NETIF_MSG_HW, "done\n");
5006 5050
@@ -5089,11 +5133,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5133 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5134#endif
5091 5135
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5136 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5137#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5138 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5202,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5202 }
5164 5203
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5204 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5205 /* set NIC mode */
5206 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5207 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5208 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5209
@@ -5333,6 +5374,13 @@ static int bnx2x_init_common(struct bnx2x *bp)
5333 ((u32 *)&tmp)[1]); 5374 ((u32 *)&tmp)[1]);
5334 } 5375 }
5335 5376
5377 if (!BP_NOMCP(bp)) {
5378 bnx2x_acquire_phy_lock(bp);
5379 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5380 bnx2x_release_phy_lock(bp);
5381 } else
5382 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5383
5336 return 0; 5384 return 0;
5337} 5385}
5338 5386
@@ -5638,18 +5686,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5638 int func = BP_FUNC(bp); 5686 int func = BP_FUNC(bp);
5639 u32 seq = ++bp->fw_seq; 5687 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0; 5688 u32 rc = 0;
5689 u32 cnt = 1;
5690 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5641 5691
5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 5692 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5644 5694
5645 /* let the FW do it's magic ... */ 5695 do {
5646 msleep(100); /* TBD */ 5696 /* let the FW do it's magic ... */
5697 msleep(delay);
5647 5698
5648 if (CHIP_REV_IS_SLOW(bp)) 5699 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5649 msleep(900);
5650 5700
5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 5701 /* Give the FW up to 2 second (200*10ms) */
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 5702 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5703
5704 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5705 cnt*delay, rc, seq);
5653 5706
5654 /* is this a reply to our command? */ 5707 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 5708 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
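
This hunk replaces the fixed "sleep and hope" delay with a bounded poll: re-read the firmware mailbox every delay interval until the sequence number written to it comes back, giving up after roughly 2 seconds. A standalone sketch of the same loop, with read_fw_mb() faking SHMEM_RD() and the mask value assumed for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define FW_MSG_SEQ_NUMBER_MASK  0x0000ffff      /* assumed layout */

        static uint32_t read_fw_mb(void)
        {
                static int calls;
                /* pretend the FW answers with seq 7 on the third read */
                return (++calls >= 3) ? 7 : 0;
        }

        static int wait_fw_ack(uint32_t seq)
        {
                unsigned int cnt = 1;
                uint32_t rc;

                do {
                        /* in kernel context: msleep(delay) */
                        rc = read_fw_mb();
                        /* give the FW up to 2 seconds (200 * 10ms) */
                } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

                if (seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) {
                        fprintf(stderr, "FW failed to respond\n");
                        return -1;
                }
                printf("acked after %u polls\n", cnt);
                return 0;
        }

        int main(void)
        {
                return wait_fw_ack(7);
        }
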
@@ -5713,6 +5766,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5766 NUM_RCQ_BD);
5714 5767
5715 /* SGE ring */ 5768 /* SGE ring */
5769 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5770 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5771 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5772 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5944,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5890 dev_kfree_skb(skb); 5944 dev_kfree_skb(skb);
5891 } 5945 }
5892 if (!fp->disable_tpa) 5946 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5947 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5948 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5949 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5950 }
5896} 5951}
@@ -5976,8 +6031,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6031 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6032 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6033 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6034 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6035 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6036 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6037 return -EBUSY;
5983 } 6038 }
@@ -6004,7 +6059,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6004 * Init service functions 6059 * Init service functions
6005 */ 6060 */
6006 6061
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6062static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6063{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6064 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6065 int port = BP_PORT(bp);
@@ -6026,11 +6081,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6081 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6082 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6083 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6084 if (set)
6085 config->config_table[0].target_table_entry.flags = 0;
6086 else
6087 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6088 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6089 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6090
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6091 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6092 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6093 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6094 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6095 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6099,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6099 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6100 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6101 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6102 if (set)
6103 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6104 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6105 else
6106 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6107 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6108 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6109
@@ -6050,12 +6112,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6112 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6113}
6052 6114
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6115static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6116{
6055 struct mac_configuration_cmd_e1h *config = 6117 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6118 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6119
6058 if (bp->state != BNX2X_STATE_OPEN) { 6120 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6121 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6122 return;
6061 } 6123 }
@@ -6079,9 +6141,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6141 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6142 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6143 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6144 if (set)
6145 config->config_table[0].flags = BP_PORT(bp);
6146 else
6147 config->config_table[0].flags =
6148 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6149
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6150 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6151 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6152 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6153 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6154 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
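
Both CAM helpers above now take a `set` argument so the unload path can reuse the exact code that programmed an entry to invalidate it again. A userspace sketch of that set/clear shape; the struct and cam_invalidate() below are illustrative stand-ins, not the real HSI layout:

        #include <stdio.h>
        #include <string.h>

        struct cam_entry {
                unsigned char mac[6];
                int valid;
        };

        static void cam_invalidate(struct cam_entry *e)
        {
                memset(e, 0, sizeof(*e));       /* stands in for CAM_INVALIDATE() */
        }

        static void set_mac_addr(struct cam_entry *e, const unsigned char *mac,
                                 int set)
        {
                if (set) {
                        memcpy(e->mac, mac, 6);
                        e->valid = 1;
                } else {
                        cam_invalidate(e);
                }
                printf("%s MAC %02x:%02x:%02x:...\n",
                       set ? "setting" : "clearing",
                       e->mac[0], e->mac[1], e->mac[2]);
        }

        int main(void)
        {
                const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
                struct cam_entry e;

                set_mac_addr(&e, mac, 1);       /* load:   program the entry */
                set_mac_addr(&e, mac, 0);       /* unload: invalidate it */
                return 0;
        }
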
@@ -6106,13 +6173,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 bnx2x_rx_int(bp->fp, 10); 6173 bnx2x_rx_int(bp->fp, 10);
6107 /* if index is different from 0 6174 /* if index is different from 0
6108 * the reply for some commands will 6175 * the reply for some commands will
6109 * be on the none default queue 6176 * be on the non default queue
6110 */ 6177 */
6111 if (idx) 6178 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10); 6179 bnx2x_rx_int(&bp->fp[idx], 10);
6113 } 6180 }
6114 mb(); /* state is changed by bnx2x_sp_event() */
6115 6181
6182 mb(); /* state is changed by bnx2x_sp_event() */
6116 if (*state_p == state) 6183 if (*state_p == state)
6117 return 0; 6184 return 0;
6118 6185
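
The point of moving the mb() is that the barrier should sit directly before each read of *state_p, which bnx2x_sp_event() updates from another context, so the poll always sees the freshest value. A rough userspace sketch of the placement, with __sync_synchronize() standing in for mb():

        #include <stdio.h>

        static volatile int state;

        static int poll_state(int wanted, int tries)
        {
                while (tries--) {
                        /* ... process completions here ... */
                        __sync_synchronize();   /* mb(): order before the read */
                        if (state == wanted)
                                return 0;
                }
                return -1;      /* timeout */
        }

        int main(void)
        {
                state = 1;
                printf("%s\n", poll_state(1, 10) ? "timeout" : "ok");
                return 0;
        }
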
@@ -6167,7 +6234,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6234{
6168 u32 load_code; 6235 u32 load_code;
6169 int i, rc; 6236 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6237#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6238 if (unlikely(bp->panic))
6173 return -EPERM; 6239 return -EPERM;
@@ -6183,22 +6249,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6249 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6250 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6251 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6252 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6253 return -EBUSY;
6188 } 6254 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6255 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6256 return -EBUSY; /* other port in diagnostic mode */
6191 6257
6192 } else { 6258 } else {
6259 int port = BP_PORT(bp);
6260
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6261 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6262 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6263 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6264 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6265 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6266 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6267 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6268 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6269 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6270 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6271 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6272 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
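
Without an MCP the driver arbitrates the load stages itself: load_count[0] counts every function on the chip, load_count[1 + port] counts the functions per port, and whichever counter just reached 1 decides whether this load performs COMMON, PORT or FUNCTION initialization. The bookkeeping in isolation:

        #include <stdio.h>

        enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

        static int load_count[3];       /* 0-common, 1-port0, 2-port1 */

        static enum load_code nomcp_load_code(int port)
        {
                load_count[0]++;
                load_count[1 + port]++;

                if (load_count[0] == 1)
                        return LOAD_COMMON;
                else if (load_count[1 + port] == 1)
                        return LOAD_PORT;
                else
                        return LOAD_FUNCTION;
        }

        int main(void)
        {
                printf("%d\n", nomcp_load_code(0)); /* 0: first on chip  -> COMMON */
                printf("%d\n", nomcp_load_code(1)); /* 1: first on port1 -> PORT */
                printf("%d\n", nomcp_load_code(1)); /* 2: next on port1  -> FUNCTION */
                return 0;
        }
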
@@ -6247,9 +6315,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6315 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6316 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6317
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6318 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6319 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6320 if (rc) {
@@ -6276,17 +6341,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 goto load_error; 6341 goto load_error;
6277 } 6342 }
6278 6343
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6344 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6345 bnx2x_nic_init(bp, load_code);
6284 6346
6285 /* Send LOAD_DONE command to MCP */ 6347 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6348 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6349 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6350 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6351 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6352 rc = -EBUSY;
6291 goto load_int_disable; 6353 goto load_int_disable;
6292 } 6354 }
@@ -6301,11 +6363,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 for_each_queue(bp, i) 6363 for_each_queue(bp, i)
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6364 napi_enable(&bnx2x_fp(bp, i, napi));
6303 6365
6366 /* Enable interrupt handling */
6367 atomic_set(&bp->intr_sem, 0);
6368
6304 rc = bnx2x_setup_leading(bp); 6369 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6370 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6371 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1;
6308#endif
6309 goto load_stop_netif; 6372 goto load_stop_netif;
6310 } 6373 }
6311 6374
@@ -6323,9 +6386,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6323 } 6386 }
6324 6387
6325 if (CHIP_IS_E1(bp)) 6388 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6389 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6390 else
6328 bnx2x_set_mac_addr_e1h(bp); 6391 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6392
6330 if (bp->port.pmf) 6393 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6394 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6402,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6402 break;
6340 6403
6341 case LOAD_OPEN: 6404 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6405 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6406 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6407 if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6440,7 @@ load_int_disable:
6378 /* Free SKBs, SGEs, TPA pool and driver internals */ 6440 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp); 6441 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i) 6442 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6443 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6444load_error:
6384 bnx2x_free_mem(bp); 6445 bnx2x_free_mem(bp);
6385 6446
@@ -6411,7 +6472,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6472 return rc;
6412} 6473}
6413 6474
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6475static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6476{
6416 u16 dsb_sp_prod_idx; 6477 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6478 /* if the other port is handling traffic,
@@ -6429,7 +6490,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6490 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6491 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6492 if (rc) /* timeout */
6432 return; 6493 return rc;
6433 6494
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6495 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6496
@@ -6441,20 +6502,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6441 so there is not much to do if this times out 6502 so there is not much to do if this times out
6442 */ 6503 */
6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { 6504 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6444 msleep(1);
6445 if (!cnt) { 6505 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " 6506 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", 6507 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx); 6508 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR 6509#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic(); 6510 bnx2x_panic();
6511#else
6512 rc = -EBUSY;
6451#endif 6513#endif
6452 break; 6514 break;
6453 } 6515 }
6454 cnt--; 6516 cnt--;
6517 msleep(1);
6455 } 6518 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6519 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6520 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6521
6522 return rc;
6458} 6523}
6459 6524
6460static void bnx2x_reset_func(struct bnx2x *bp) 6525static void bnx2x_reset_func(struct bnx2x *bp)
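
bnx2x_stop_leading() now reports failure instead of silently returning: with BNX2X_STOP_ON_ERROR a ramrod timeout still panics so the state can be inspected, and without it the new int return lets bnx2x_nic_unload() see -EBUSY and continue tearing down. A sketch of that debug/production split (compile with -DSTOP_ON_ERROR for the first behaviour):

        #include <stdio.h>
        #include <stdlib.h>

        static int wait_port_del(int completed)
        {
                int rc = 0;

                if (!completed) {
        #ifdef STOP_ON_ERROR
                        fprintf(stderr, "panic: ramrod timeout\n");
                        abort();        /* debug build: stop right here */
        #else
                        rc = -16;       /* production: report -EBUSY */
        #endif
                }
                return rc;
        }

        int main(void)
        {
                printf("rc = %d\n", wait_port_del(0));
                return 0;
        }
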
@@ -6496,7 +6561,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 6561 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val) 6562 if (val)
6498 DP(NETIF_MSG_IFDOWN, 6563 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val); 6564 "BRB1 is not empty %d blocks are occupied\n", val);
6500 6565
6501 /* TODO: Close Doorbell port? */ 6566 /* TODO: Close Doorbell port? */
6502} 6567}
@@ -6536,11 +6601,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6536 } 6601 }
6537} 6602}
6538 6603
6539/* msut be called with rtnl_lock */ 6604/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 6605static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6541{ 6606{
6607 int port = BP_PORT(bp);
6542 u32 reset_code = 0; 6608 u32 reset_code = 0;
6543 int i, cnt; 6609 int i, cnt, rc;
6544 6610
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6611 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6612
@@ -6557,22 +6623,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6623 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6624 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6625
6560 /* Wait until all fast path tasks complete */ 6626 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6627 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6628 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6629
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6630 cnt = 1000;
6573 smp_rmb(); 6631 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6632 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6633
6634 if (!netif_running(bp->dev))
6635 bnx2x_tx_int(fp, 1000);
6636
6576 if (!cnt) { 6637 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6638 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6639 i);
@@ -6584,14 +6645,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6645#endif
6585 } 6646 }
6586 cnt--; 6647 cnt--;
6648 msleep(1);
6587 smp_rmb(); 6649 smp_rmb();
6588 } 6650 }
6589 } 6651 }
6590 6652
6591 /* Wait until all slow path tasks complete */ 6653 /* Give HW time to discard old tx messages */
6592 cnt = 1000; 6654 msleep(1);
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595 6655
6596 for_each_queue(bp, i) 6656 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi)); 6657 napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6661,79 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6601 /* Release IRQs */ 6661 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6662 bnx2x_free_irq(bp);
6603 6663
6604 if (bp->flags & NO_WOL_FLAG) 6664 if (unload_mode == UNLOAD_NORMAL)
6665 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6666
6667 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6668 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6669 if (CHIP_IS_E1H(bp))
6670 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6671
6607 else if (bp->wol) { 6672 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6673 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6674 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6675 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6676 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6677 preserve entry 0 which is used by the PMF */
6678 u8 entry = (BP_E1HVN(bp) + 1)*8;
6679
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6680 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6681 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6682
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6683 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6684 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6685 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6686
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6687 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6688
6624 } else 6689 } else
6625 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6690 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6626 6691
6692 if (CHIP_IS_E1(bp)) {
6693 struct mac_configuration_cmd *config =
6694 bnx2x_sp(bp, mcast_config);
6695
6696 bnx2x_set_mac_addr_e1(bp, 0);
6697
6698 for (i = 0; i < config->hdr.length_6b; i++)
6699 CAM_INVALIDATE(config->config_table[i]);
6700
6701 config->hdr.length_6b = i;
6702 if (CHIP_REV_IS_SLOW(bp))
6703 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6704 else
6705 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6706 config->hdr.client_id = BP_CL_ID(bp);
6707 config->hdr.reserved1 = 0;
6708
6709 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6711 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6712
6713 } else { /* E1H */
6714 bnx2x_set_mac_addr_e1h(bp, 0);
6715
6716 for (i = 0; i < MC_HASH_SIZE; i++)
6717 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6718 }
6719
6720 if (CHIP_IS_E1H(bp))
6721 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6722
6627 /* Close multi and leading connections 6723 /* Close multi and leading connections
6628 Completions for ramrods are collected in a synchronous way */ 6724 Completions for ramrods are collected in a synchronous way */
6629 for_each_nondefault_queue(bp, i) 6725 for_each_nondefault_queue(bp, i)
6630 if (bnx2x_stop_multi(bp, i)) 6726 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6727 goto unload_error;
6632 6728
6633 if (CHIP_IS_E1H(bp)) 6729 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6730 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6731 BNX2X_ERR("Stop leading failed!\n");
6732#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6733 return -EBUSY;
6642 } 6734#else
6735 goto unload_error;
6643#endif 6736#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6737 }
6651 6738
6652unload_error: 6739unload_error:
@@ -6656,12 +6743,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6743 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6744 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6745 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6746 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6747 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6748 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6749 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6750 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6751 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6752 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6753 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6754 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6768,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6768 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6769 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6770 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6771 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6772 bnx2x_free_mem(bp);
6687 6773
6688 bp->state = BNX2X_STATE_CLOSED; 6774 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6819,93 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6733 /* Check if it is the UNDI driver 6819 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7 6820 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */ 6821 */
6822 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6823 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6737 if (val == 0x7) { 6824 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6825 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */ 6826 /* save our func */
6740 int func = BP_FUNC(bp); 6827 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq; 6828 u32 swap_en;
6829 u32 swap_val;
6742 6830
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6831 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744 6832
6745 /* try unload UNDI on port 0 */ 6833 /* try unload UNDI on port 0 */
6746 bp->func = 0; 6834 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp, 6835 bp->fw_seq =
6748 func_mb[bp->func].drv_mb_header) & 6836 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK); 6837 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code); 6838 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753 6839
6754 /* if UNDI is loaded on the other port */ 6840 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 6841 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756 6842
6843 /* send "DONE" for previous unload */
6844 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6845
6846 /* unload UNDI on port 1 */
6757 bp->func = 1; 6847 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp, 6848 bp->fw_seq =
6759 func_mb[bp->func].drv_mb_header) & 6849 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK); 6850 DRV_MSG_SEQ_NUMBER_MASK);
6761 6851 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 bnx2x_fw_command(bp, 6852
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); 6853 bnx2x_fw_command(bp, reset_code);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 } 6854 }
6771 6855
6856 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6857 HC_REG_CONFIG_0), 0x1000);
6858
6859 /* close input traffic and wait for it */
6860 /* Do not rcv packets to BRB */
6861 REG_WR(bp,
6862 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6863 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6864 /* Do not direct rcv packets that are not for MCP to
6865 * the BRB */
6866 REG_WR(bp,
6867 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6868 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6869 /* clear AEU */
6870 REG_WR(bp,
6871 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6872 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6873 msleep(10);
6874
6875 /* save NIG port swap info */
6876 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6877 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6772 /* reset device */ 6878 /* reset device */
6773 REG_WR(bp, 6879 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6880 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f); 6881 0xd3ffffff);
6776 REG_WR(bp, 6882 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6883 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403); 6884 0x1403);
6885 /* take the NIG out of reset and restore swap values */
6886 REG_WR(bp,
6887 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6888 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6889 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6890 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6891
6892 /* send unload done to the MCP */
6893 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6894
6895 /* restore our func and fw_seq */
6896 bp->func = func;
6897 bp->fw_seq =
6898 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6899 DRV_MSG_SEQ_NUMBER_MASK);
6779 } 6900 }
6901 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6780 } 6902 }
6781} 6903}
6782 6904
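
The reset sequence added above is a save/reset/restore dance: the chip reset wipes the NIG, so the port-swap strap registers are read out first and written back once the NIG is pulled out of reset. A toy model of the idea, with reg[] standing in for REG_RD()/REG_WR():

        #include <stdio.h>

        enum { PORT_SWAP, STRAP_OVERRIDE, NREGS };

        static unsigned int reg[NREGS] = { 1, 1 };      /* swapped-port board */

        static void chip_reset(void)
        {
                reg[PORT_SWAP] = reg[STRAP_OVERRIDE] = 0;       /* reset wipes NIG */
        }

        int main(void)
        {
                /* save NIG port swap info */
                unsigned int swap_val = reg[PORT_SWAP];
                unsigned int swap_en = reg[STRAP_OVERRIDE];

                chip_reset();

                /* take the NIG out of reset and restore swap values */
                reg[PORT_SWAP] = swap_val;
                reg[STRAP_OVERRIDE] = swap_en;

                printf("swap=%u override=%u\n", reg[PORT_SWAP], reg[STRAP_OVERRIDE]);
                return 0;
        }
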
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 6905static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{ 6906{
6785 u32 val, val2, val3, val4, id; 6907 u32 val, val2, val3, val4, id;
6908 u16 pmc;
6786 6909
6787 /* Get the chip revision id and number. */ 6910 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6911 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6963,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X," 6963 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val); 6964 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 } 6965 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n", 6966
6844 (bp->flags & NO_WOL_FLAG)? "Not " : ""); 6967 if (BP_E1HVN(bp) == 0) {
6968 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6969 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6970 } else {
6971 /* no WOL capability for E1HVN != 0 */
6972 bp->flags |= NO_WOL_FLAG;
6973 }
6974 BNX2X_DEV_INFO("%sWoL capable\n",
6975 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6845 6976
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 6977 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 6978 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
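
The WoL probe added above replaces a hard-coded assumption with the PCI power-management capability: the PMC field says which D-states can assert PME#, and if D3cold cannot, Wake-on-LAN is impossible and NO_WOL_FLAG is set. A sketch with read_pm_pmc() faking pci_read_config_word() and the flag value invented; PCI_PM_CAP_PME_D3cold is the real pci_regs.h bit:

        #include <stdint.h>
        #include <stdio.h>

        #define PCI_PM_CAP_PME_D3cold   0x8000  /* from linux/pci_regs.h */
        #define NO_WOL_FLAG             0x1     /* illustrative flag value */

        static uint16_t read_pm_pmc(void)
        {
                return 0xffc0;  /* pretend device: PME from every D-state */
        }

        int main(void)
        {
                uint16_t pmc = read_pm_pmc();
                unsigned int flags = 0;

                flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
                printf("%sWoL capable\n", (flags & NO_WOL_FLAG) ? "Not " : "");
                return 0;
        }
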
@@ -7202,7 +7333,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7202 bp->link_params.req_flow_ctrl = (bp->port.link_config & 7333 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7203 PORT_FEATURE_FLOW_CONTROL_MASK); 7334 PORT_FEATURE_FLOW_CONTROL_MASK);
7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 7335 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7205 (!bp->port.supported & SUPPORTED_Autoneg)) 7336 !(bp->port.supported & SUPPORTED_Autoneg))
7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 7337 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7207 7338
7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 7339 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
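
The one-character fix above cures a classic C precedence bug: `!` binds tighter than `&`, so `!x & FLAG` negates first and then masks, yielding 0 for any flag above bit 0 whenever x is non-zero, which means the "no autoneg" fallback could never trigger. Demonstrated standalone (SUPPORTED_Autoneg is the real ethtool.h value):

        #include <stdio.h>

        #define SUPPORTED_Autoneg       (1 << 6)        /* linux/ethtool.h */

        int main(void)
        {
                unsigned int supported = 0;     /* autoneg NOT supported */

                /* buggy form:  (!0) & 0x40 == 0, the check never fires */
                printf("buggy:   %d\n", !supported & SUPPORTED_Autoneg);
                /* fixed form: !(0 & 0x40) == 1, fallback fires as intended */
                printf("correct: %d\n", !(supported & SUPPORTED_Autoneg));
                return 0;
        }
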
@@ -7274,9 +7405,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7274 bp->mf_config = 7405 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7406 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7276 7407
7277 val = 7408 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & 7409 FUNC_MF_CFG_E1HOV_TAG_MASK);
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7410 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7281 7411
7282 bp->e1hov = val; 7412 bp->e1hov = val;
@@ -7324,7 +7454,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7324 7454
7325 if (BP_NOMCP(bp)) { 7455 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */ 7456 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n"); 7457 BNX2X_ERR("warning random MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr); 7458 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 7459 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 } 7460 }
@@ -7337,8 +7467,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7337 int func = BP_FUNC(bp); 7467 int func = BP_FUNC(bp);
7338 int rc; 7468 int rc;
7339 7469
7340 if (nomcp) 7470 /* Disable interrupt handling until HW is initialized */
7341 bp->flags |= NO_MCP_FLAG; 7471 atomic_set(&bp->intr_sem, 1);
7342 7472
7343 mutex_init(&bp->port.phy_mutex); 7473 mutex_init(&bp->port.phy_mutex);
7344 7474
@@ -7377,8 +7507,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7377 bp->tx_ticks = 50; 7507 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25; 7508 bp->rx_ticks = 25;
7379 7509
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7510 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval); 7511 bp->current_interval = (poll ? poll : bp->timer_interval);
7384 7512
@@ -7628,25 +7756,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info) 7756 struct ethtool_drvinfo *info)
7629{ 7757{
7630 struct bnx2x *bp = netdev_priv(dev); 7758 struct bnx2x *bp = netdev_priv(dev);
7631 char phy_fw_ver[PHY_FW_VER_LEN]; 7759 u8 phy_fw_ver[PHY_FW_VER_LEN];
7632 7760
7633 strcpy(info->driver, DRV_MODULE_NAME); 7761 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION); 7762 strcpy(info->version, DRV_MODULE_VERSION);
7635 7763
7636 phy_fw_ver[0] = '\0'; 7764 phy_fw_ver[0] = '\0';
7637 if (bp->port.pmf) { 7765 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp); 7766 bnx2x_acquire_phy_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params, 7767 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED), 7768 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN); 7769 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp); 7770 bnx2x_release_phy_lock(bp);
7643 } 7771 }
7644 7772
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", 7773 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, 7774 (bp->common.bc_ver & 0xff0000) >> 16,
7647 BCM_5710_FW_REVISION_VERSION, 7775 (bp->common.bc_ver & 0xff00) >> 8,
7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, 7776 (bp->common.bc_ver & 0xff),
7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); 7777 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7650 strcpy(info->bus_info, pci_name(bp->pdev)); 7778 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS; 7779 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS; 7780 info->testinfo_len = BNX2X_NUM_TESTS;
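
The fw_version string now reports the bootcode version, which is packed one byte per component, so "major.minor.rev" falls out of three masks and shifts. Worked standalone with the 0x040200 minimum the driver checks against a few hunks back:

        #include <stdio.h>

        int main(void)
        {
                unsigned int bc_ver = 0x040200;         /* BNX2X_BC_VER */
                char buf[32];

                snprintf(buf, sizeof(buf), "BC:%d.%d.%d",
                         (bc_ver & 0xff0000) >> 16,
                         (bc_ver & 0xff00) >> 8,
                         (bc_ver & 0xff));
                printf("%s\n", buf);    /* prints BC:4.2.0 */
                return 0;
        }
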
@@ -8097,7 +8225,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8097 if (eeprom->magic == 0x00504859) 8225 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) { 8226 if (bp->port.pmf) {
8099 8227
8100 bnx2x_phy_hw_lock(bp); 8228 bnx2x_acquire_phy_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp), 8229 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config, 8230 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED), 8231 (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8237,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8109 rc |= bnx2x_phy_init(&bp->link_params, 8237 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars); 8238 &bp->link_vars);
8111 } 8239 }
8112 bnx2x_phy_hw_unlock(bp); 8240 bnx2x_release_phy_lock(bp);
8113 8241
8114 } else /* Only the PMF can access the PHY */ 8242 } else /* Only the PMF can access the PHY */
8115 return -EINVAL; 8243 return -EINVAL;
@@ -8128,7 +8256,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
8128 8256
8129 coal->rx_coalesce_usecs = bp->rx_ticks; 8257 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks; 8258 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132 8259
8133 return 0; 8260 return 0;
8134} 8261}
@@ -8146,44 +8273,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
8146 if (bp->tx_ticks > 0x3000) 8273 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000; 8274 bp->tx_ticks = 0x3000;
8148 8275
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
8154 if (netif_running(dev)) 8276 if (netif_running(dev))
8155 bnx2x_update_coalesce(bp); 8277 bnx2x_update_coalesce(bp);
8156 8278
8157 return 0; 8279 return 0;
8158} 8280}
8159 8281
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
8187static void bnx2x_get_ringparam(struct net_device *dev, 8282static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering) 8283 struct ethtool_ringparam *ering)
8189{ 8284{
@@ -8266,7 +8361,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8266 8361
8267 if (epause->autoneg) { 8362 if (epause->autoneg) {
8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8363 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 8364 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8270 return -EINVAL; 8365 return -EINVAL;
8271 } 8366 }
8272 8367
@@ -8285,6 +8380,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8285 return 0; 8380 return 0;
8286} 8381}
8287 8382
8383static int bnx2x_set_flags(struct net_device *dev, u32 data)
8384{
8385 struct bnx2x *bp = netdev_priv(dev);
8386 int changed = 0;
8387 int rc = 0;
8388
8389 /* TPA requires Rx CSUM offloading */
8390 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8391 if (!(dev->features & NETIF_F_LRO)) {
8392 dev->features |= NETIF_F_LRO;
8393 bp->flags |= TPA_ENABLE_FLAG;
8394 changed = 1;
8395 }
8396
8397 } else if (dev->features & NETIF_F_LRO) {
8398 dev->features &= ~NETIF_F_LRO;
8399 bp->flags &= ~TPA_ENABLE_FLAG;
8400 changed = 1;
8401 }
8402
8403 if (changed && netif_running(dev)) {
8404 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8405 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8406 }
8407
8408 return rc;
8409}
8410
8288static u32 bnx2x_get_rx_csum(struct net_device *dev) 8411static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{ 8412{
8290 struct bnx2x *bp = netdev_priv(dev); 8413 struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8418,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) 8418static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{ 8419{
8297 struct bnx2x *bp = netdev_priv(dev); 8420 struct bnx2x *bp = netdev_priv(dev);
8421 int rc = 0;
8298 8422
8299 bp->rx_csum = data; 8423 bp->rx_csum = data;
8300 return 0; 8424
8425 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8426 TPA'ed packets will be discarded due to wrong TCP CSUM */
8427 if (!data) {
8428 u32 flags = ethtool_op_get_flags(dev);
8429
8430 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8431 }
8432
8433 return rc;
8301} 8434}
8302 8435
8303static int bnx2x_set_tso(struct net_device *dev, u32 data) 8436static int bnx2x_set_tso(struct net_device *dev, u32 data)
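
bnx2x_set_rx_csum() now closes the other half of the interlock: dropping Rx checksumming force-clears ETH_FLAG_LRO through bnx2x_set_flags(), so TPA'ed packets are never delivered with an unverifiable TCP checksum. A user-space probe that makes the side effect visible (editor's sketch; assumes a kernel exposing ETHTOOL_GFLAGS, "eth0" is a placeholder, needs CAP_NET_ADMIN):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;

	ev.cmd = ETHTOOL_SRXCSUM;          /* turn Rx checksumming off... */
	ev.data = 0;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SRXCSUM");
		return 1;
	}

	ev.cmd = ETHTOOL_GFLAGS;           /* ...and watch LRO follow it */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("LRO now %s\n",
		       (ev.data & ETH_FLAG_LRO) ? "on" : "off (auto-disabled)");
	return 0;
}
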
@@ -8335,6 +8468,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8335{ 8468{
8336 int idx, i, rc = -ENODEV; 8469 int idx, i, rc = -ENODEV;
8337 u32 wr_val = 0; 8470 u32 wr_val = 0;
8471 int port = BP_PORT(bp);
8338 static const struct { 8472 static const struct {
8339 u32 offset0; 8473 u32 offset0;
8340 u32 offset1; 8474 u32 offset1;
@@ -8400,7 +8534,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8400 8534
8401 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 8535 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8402 u32 offset, mask, save_val, val; 8536 u32 offset, mask, save_val, val;
8403 int port = BP_PORT(bp);
8404 8537
8405 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 8538 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8406 mask = reg_tbl[i].mask; 8539 mask = reg_tbl[i].mask;
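
Hoisting port out of the register loop is safe because BP_PORT() cannot change during the test; each table row is a base offset plus a per-port stride, so one table serves both ports. A toy illustration of the addressing (editor's sketch; the offsets are invented for illustration):

#include <stdio.h>

struct reg_ent { unsigned int offset0; unsigned int offset1; };

int main(void)
{
	static const struct reg_ent ent = { 0x2000, 0x100 };  /* hypothetical */
	int port;

	for (port = 0; port < 2; port++)
		printf("port %d -> register at 0x%x\n",
		       port, ent.offset0 + port * ent.offset1);
	return 0;
}
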
@@ -8446,16 +8579,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8446 static const struct { 8579 static const struct {
8447 char *name; 8580 char *name;
8448 u32 offset; 8581 u32 offset;
8449 u32 mask; 8582 u32 e1_mask;
8583 u32 e1h_mask;
8450 } prty_tbl[] = { 8584 } prty_tbl[] = {
8451 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, 8585 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8452 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, 8586 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8453 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, 8587 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8454 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, 8588 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8455 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, 8589 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8456 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, 8590 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8457 8591
8458 { NULL, 0xffffffff, 0 } 8592 { NULL, 0xffffffff, 0, 0 }
8459 }; 8593 };
8460 8594
8461 if (!netif_running(bp->dev)) 8595 if (!netif_running(bp->dev))
@@ -8469,7 +8603,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8469 /* Check the parity status */ 8603 /* Check the parity status */
8470 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 8604 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8471 val = REG_RD(bp, prty_tbl[i].offset); 8605 val = REG_RD(bp, prty_tbl[i].offset);
8472 if (val & ~(prty_tbl[i].mask)) { 8606 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8607 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8473 DP(NETIF_MSG_HW, 8608 DP(NETIF_MSG_HW,
8474 "%s is 0x%x\n", prty_tbl[i].name, val); 8609 "%s is 0x%x\n", prty_tbl[i].name, val);
8475 goto test_mem_exit; 8610 goto test_mem_exit;
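
The parity table now carries a separate tolerated-bits mask per chip family, and the check picks whichever matches the chip under test. A standalone sketch of the predicate, reusing the CCM row's masks from the table above (editor's illustration with simplified types):

#include <stdbool.h>
#include <stdio.h>

struct prty_ent { const char *name; unsigned int e1_mask; unsigned int e1h_mask; };

static bool parity_bad(const struct prty_ent *e, unsigned int sts, bool is_e1)
{
	unsigned int mask = is_e1 ? e->e1_mask : e->e1h_mask;

	return (sts & ~mask) != 0;  /* any bit outside the tolerated mask fails */
}

int main(void)
{
	const struct prty_ent ccm = { "CCM_PRTY_STS", 0x3ffc0, 0 };

	printf("%s on E1,  sts 0x40: %s\n", ccm.name,
	       parity_bad(&ccm, 0x40, true) ? "fail" : "ok");
	printf("%s on E1H, sts 0x40: %s\n", ccm.name,
	       parity_bad(&ccm, 0x40, false) ? "fail" : "ok");
	return 0;
}
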
@@ -8539,15 +8674,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8539 8674
8540 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8675 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8541 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8676 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8542 bnx2x_phy_hw_lock(bp); 8677 bnx2x_acquire_phy_lock(bp);
8543 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8678 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8544 bnx2x_phy_hw_unlock(bp); 8679 bnx2x_release_phy_lock(bp);
8545 8680
8546 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8681 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8547 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8682 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8548 bnx2x_phy_hw_lock(bp); 8683 bnx2x_acquire_phy_lock(bp);
8549 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8684 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8550 bnx2x_phy_hw_unlock(bp); 8685 bnx2x_release_phy_lock(bp);
8551 /* wait until link state is restored */ 8686 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp, link_up); 8687 bnx2x_wait_for_link(bp, link_up);
8553 8688
@@ -8771,7 +8906,7 @@ static void bnx2x_self_test(struct net_device *dev,
8771 if (!netif_running(dev)) 8906 if (!netif_running(dev))
8772 return; 8907 return;
8773 8908
8774 /* offline tests are not suppoerted in MF mode */ 8909 /* offline tests are not supported in MF mode */
8775 if (IS_E1HMF(bp)) 8910 if (IS_E1HMF(bp))
8776 etest->flags &= ~ETH_TEST_FL_OFFLINE; 8911 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8777 8912
@@ -8827,76 +8962,99 @@ static const struct {
8827 long offset; 8962 long offset;
8828 int size; 8963 int size;
8829 u32 flags; 8964 u32 flags;
8830 char string[ETH_GSTRING_LEN]; 8965#define STATS_FLAGS_PORT 1
8966#define STATS_FLAGS_FUNC 2
8967 u8 string[ETH_GSTRING_LEN];
8831} bnx2x_stats_arr[BNX2X_NUM_STATS] = { 8968} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8832/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, 8969/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8833 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, 8970 8, STATS_FLAGS_FUNC, "rx_bytes" },
8834 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, 8971 { STATS_OFFSET32(error_bytes_received_hi),
8835 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, 8972 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8973 { STATS_OFFSET32(total_bytes_transmitted_hi),
8974 8, STATS_FLAGS_FUNC, "tx_bytes" },
8975 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8976 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8836 { STATS_OFFSET32(total_unicast_packets_received_hi), 8977 { STATS_OFFSET32(total_unicast_packets_received_hi),
8837 8, 1, "rx_ucast_packets" }, 8978 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8838 { STATS_OFFSET32(total_multicast_packets_received_hi), 8979 { STATS_OFFSET32(total_multicast_packets_received_hi),
8839 8, 1, "rx_mcast_packets" }, 8980 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8840 { STATS_OFFSET32(total_broadcast_packets_received_hi), 8981 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8841 8, 1, "rx_bcast_packets" }, 8982 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8842 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8983 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8843 8, 1, "tx_packets" }, 8984 8, STATS_FLAGS_FUNC, "tx_packets" },
8844 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8985 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8845 8, 0, "tx_mac_errors" }, 8986 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8846/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8987/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8847 8, 0, "tx_carrier_errors" }, 8988 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8848 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8989 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8849 8, 0, "rx_crc_errors" }, 8990 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8850 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8991 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8851 8, 0, "rx_align_errors" }, 8992 8, STATS_FLAGS_PORT, "rx_align_errors" },
8852 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8993 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8853 8, 0, "tx_single_collisions" }, 8994 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8854 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8995 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8855 8, 0, "tx_multi_collisions" }, 8996 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8856 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8997 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8857 8, 0, "tx_deferred" }, 8998 8, STATS_FLAGS_PORT, "tx_deferred" },
8858 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8999 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8859 8, 0, "tx_excess_collisions" }, 9000 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8860 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 9001 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8861 8, 0, "tx_late_collisions" }, 9002 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8862 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 9003 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8863 8, 0, "tx_total_collisions" }, 9004 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8864 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 9005 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8865 8, 0, "rx_fragments" }, 9006 8, STATS_FLAGS_PORT, "rx_fragments" },
8866/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, 9007/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9008 8, STATS_FLAGS_PORT, "rx_jabbers" },
8867 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 9009 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8868 8, 0, "rx_undersize_packets" }, 9010 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8869 { STATS_OFFSET32(jabber_packets_received), 9011 { STATS_OFFSET32(jabber_packets_received),
8870 4, 1, "rx_oversize_packets" }, 9012 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8871 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 9013 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8872 8, 0, "tx_64_byte_packets" }, 9014 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8873 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 9015 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8874 8, 0, "tx_65_to_127_byte_packets" }, 9016 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8875 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 9017 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8876 8, 0, "tx_128_to_255_byte_packets" }, 9018 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8877 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 9019 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8878 8, 0, "tx_256_to_511_byte_packets" }, 9020 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8879 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 9021 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8880 8, 0, "tx_512_to_1023_byte_packets" }, 9022 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8881 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 9023 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8882 8, 0, "tx_1024_to_1522_byte_packets" }, 9024 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8883 { STATS_OFFSET32(etherstatspktsover1522octets_hi), 9025 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8884 8, 0, "tx_1523_to_9022_byte_packets" }, 9026 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8885/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), 9027/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8886 8, 0, "rx_xon_frames" }, 9028 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8887 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), 9029 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8888 8, 0, "rx_xoff_frames" }, 9030 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8889 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, 9031 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8890 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, 9032 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9033 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9034 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8891 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 9035 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8892 8, 0, "rx_mac_ctrl_frames" }, 9036 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8893 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, 9037 { STATS_OFFSET32(mac_filter_discard),
8894 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, 9038 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8895 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, 9039 { STATS_OFFSET32(no_buff_discard),
8896 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, 9040 4, STATS_FLAGS_FUNC, "rx_discards" },
8897/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } 9041 { STATS_OFFSET32(xxoverflow_discard),
9042 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9043 { STATS_OFFSET32(brb_drop_hi),
9044 8, STATS_FLAGS_PORT, "brb_discard" },
9045 { STATS_OFFSET32(brb_truncate_hi),
9046 8, STATS_FLAGS_PORT, "brb_truncate" },
9047/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9048 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9049 { STATS_OFFSET32(rx_skb_alloc_failed),
9050 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9051/* 42 */{ STATS_OFFSET32(hw_csum_err),
9052 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8898}; 9053};
8899 9054
9055#define IS_NOT_E1HMF_STAT(bp, i) \
9056 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9057
8900static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 9058static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8901{ 9059{
8902 struct bnx2x *bp = netdev_priv(dev); 9060 struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9063,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8905 switch (stringset) { 9063 switch (stringset) {
8906 case ETH_SS_STATS: 9064 case ETH_SS_STATS:
8907 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9065 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8908 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9066 if (IS_NOT_E1HMF_STAT(bp, i))
8909 continue; 9067 continue;
8910 strcpy(buf + j*ETH_GSTRING_LEN, 9068 strcpy(buf + j*ETH_GSTRING_LEN,
8911 bnx2x_stats_arr[i].string); 9069 bnx2x_stats_arr[i].string);
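
The new STATS_FLAGS_PORT/STATS_FLAGS_FUNC markers let IS_NOT_E1HMF_STAT() hide per-port MAC counters when the device runs in E1H multi-function mode, where one function does not own the whole port. A standalone sketch of the filter (editor's illustration; the three sample rows are taken from the table above):

#include <stdbool.h>
#include <stdio.h>

#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2

struct stat_ent { unsigned int flags; const char *string; };

static const struct stat_ent arr[] = {
	{ STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_FLAGS_FUNC, "rx_csum_offload_errors" },
};

int main(void)
{
	bool is_e1hmf = true;  /* pretend we are in multi-function mode */
	unsigned int i;

	for (i = 0; i < sizeof(arr) / sizeof(arr[0]); i++) {
		if (is_e1hmf && (arr[i].flags & STATS_FLAGS_PORT))
			continue;  /* the IS_NOT_E1HMF_STAT(bp, i) case */
		printf("%s\n", arr[i].string);
	}
	return 0;
}
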
@@ -8925,7 +9083,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
8925 int i, num_stats = 0; 9083 int i, num_stats = 0;
8926 9084
8927 for (i = 0; i < BNX2X_NUM_STATS; i++) { 9085 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8928 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9086 if (IS_NOT_E1HMF_STAT(bp, i))
8929 continue; 9087 continue;
8930 num_stats++; 9088 num_stats++;
8931 } 9089 }
@@ -8940,7 +9098,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
8940 int i, j; 9098 int i, j;
8941 9099
8942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9100 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8943 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9101 if (IS_NOT_E1HMF_STAT(bp, i))
8944 continue; 9102 continue;
8945 9103
8946 if (bnx2x_stats_arr[i].size == 0) { 9104 if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9215,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9057 PCI_PM_CTRL_PME_STATUS)); 9215 PCI_PM_CTRL_PME_STATUS));
9058 9216
9059 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 9217 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9060 /* delay required during transition out of D3hot */ 9218 /* delay required during transition out of D3hot */
9061 msleep(20); 9219 msleep(20);
9062 break; 9220 break;
9063 9221
@@ -9104,17 +9262,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9104 9262
9105 bnx2x_update_fpsb_idx(fp); 9263 bnx2x_update_fpsb_idx(fp);
9106 9264
9107 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || 9265 if (BNX2X_HAS_TX_WORK(fp))
9108 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9109 bnx2x_tx_int(fp, budget); 9266 bnx2x_tx_int(fp, budget);
9110 9267
9111 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) 9268 if (BNX2X_HAS_RX_WORK(fp))
9112 work_done = bnx2x_rx_int(fp, budget); 9269 work_done = bnx2x_rx_int(fp, budget);
9113 9270
9114 rmb(); /* bnx2x_has_work() reads the status block */ 9271 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9115 9272
9116 /* must not complete if we consumed full budget */ 9273 /* must not complete if we consumed full budget */
9117 if ((work_done < budget) && !bnx2x_has_work(fp)) { 9274 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9118 9275
9119#ifdef BNX2X_STOP_ON_ERROR 9276#ifdef BNX2X_STOP_ON_ERROR
9120poll_panic: 9277poll_panic:
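
The BNX2X_HAS_WORK() rewrite keeps NAPI's cardinal rule: the poll routine may only complete when the budget was not exhausted and the status block shows no further rx/tx work. A compilable restatement of that rule (editor's sketch with stand-in state):

#include <stdbool.h>
#include <stdio.h>

struct fp_state { int rx_pending; int tx_pending; };

static bool has_work(const struct fp_state *fp)  /* BNX2X_HAS_WORK() */
{
	return fp->rx_pending || fp->tx_pending;
}

static bool may_complete(const struct fp_state *fp, int work_done, int budget)
{
	/* must not complete if we consumed the full budget */
	return work_done < budget && !has_work(fp);
}

int main(void)
{
	struct fp_state fp = { .rx_pending = 0, .tx_pending = 0 };

	printf("%d\n", may_complete(&fp, 64, 64));  /* 0: budget exhausted */
	printf("%d\n", may_complete(&fp, 10, 64));  /* 1: idle, under budget */
	return 0;
}
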
@@ -9131,7 +9288,7 @@ poll_panic:
9131 9288
9132 9289
9133/* we split the first BD into headers and data BDs 9290/* we split the first BD into headers and data BDs
9134 * to ease the pain of our fellow micocode engineers 9291 * to ease the pain of our fellow microcode engineers
9135 * we use one mapping for both BDs 9292 * we use one mapping for both BDs
9136 * So far this has only been observed to happen 9293 * So far this has only been observed to happen
9137 * in Other Operating Systems(TM) 9294 * in Other Operating Systems(TM)
@@ -9238,7 +9395,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9238 /* Check if LSO packet needs to be copied: 9395 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 9396 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size = MAX_FETCH_BD - 3; 9397 int wnd_size = MAX_FETCH_BD - 3;
9241 /* Number of widnows to check */ 9398 /* Number of windows to check */
9242 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 9399 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9243 int wnd_idx = 0; 9400 int wnd_idx = 0;
9244 int frag_idx = 0; 9401 int frag_idx = 0;
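
For readers of bnx2x_pkt_req_lin(): the window check asks whether any run of wnd_size consecutive fragments is too small to carry one full TSO segment; if so, the firmware would need more BDs per segment than it can fetch, and the skb must be linearized. A simplified standalone model (editor's sketch; MAX_FETCH_BD and the sample numbers are assumptions, and the real driver accounts for the linear part slightly differently):

#include <stdbool.h>
#include <stdio.h>

#define MAX_FETCH_BD 13  /* assumed value for this sketch */

static bool needs_linearization(const int *frag_len, int nr_frags,
				int gso_size, int hlen)
{
	int wnd_size = MAX_FETCH_BD - 3;  /* headers BD + PBD + last BD */
	int num_wnds = nr_frags - wnd_size;
	long wnd_sum = hlen;              /* headers counted in every window here */
	int i;

	if (num_wnds <= 0)
		return false;             /* few enough frags: always fits */

	for (i = 0; i < wnd_size; i++)    /* first window */
		wnd_sum += frag_len[i];
	if (wnd_sum < gso_size)
		return true;

	for (i = 0; i < num_wnds; i++) {  /* slide the window one frag */
		wnd_sum += frag_len[wnd_size + i] - frag_len[i];
		if (wnd_sum < gso_size)
			return true;
	}
	return false;
}

int main(void)
{
	int frags[12] = { 100, 100, 100, 100, 100, 100,
			  100, 100, 100, 100, 100, 100 };

	/* 12 x 100B frags, 1400B segments: a 10-frag window plus headers
	 * holds at most ~1066B, so the packet must be linearized */
	printf("linearize: %d\n", needs_linearization(frags, 12, 1400, 66));
	return 0;
}
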
@@ -9340,7 +9497,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9340 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9497 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9341 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9498 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9342 9499
9343 /* First, check if we need to linearaize the skb 9500 /* First, check if we need to linearize the skb
9344 (due to FW restrictions) */ 9501 (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9502 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9346 /* Statistics of linearization */ 9503 /* Statistics of linearization */
@@ -9349,7 +9506,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9349 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " 9506 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9350 "silently dropping this SKB\n"); 9507 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb); 9508 dev_kfree_skb_any(skb);
9352 return 0; 9509 return NETDEV_TX_OK;
9353 } 9510 }
9354 } 9511 }
9355 9512
@@ -9372,7 +9529,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9372 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 9529 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9373 tx_bd->general_data = (UNICAST_ADDRESS << 9530 tx_bd->general_data = (UNICAST_ADDRESS <<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9531 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9375 tx_bd->general_data |= 1; /* header nbd */ 9532 /* header nbd */
9533 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9376 9534
9377 /* remember the first BD of the packet */ 9535 /* remember the first BD of the packet */
9378 tx_buf->first_bd = fp->tx_bd_prod; 9536 tx_buf->first_bd = fp->tx_bd_prod;
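
Writing the header-BD count at ETH_TX_BD_HDR_NBDS_SHIFT instead of OR-ing in a bare 1 makes the packing of general_data explicit. A toy illustration (editor's sketch; the shift values are placeholders standing in for the firmware's macros, not their real values):

#include <stdio.h>

#define ADDR_TYPE_SHIFT 0  /* stand-in for ETH_TX_BD_ETH_ADDR_TYPE_SHIFT */
#define HDR_NBDS_SHIFT  4  /* stand-in for ETH_TX_BD_HDR_NBDS_SHIFT */
#define UNICAST         1  /* stand-in for UNICAST_ADDRESS */

int main(void)
{
	unsigned char general_data = 0;

	general_data |= (UNICAST << ADDR_TYPE_SHIFT);
	general_data |= (1 << HDR_NBDS_SHIFT);  /* one header BD */
	printf("general_data = 0x%02x\n", general_data);
	return 0;
}
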
@@ -9451,7 +9609,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9451 9609
9452 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 9610 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9453 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 9611 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9454 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2); 9612 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9455 tx_bd->nbd = cpu_to_le16(nbd); 9613 tx_bd->nbd = cpu_to_le16(nbd);
9456 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 9614 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9457 9615
@@ -9721,9 +9879,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9721 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9879 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9722 if (netif_running(dev)) { 9880 if (netif_running(dev)) {
9723 if (CHIP_IS_E1(bp)) 9881 if (CHIP_IS_E1(bp))
9724 bnx2x_set_mac_addr_e1(bp); 9882 bnx2x_set_mac_addr_e1(bp, 1);
9725 else 9883 else
9726 bnx2x_set_mac_addr_e1h(bp); 9884 bnx2x_set_mac_addr_e1h(bp, 1);
9727 } 9885 }
9728 9886
9729 return 0; 9887 return 0;
@@ -9734,6 +9892,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{ 9892{
9735 struct mii_ioctl_data *data = if_mii(ifr); 9893 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct bnx2x *bp = netdev_priv(dev); 9894 struct bnx2x *bp = netdev_priv(dev);
9895 int port = BP_PORT(bp);
9737 int err; 9896 int err;
9738 9897
9739 switch (cmd) { 9898 switch (cmd) {
@@ -9749,7 +9908,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9749 return -EAGAIN; 9908 return -EAGAIN;
9750 9909
9751 mutex_lock(&bp->port.phy_mutex); 9910 mutex_lock(&bp->port.phy_mutex);
9752 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9911 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9753 DEFAULT_PHY_DEV_ADDR, 9912 DEFAULT_PHY_DEV_ADDR,
9754 (data->reg_num & 0x1f), &mii_regval); 9913 (data->reg_num & 0x1f), &mii_regval);
9755 data->val_out = mii_regval; 9914 data->val_out = mii_regval;
@@ -9765,7 +9924,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9765 return -EAGAIN; 9924 return -EAGAIN;
9766 9925
9767 mutex_lock(&bp->port.phy_mutex); 9926 mutex_lock(&bp->port.phy_mutex);
9768 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9927 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9769 DEFAULT_PHY_DEV_ADDR, 9928 DEFAULT_PHY_DEV_ADDR,
9770 (data->reg_num & 0x1f), data->val_in); 9929 (data->reg_num & 0x1f), data->val_in);
9771 mutex_unlock(&bp->port.phy_mutex); 9930 mutex_unlock(&bp->port.phy_mutex);
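
The SIOCGMIIREG/SIOCSMIIREG paths above are reachable from ordinary user space via the MII ioctls. A minimal probe of the PHY status register (editor's sketch; "eth0" is a placeholder and the call needs CAP_NET_ADMIN):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* MII data lives inline in the ifreq union, as in the kernel's if_mii() */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {  /* fills phy_id */
		perror("SIOCGMIIPHY");
		return 1;
	}
	mii->reg_num = MII_BMSR;  /* the driver masks reg_num to 5 bits */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		return 1;
	}
	printf("BMSR = 0x%04x\n", mii->val_out);
	return 0;
}
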
@@ -10141,7 +10300,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10141 10300
10142 netif_device_detach(dev); 10301 netif_device_detach(dev);
10143 10302
10144 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10303 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10145 10304
10146 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 10305 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10147 10306
@@ -10174,7 +10333,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
10174 bnx2x_set_power_state(bp, PCI_D0); 10333 bnx2x_set_power_state(bp, PCI_D0);
10175 netif_device_attach(dev); 10334 netif_device_attach(dev);
10176 10335
10177 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 10336 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10178 10337
10179 rtnl_unlock(); 10338 rtnl_unlock();
10180 10339