Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	1385
1 files changed, 775 insertions, 610 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd25953..a8eb3c4a47c8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -44,7 +44,6 @@
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
-#include <linux/version.h>
 #include <net/ip6_checksum.h>
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
@@ -60,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.6"
-#define DRV_MODULE_RELDATE	"2008/06/23"
+#define DRV_MODULE_VERSION	"1.45.21"
+#define DRV_MODULE_RELDATE	"2008/09/03"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +75,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
-static int disable_tpa;
-static int nomcp;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 static int use_multi;
 
+module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
 module_param(poll, int, 0);
 module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
@@ -237,17 +234,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 	while (*wb_comp != DMAE_COMP_VAL) {
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 
 	mutex_unlock(&bp->dmae_mutex);
@@ -310,17 +306,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +498,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	int i;
 	u16 j, start, end;
 
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	for_each_queue(bp, i) {
@@ -513,17 +511,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
-			  " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
-			  " rx_sge_prod(%x) last_max_sge(%x)\n",
-			  fp->rx_comp_prod, fp->rx_comp_cons,
-			  le16_to_cpu(*fp->rx_cons_sb),
-			  le16_to_cpu(*fp->rx_bd_cons_sb),
-			  fp->rx_sge_prod, fp->last_max_sge);
-		BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
-			  " bd data(%x,%x) rx_alloc_failed(%lx)\n",
-			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
-			  hw_prods->bds_prod, fp->rx_alloc_failed);
+		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
+			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
+			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+			  fp->rx_bd_prod, fp->rx_bd_cons,
+			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
+			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
+			  " *sb_u_idx(%x) bd data(%x,%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
+			  fp->status_blk->c_status_block.status_block_index,
+			  fp->fp_u_idx,
+			  fp->status_blk->u_status_block.status_block_index,
+			  hw_prods->packets_prod, hw_prods->bds_prod);
 
 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
 		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +554,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
-		start = 0;
-		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
 		for (j = start; j < end; j++) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +583,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 }
 
 static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +682,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
 	igu_ack.status_block_index = index;
@@ -694,9 +693,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
 		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 
-	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
-	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 }
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +715,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
-	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
-	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
 
-	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
-	   result, BAR_IGU_INTMEM + igu_addr);
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
 
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
-	if (result == 0) {
-		BNX2X_ERR("read %x from IGU\n", result);
-		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
-	}
-#endif
 	return result;
 }
 
@@ -898,6 +876,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		netif_tx_lock(bp->dev);
 
 		if (netif_queue_stopped(bp->dev) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_wake_queue(bp->dev);
 
@@ -905,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	}
 }
 
+
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
 {
@@ -960,6 +940,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1046,7 +1027,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
 				 PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -1169,8 +1150,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	memset(fp->sge_mask, 0xff,
 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
 
-	/* Clear the two last indeces in the page to 1:
-	   these are the indeces that correspond to the "next" element,
+	/* Clear the two last indices in the page to 1:
+	   these are the indices that correspond to the "next" element,
 	   hence will never be indicated and should be removed from
 	   the calculations. */
 	bnx2x_clear_sge_mask_next_elems(fp);
@@ -1188,7 +1169,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -1261,7 +1242,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   where we are and drop the whole packet */
 			err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 			if (unlikely(err)) {
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 				return err;
 			}
 
@@ -1295,16 +1276,15 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
-	/* if alloc failed drop the packet and keep the buffer in the bin */
 	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
-		/* else fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > bp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1333,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		fp->tpa_pool[queue].skb = new_skb;
 
 	} else {
+		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		fp->rx_alloc_failed++;
+		bp->eth_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1371,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 	int rx_pkt = 0;
-	u16 queue;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1456,7 +1436,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if ((!fp->disable_tpa) &&
 			    (TPA_TYPE(cqe_fp_flags) !=
 			     (TPA_TYPE_START | TPA_TYPE_END))) {
-				queue = cqe->fast_path_cqe.queue_index;
+				u16 queue = cqe->fast_path_cqe.queue_index;
 
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
 					DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1483,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			/* is this an error packet? */
 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-			/* do we sometimes forward error packets anyway? */
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR flags %x rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				/* TBD make sure MC counts this as a drop */
+				bp->eth_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1524,7 +1503,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR packet dropped "
 					   "because of alloc failure\n");
-					fp->rx_alloc_failed++;
+					bp->eth_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1541,7 +1520,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
 				pci_unmap_single(bp->pdev,
 					pci_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_use_size,
+						 bp->rx_buf_size,
 						 PCI_DMA_FROMDEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -1550,7 +1529,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR packet dropped because "
 				   "of alloc failure\n");
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1559,10 +1538,12 @@ reuse_rx:
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
 			skb->ip_summed = CHECKSUM_NONE;
-			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-			/* TBD do we pass bad csum packets in promisc */
+			if (bp->rx_csum) {
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					bp->eth_stats.hw_csum_err++;
+			}
 		}
 
 #ifdef BCM_VLAN
@@ -1615,6 +1596,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1635,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
 
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
 	mask = 0x2 << bp->fp[0].sb_id;
 	if (status & mask) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1686,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  * General service functions
  */
 
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 	int cnt;
 
 	/* Validating that the resource is within range */
@@ -1714,20 +1702,26 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (lock_status & resource_bit) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
-		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
-		       resource_bit);
-		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
 		if (lock_status & resource_bit)
 			return 0;
 
@@ -1737,11 +1731,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	return -EAGAIN;
 }
 
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1746,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (!(lock_status & resource_bit)) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EFAULT;
 	}
 
-	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
 	return 0;
 }
 
 /* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
@@ -1772,25 +1774,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 }
 
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 	int gpio_shift = gpio_num +
 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 	u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1803,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 	/* read GPIO and mask except the float bits */
 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
@@ -1822,7 +1824,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 		break;
 
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 		   gpio_num, gpio_shift);
 		/* set FLOAT */
@@ -1834,7 +1836,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 
 	return 0;
 }
@@ -1850,19 +1852,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 	/* read SPIO and mask except the float bits */
 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
 	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 		/* clear FLOAT and set CLR */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 		break;
 
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 		/* clear FLOAT and set SET */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1882,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
 	return 0;
 }
@@ -1940,46 +1942,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
 
 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 {
-	u8 rc;
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
 
-	/* Initialize link parameters structure variables */
-	bp->link_params.mtu = bp->dev->mtu;
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (IS_E1HMF(bp))
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+		else if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
 
-	bnx2x_phy_hw_lock(bp);
-	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	if (bp->link_vars.link_up)
-		bnx2x_link_report(bp);
+		if (bp->link_vars.link_up)
+			bnx2x_link_report(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
 
-	return rc;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	return -EINVAL;
 }
 
 static void bnx2x_link_set(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not setting link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not resetting link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
 	u8 rc;
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	return rc;
 }
@@ -1991,7 +2010,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
      sum of vn_min_rates
        or
      0 - if all the min_rates are 0.
-     In the later case fainess algorithm should be deactivated.
+     In the later case fairness algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will
      be set to 1.
  */
@@ -2114,7 +2133,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 	/* If FAIRNESS is enabled (not all min rates are zeroes) and
 	   if current min rate is zero - set it to 1.
-	   This is a requirment of the algorithm. */
+	   This is a requirement of the algorithm. */
 	if ((vn_min_rate == 0) && wsum)
 		vn_min_rate = DEF_MIN_RATE;
 	vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2222,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2357,7 +2376,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 }
 
 /* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
 	u32 i, j, val;
 	int rc = 0;
@@ -2374,15 +2393,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
 		msleep(5);
 	}
 	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire nvram interface\n");
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 		rc = -EBUSY;
 	}
 
 	return rc;
 }
 
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
 	u32 val = 0;
 
@@ -2395,7 +2414,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 		rc |= 1;
@@ -2426,26 +2444,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 {
 	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
 
-	if (~bp->aeu_mask & (asserted & 0xff))
-		BNX2X_ERR("IGU ERROR\n");
 	if (bp->attn_state & asserted)
 		BNX2X_ERR("IGU ERROR\n");
 
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
-	   bp->aeu_mask, asserted);
-	bp->aeu_mask &= ~(asserted & 0xff);
-	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	REG_WR(bp, aeu_addr, bp->aeu_mask);
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2523,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	} /* if hardwired */
 
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
-	   asserted, BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
 	if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2527,15 +2550,16 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
-			/* The PHY reset is controled by GPIO 1 */
+			/* The PHY reset is controlled by GPIO 1 */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
-			/* Low power mode is controled by GPIO 2 */
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			/* Low power mode is controlled by GPIO 2 */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 			/* mark the failure */
 			bp->link_params.ext_phy_config &=
 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	int index;
 	u32 reg_addr;
 	u32 val;
+	u32 aeu_mask;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
-	bnx2x_lock_alr(bp);
+	bnx2x_acquire_alr(bp);
 
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 						HW_PRTY_ASSERT_SET_1) ||
 			    (attn.sig[2] & group_mask.sig[2] &
 						HW_PRTY_ASSERT_SET_2))
 				BNX2X_ERR("FATAL HW block parity attention\n");
 		}
 	}
 
-	bnx2x_unlock_alr(bp);
+	bnx2x_release_alr(bp);
 
-	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
 	val = ~deasserted;
-/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
-	   val, BAR_IGU_INTMEM + reg_addr); */
-	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
 
-	if (bp->aeu_mask & (deasserted & 0xff))
-		BNX2X_ERR("IGU BUG!\n");
 	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU BUG!\n");
+		BNX2X_ERR("IGU ERROR\n");
 
 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
-	bp->aeu_mask |= (deasserted & 0xff);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
 
-	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
-	REG_WR(bp, reg_addr, bp->aeu_mask);
+	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return;
 	}
 
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
 	/* HW attentions */
 	if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
-			/* underflow */ \
-			d_hi = m_hi - s_hi; \
-			if (d_hi > 0) { \
-				/* we can 'loan' 1 */ \
-				d_hi--; \
-				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
-			} else { \
-				/* m_hi <= s_hi */ \
-				d_hi = 0; \
-				d_lo = 0; \
-			} \
+		/* underflow */ \
+		d_hi = m_hi - s_hi; \
+		if (d_hi > 0) { \
+			/* we can 'loan' 1 */ \
+			d_hi--; \
+			d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+		} else { \
+			/* m_hi <= s_hi */ \
+			d_hi = 0; \
+			d_lo = 0; \
+		} \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
-			d_hi = 0; \
-			d_lo = 0; \
-		} else { \
-			/* m_hi >= s_hi */ \
-			d_hi = m_hi - s_hi; \
-			d_lo = m_lo - s_lo; \
-		} \
+		d_hi = 0; \
+		d_lo = 0; \
+	} else { \
+		/* m_hi >= s_hi */ \
+		d_hi = m_hi - s_hi; \
+		d_lo = m_lo - s_lo; \
+	} \
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
  * Init service functions
  */
 
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 
 	might_sleep();
 	while (*stats_comp != DMAE_COMP_VAL) {
-		msleep(1);
 		if (!cnt) {
 			BNX2X_ERR("timeout waiting for stats finished\n");
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	return 1;
 }
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-	UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
 		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
 
 	UPDATE_STAT64_NIG(egress_mac_pkt0,
 				etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->rx_length_errors =
 			estats->rx_stat_etherstatsundersizepkts_lo +
 			estats->jabber_packets_received;
-	nstats->rx_over_errors = estats->brb_drop_lo +
-				 estats->brb_truncate_discard;
+	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
 	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
 	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
 	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 				  bp->fp->rx_comp_cons),
 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
 		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
-		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
 		       estats->driver_xoff, estats->brb_drop_lo);
 		printk(KERN_DEBUG "tstats: checksum_discard %u "
 		       "packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
 
 	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct ustorm_def_status_block)/4);
+			sizeof(struct ustorm_status_block)/4);
 	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct cstorm_def_status_block)/4);
+			sizeof(struct cstorm_status_block)/4);
 }
 
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
-			  struct host_status_block *sb, dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id)
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    atten_status_block);
 	def_sb->atten_status_block.status_block_id = sb_id;
 
-	bp->def_att_idx = 0;
 	bp->attn_state = 0;
 
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 			       reg_offset + 0xc + 0x10*index);
 	}
 
-	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-				   MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
 	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 		      HC_REG_ATTN_MSG0_ADDR_L);
 
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    u_def_status_block);
 	def_sb->u_def_status_block.status_block_id = sb_id;
 
-	bp->def_u_idx = 0;
-
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
 					    c_def_status_block);
 	def_sb->c_def_status_block.status_block_id = sb_id;
 
-	bp->def_c_idx = 0;
-
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
 	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4156 t_def_status_block); 4143 t_def_status_block);
4157 def_sb->t_def_status_block.status_block_id = sb_id; 4144 def_sb->t_def_status_block.status_block_id = sb_id;
4158 4145
4159 bp->def_t_idx = 0;
4160
4161 REG_WR(bp, BAR_TSTRORM_INTMEM + 4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + 4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4165 U64_HI(section)); 4150 U64_HI(section));
4166 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + 4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4169 BNX2X_BTR);
4170 4153
4171 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) 4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_TSTRORM_INTMEM + 4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4177 x_def_status_block); 4160 x_def_status_block);
4178 def_sb->x_def_status_block.status_block_id = sb_id; 4161 def_sb->x_def_status_block.status_block_id = sb_id;
4179 4162
4180 bp->def_x_idx = 0;
4181
4182 REG_WR(bp, BAR_XSTRORM_INTMEM + 4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + 4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4186 U64_HI(section)); 4167 U64_HI(section));
4187 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + 4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4189 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4190 BNX2X_BTR);
4191 4170
4192 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) 4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_XSTRORM_INTMEM + 4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4195 4174
4196 bp->stats_pending = 0; 4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4197 4177
4198 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4199} 4179}
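
Note on the status-block writes above: each 64-bit DMA address is handed to the chip as two consecutive 32-bit internal-memory words via U64_LO()/U64_HI(). A minimal userspace sketch of the split; reg_wr() and the offset are stand-ins, not driver API:

#include <stdint.h>
#include <stdio.h>

#define U64_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffff))
#define U64_HI(x) ((uint32_t)((uint64_t)(x) >> 32))

/* stand-in for REG_WR(bp, offset, val) */
static void reg_wr(uint32_t offset, uint32_t val)
{
	printf("write 0x%08x -> offset 0x%x\n", (unsigned)val,
	       (unsigned)offset);
}

int main(void)
{
	uint64_t dma_addr = 0x00000001fee1f00dULL;	/* example mapping */
	uint32_t base = 0x1000;				/* made-up offset */

	/* the chip reads the address as two consecutive 32-bit words */
	reg_wr(base, U64_LO(dma_addr));
	reg_wr(base + 4, U64_HI(dma_addr));
	return 0;
}
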
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4192 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4193 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4201 bp->rx_ticks ? 0 : 1);
4218 4202
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4206 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4207 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4210 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4211 bp->tx_ticks ? 0 : 1);
4228 } 4212 }
4229} 4213}
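
The coalescing update writes bp->rx_ticks/12 into the HC timeout byte and disables the index when the tick value is zero; the /12 suggests the hardware timeout field counts in 12 us units, though that is an inference, not something this diff states. A sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the coalescing maths, assuming 'ticks' is in microseconds and
 * the HC timeout field counts 12 us units (inferred from the /12 above). */
struct hc_coal {
	uint8_t  timeout;	/* written with REG_WR8 in the driver */
	uint16_t disable;	/* written with REG_WR16 in the driver */
};

static struct hc_coal coal_from_ticks(uint16_t ticks)
{
	struct hc_coal c;

	c.timeout = (uint8_t)(ticks / 12);
	c.disable = ticks ? 0 : 1;	/* zero ticks turns coalescing off */
	return c;
}

int main(void)
{
	/* 25 matches the bp->rx_ticks default set later in this patch */
	struct hc_coal c = coal_from_ticks(25);

	printf("timeout %u disable %u\n", (unsigned)c.timeout,
	       (unsigned)c.disable);
	return 0;
}
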
@@ -4245,7 +4229,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4245 if (fp->tpa_state[i] == BNX2X_TPA_START) 4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4246 pci_unmap_single(bp->pdev, 4230 pci_unmap_single(bp->pdev,
4247 pci_unmap_addr(rx_buf, mapping), 4231 pci_unmap_addr(rx_buf, mapping),
4248 bp->rx_buf_use_size, 4232 bp->rx_buf_size,
4249 PCI_DMA_FROMDEVICE); 4233 PCI_DMA_FROMDEVICE);
4250 4234
4251 dev_kfree_skb(skb); 4235 dev_kfree_skb(skb);
@@ -4256,23 +4240,24 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4240static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4241{
4258 int func = BP_FUNC(bp); 4242 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4246 int i, j;
4261 4247
4262 bp->rx_buf_use_size = bp->dev->mtu; 4248 bp->rx_buf_size = bp->dev->mtu;
4263 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD; 4249 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4264 bp->rx_buf_size = bp->rx_buf_use_size + 64; 4250 BCM_RX_ETH_PAYLOAD_ALIGN;
4265 4251
4266 if (bp->flags & TPA_ENABLE_FLAG) { 4252 if (bp->flags & TPA_ENABLE_FLAG) {
4267 DP(NETIF_MSG_IFUP, 4253 DP(NETIF_MSG_IFUP,
4268 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n", 4254 "rx_buf_size %d effective_mtu %d\n",
4269 bp->rx_buf_use_size, bp->rx_buf_size, 4255 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4270 bp->dev->mtu + ETH_OVREHEAD);
4271 4256
4272 for_each_queue(bp, j) { 4257 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4258 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4259
4260 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4261 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4262 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4263 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4337,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4337 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4338 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4339 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4340 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4341 fp->disable_tpa = 1;
4358 ring_prod = 0; 4342 ring_prod = 0;
4359 break; 4343 break;
@@ -4363,13 +4347,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4347 fp->rx_sge_prod = ring_prod;
4364 4348
4365 /* Allocate BDs and initialize BD ring */ 4349 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4350 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4351 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4352 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4353 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4354 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4355 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4356 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4357 break;
4374 } 4358 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4359 ring_prod = NEXT_RX_IDX(ring_prod);
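
The ring-init change collapses the old rx_buf_use_size/rx_buf_size pair into a single rx_buf_size covering the MTU plus rx offset, Ethernet overhead and payload alignment. A sketch with illustrative constant values; the real ETH_OVREHEAD and BCM_RX_ETH_PAYLOAD_ALIGN are defined in bnx2x.h and may differ:

#include <stdio.h>

/* Illustrative values only; see bnx2x.h for the real definitions. */
#define ETH_OVREHEAD			24
#define BCM_RX_ETH_PAYLOAD_ALIGN	64

static int rx_buf_size(int mtu, int rx_offset)
{
	/* one buffer must hold the frame plus overhead plus align slack */
	return mtu + rx_offset + ETH_OVREHEAD + BCM_RX_ETH_PAYLOAD_ALIGN;
}

int main(void)
{
	printf("rx_buf_size(1500, 0) = %d\n", rx_buf_size(1500, 0));
	return 0;
}
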
@@ -4477,9 +4461,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
4477 context->ustorm_st_context.common.status_block_id = sb_id; 4461 context->ustorm_st_context.common.status_block_id = sb_id;
4478 context->ustorm_st_context.common.flags = 4462 context->ustorm_st_context.common.flags =
4479 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT; 4463 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4480 context->ustorm_st_context.common.mc_alignment_size = 64; 4464 context->ustorm_st_context.common.mc_alignment_size =
4465 BCM_RX_ETH_PAYLOAD_ALIGN;
4481 context->ustorm_st_context.common.bd_buff_size = 4466 context->ustorm_st_context.common.bd_buff_size =
4482 bp->rx_buf_use_size; 4467 bp->rx_buf_size;
4483 context->ustorm_st_context.common.bd_page_base_hi = 4468 context->ustorm_st_context.common.bd_page_base_hi =
4484 U64_HI(fp->rx_desc_mapping); 4469 U64_HI(fp->rx_desc_mapping);
4485 context->ustorm_st_context.common.bd_page_base_lo = 4470 context->ustorm_st_context.common.bd_page_base_lo =
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4482 }
4498 4483
4499 context->cstorm_st_context.sb_index_number = 4484 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4485 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4486 context->cstorm_st_context.status_block_id = sb_id;
4502 4487
4503 context->xstorm_ag_context.cdu_reserved = 4488 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4520 int i;
4536 4521
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4524 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4526#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4564 int func = BP_FUNC(bp);
4580 int i; 4565 int i;
4581 4566
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4568
4584 switch (mode) { 4569 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4570 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,46 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4602 bnx2x_set_client_config(bp);
4618} 4603}
4619 4604
4620static void bnx2x_init_internal(struct bnx2x *bp) 4605static void bnx2x_init_internal_common(struct bnx2x *bp)
4606{
4607 int i;
4608
4609 if (bp->flags & TPA_ENABLE_FLAG) {
4610 struct tstorm_eth_tpa_exist tpa = {0};
4611
4612 tpa.tpa_exist = 1;
4613
4614 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4615 ((u32 *)&tpa)[0]);
4616 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4617 ((u32 *)&tpa)[1]);
4618 }
4619
4620 /* Zero this manually as its initialization is
4621 currently missing in the initTool */
4622 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4623 REG_WR(bp, BAR_USTRORM_INTMEM +
4624 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4625}
4626
4627static void bnx2x_init_internal_port(struct bnx2x *bp)
4628{
4629 int port = BP_PORT(bp);
4630
4631 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4635}
4636
4637static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4638{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4639 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4640 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4641 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4642 int func = BP_FUNC(bp);
4626 int i; 4643 int i;
4644 u16 max_agg_size;
4627 4645
4628 if (is_multi(bp)) { 4646 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4647 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4654,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4654 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4655 (*(u32 *)&tstorm_config));
4638 4656
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4657 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4658 bnx2x_set_storm_rx_mode(bp);
4644 4659
4660 /* reset xstorm per client statistics */
4661 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664 i*4, 0);
4665 }
4666 /* reset tstorm per client statistics */
4667 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4670 i*4, 0);
4671 }
4672
4673 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4674 stats_flags.collect_eth = 1;
4646 4675
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4677 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4679 ((u32 *)&stats_flags)[1]);
4651 4680
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4682 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4684 ((u32 *)&stats_flags)[1]);
4656 4685
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4687 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4689 ((u32 *)&stats_flags)[1]);
4661 4690
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4691 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4692 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694 REG_WR(bp, BAR_XSTRORM_INTMEM +
4695 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4697
4698 REG_WR(bp, BAR_TSTRORM_INTMEM +
4699 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701 REG_WR(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4704
4665 if (CHIP_IS_E1H(bp)) { 4705 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4706 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4716,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4716 bp->e1hov);
4677 } 4717 }
4678 4718
4679 /* Zero this manualy as its initialization is 4719 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4720 max_agg_size = min((u32)(bp->rx_buf_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4721 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4722 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4723 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4724 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4725
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4726 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4727 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4730,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4730 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4731 U64_HI(fp->rx_comp_mapping));
4695 4732
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4733 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4734 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4735 max_agg_size);
4702 } 4736 }
4703} 4737}
4704 4738
4705static void bnx2x_nic_init(struct bnx2x *bp) 4739static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4740{
4741 switch (load_code) {
4742 case FW_MSG_CODE_DRV_LOAD_COMMON:
4743 bnx2x_init_internal_common(bp);
4744 /* no break */
4745
4746 case FW_MSG_CODE_DRV_LOAD_PORT:
4747 bnx2x_init_internal_port(bp);
4748 /* no break */
4749
4750 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4751 bnx2x_init_internal_func(bp);
4752 break;
4753
4754 default:
4755 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4756 break;
4757 }
4758}
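
The new bnx2x_init_internal() dispatches on the MCP load level and relies on deliberate case fall-through (the "/* no break */" comments) so a COMMON load also runs the PORT and FUNCTION stages, and a PORT load also runs the FUNCTION stage. A compact, compilable sketch of the same idiom:

#include <stdio.h>

enum load_level { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_common(void)   { puts("common init"); }
static void init_port(void)     { puts("port init"); }
static void init_function(void) { puts("function init"); }

static void init_internal(enum load_level level)
{
	switch (level) {
	case LOAD_COMMON:
		init_common();
		/* no break - a common load implies port init too */
	case LOAD_PORT:
		init_port();
		/* no break - a port load implies function init too */
	case LOAD_FUNCTION:
		init_function();
		break;
	}
}

int main(void)
{
	init_internal(LOAD_PORT);	/* prints "port init", "function init" */
	return 0;
}
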
4759
4760static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4761{
4707 int i; 4762 int i;
4708 4763
@@ -4717,19 +4772,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4772 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4773 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4774 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4775 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4776 FP_SB_ID(fp));
4777 bnx2x_update_fpsb_idx(fp);
4722 } 4778 }
4723 4779
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4780 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4781 DEF_SB_ID);
4782 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4783 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4784 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4785 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4786 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4787 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4788 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4789 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4790 bnx2x_int_enable(bp);
4735} 4791}
@@ -4878,7 +4934,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4936 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4882 4938
4883 /* Write 0 to parser credits for CFC search request */ 4939 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4940 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4989,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4989 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4990 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4991 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4992 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 4993
4938 /* Write 0 to parser credits for CFC search request */ 4994 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4995 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5056,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 5056 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 5057 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 5058 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); 5059 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5004 5060
5005 DP(NETIF_MSG_HW, "done\n"); 5061 DP(NETIF_MSG_HW, "done\n");
5006 5062
@@ -5089,11 +5145,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5145 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5146#endif
5091 5147
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5148 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5149#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5150 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5214,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5214 }
5164 5215
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5216 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5217 /* set NIC mode */
5218 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5219 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5220 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5221
@@ -5296,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
5296 } 5349 }
5297 5350
5298 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { 5351 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5352 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5299 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5300 /* Fan failure is indicated by SPIO 5 */ 5354 /* Fan failure is indicated by SPIO 5 */
5301 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, 5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5322,16 +5376,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
5322 5376
5323 enable_blocks_attention(bp); 5377 enable_blocks_attention(bp);
5324 5378
5325 if (bp->flags & TPA_ENABLE_FLAG) { 5379 if (!BP_NOMCP(bp)) {
5326 struct tstorm_eth_tpa_exist tmp = {0}; 5380 bnx2x_acquire_phy_lock(bp);
5327 5381 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5328 tmp.tpa_exist = 1; 5382 bnx2x_release_phy_lock(bp);
5329 5383 } else
5330 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET, 5384 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5331 ((u32 *)&tmp)[0]);
5332 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5333 ((u32 *)&tmp)[1]);
5334 }
5335 5385
5336 return 0; 5386 return 0;
5337} 5387}
@@ -5483,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
5483 /* Port DMAE comes here */ 5533 /* Port DMAE comes here */
5484 5534
5485 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { 5535 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5486 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5487 /* add SPIO 5 to group 0 */ 5538 /* add SPIO 5 to group 0 */
5488 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5539 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -5638,18 +5689,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5638 int func = BP_FUNC(bp); 5689 int func = BP_FUNC(bp);
5639 u32 seq = ++bp->fw_seq; 5690 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0; 5691 u32 rc = 0;
5692 u32 cnt = 1;
5693 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5641 5694
5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 5695 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 5696 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5644 5697
5645 /* let the FW do it's magic ... */ 5698 do {
5646 msleep(100); /* TBD */ 5699 /* let the FW do it's magic ... */
5700 msleep(delay);
5647 5701
5648 if (CHIP_REV_IS_SLOW(bp)) 5702 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5649 msleep(900);
5650 5703
5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 5704 /* Give the FW up to 2 second (200*10ms) */
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 5705 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5706
5707 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708 cnt*delay, rc, seq);
5653 5709
5654 /* is this a reply to our command? */ 5710 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 5711 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -5713,6 +5769,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5769 NUM_RCQ_BD);
5714 5770
5715 /* SGE ring */ 5771 /* SGE ring */
5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5774 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5775 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5883,14 +5940,15 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5883 5940
5884 pci_unmap_single(bp->pdev, 5941 pci_unmap_single(bp->pdev,
5885 pci_unmap_addr(rx_buf, mapping), 5942 pci_unmap_addr(rx_buf, mapping),
5886 bp->rx_buf_use_size, 5943 bp->rx_buf_size,
5887 PCI_DMA_FROMDEVICE); 5944 PCI_DMA_FROMDEVICE);
5888 5945
5889 rx_buf->skb = NULL; 5946 rx_buf->skb = NULL;
5890 dev_kfree_skb(skb); 5947 dev_kfree_skb(skb);
5891 } 5948 }
5892 if (!fp->disable_tpa) 5949 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5950 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5952 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5953 }
5896} 5954}
@@ -5976,8 +6034,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6034 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6035 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6036 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6037 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6038 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6039 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6040 return -EBUSY;
5983 } 6041 }
@@ -6000,11 +6058,49 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6000 return rc; 6058 return rc;
6001} 6059}
6002 6060
6061static void bnx2x_napi_enable(struct bnx2x *bp)
6062{
6063 int i;
6064
6065 for_each_queue(bp, i)
6066 napi_enable(&bnx2x_fp(bp, i, napi));
6067}
6068
6069static void bnx2x_napi_disable(struct bnx2x *bp)
6070{
6071 int i;
6072
6073 for_each_queue(bp, i)
6074 napi_disable(&bnx2x_fp(bp, i, napi));
6075}
6076
6077static void bnx2x_netif_start(struct bnx2x *bp)
6078{
6079 if (atomic_dec_and_test(&bp->intr_sem)) {
6080 if (netif_running(bp->dev)) {
6081 if (bp->state == BNX2X_STATE_OPEN)
6082 netif_wake_queue(bp->dev);
6083 bnx2x_napi_enable(bp);
6084 bnx2x_int_enable(bp);
6085 }
6086 }
6087}
6088
6089static void bnx2x_netif_stop(struct bnx2x *bp)
6090{
6091 bnx2x_int_disable_sync(bp);
6092 if (netif_running(bp->dev)) {
6093 bnx2x_napi_disable(bp);
6094 netif_tx_disable(bp->dev);
6095 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6096 }
6097}
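
bnx2x_netif_start() only brings the datapath up when atomic_dec_and_test() drops intr_sem to zero, pairing with the atomic_set(&bp->intr_sem, 1) that this patch moves into bnx2x_init_bp(). A sketch of that gate using C11 atomics as stand-ins for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int intr_sem = 1;	/* start gated, as bnx2x_init_bp() now does */

static void netif_start(void)
{
	/* mirrors atomic_dec_and_test(): only the call that brings the
	 * gate to zero may enable the datapath */
	if (atomic_fetch_sub(&intr_sem, 1) - 1 == 0)
		puts("wake queue, enable NAPI, enable interrupts");
}

static void netif_stop(void)
{
	puts("disable interrupts, then NAPI, then tx");
}

int main(void)
{
	netif_start();	/* gate drops 1 -> 0, datapath comes up */
	netif_stop();
	return 0;
}
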
6098
6003/* 6099/*
6004 * Init service functions 6100 * Init service functions
6005 */ 6101 */
6006 6102
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6103static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6104{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6105 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6106 int port = BP_PORT(bp);
@@ -6026,11 +6122,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6122 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6123 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6124 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6125 if (set)
6126 config->config_table[0].target_table_entry.flags = 0;
6127 else
6128 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6129 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6130 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6131
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6132 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6133 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6134 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6135 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6136 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6140,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6140 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6141 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6142 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6143 if (set)
6144 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6145 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6146 else
6147 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6148 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6149 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6150
@@ -6050,12 +6153,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6153 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6154}
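
The CAM entries above pack the 6-byte MAC address into three byte-swapped 16-bit words. A small sketch of the same packing, with swab16() reimplemented here for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)	/* byte swap, like the kernel helper */
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	/* same packing as the cam_entry fields in the hunk above */
	uint16_t msb    = swab16(*(uint16_t *)&mac[0]);
	uint16_t middle = swab16(*(uint16_t *)&mac[2]);
	uint16_t lsb    = swab16(*(uint16_t *)&mac[4]);

	printf("MAC %04x:%04x:%04x\n", msb, middle, lsb);
	return 0;
}
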
6052 6155
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6156static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6157{
6055 struct mac_configuration_cmd_e1h *config = 6158 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6159 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6160
6058 if (bp->state != BNX2X_STATE_OPEN) { 6161 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6162 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6163 return;
6061 } 6164 }
@@ -6079,9 +6182,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6182 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6183 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6184 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6185 if (set)
6186 config->config_table[0].flags = BP_PORT(bp);
6187 else
6188 config->config_table[0].flags =
6189 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6190
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6191 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6192 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6193 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6194 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6195 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6214,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 bnx2x_rx_int(bp->fp, 10); 6214 bnx2x_rx_int(bp->fp, 10);
6107 /* if index is different from 0 6215 /* if index is different from 0
6108 * the reply for some commands will 6216 * the reply for some commands will
6109 * be on the none default queue 6217 * be on the non default queue
6110 */ 6218 */
6111 if (idx) 6219 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10); 6220 bnx2x_rx_int(&bp->fp[idx], 10);
6113 } 6221 }
6114 mb(); /* state is changed by bnx2x_sp_event() */
6115 6222
6223 mb(); /* state is changed by bnx2x_sp_event() */
6116 if (*state_p == state) 6224 if (*state_p == state)
6117 return 0; 6225 return 0;
6118 6226
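
The mb() move above puts the barrier immediately before the state check on every pass, since the state is written from the completion path in bnx2x_sp_event(). A userspace analogue where a C11 atomic load takes the place of the explicit barrier:

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static _Atomic int fp_state;

/* stand-in for bnx2x_sp_event() updating the state from completion */
static void sp_event(int new_state)
{
	atomic_store(&fp_state, new_state);
}

/* poll until the completion handler reaches 'state', within a ms budget */
static bool wait_ramrod(int state, int timeout_ms)
{
	while (timeout_ms--) {
		/* the seq_cst atomic load stands in for the driver's mb() */
		if (atomic_load(&fp_state) == state)
			return true;
		usleep(1000);
	}
	return false;
}

int main(void)
{
	sp_event(1);			/* pretend the ramrod completed */
	return wait_ramrod(1, 5000) ? 0 : 1;
}
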
@@ -6167,7 +6275,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6275{
6168 u32 load_code; 6276 u32 load_code;
6169 int i, rc; 6277 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6278#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6279 if (unlikely(bp->panic))
6173 return -EPERM; 6280 return -EPERM;
@@ -6183,22 +6290,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6290 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6291 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6292 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6293 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6294 return -EBUSY;
6188 } 6295 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6296 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6297 return -EBUSY; /* other port in diagnostic mode */
6191 6298
6192 } else { 6299 } else {
6300 int port = BP_PORT(bp);
6301
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6302 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6303 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6304 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6305 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6306 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6307 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6308 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6309 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6310 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6311 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6312 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6313 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6356,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6356 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6357 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6358
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6359 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6360 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6361 if (rc) {
@@ -6273,22 +6379,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6273 rc = bnx2x_init_hw(bp, load_code); 6379 rc = bnx2x_init_hw(bp, load_code);
6274 if (rc) { 6380 if (rc) {
6275 BNX2X_ERR("HW init failed, aborting\n"); 6381 BNX2X_ERR("HW init failed, aborting\n");
6276 goto load_error; 6382 goto load_int_disable;
6277 } 6383 }
6278 6384
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6385 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6386 bnx2x_nic_init(bp, load_code);
6284 6387
6285 /* Send LOAD_DONE command to MCP */ 6388 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6389 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6390 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6391 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6392 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6393 rc = -EBUSY;
6291 goto load_int_disable; 6394 goto load_rings_free;
6292 } 6395 }
6293 } 6396 }
6294 6397
@@ -6298,15 +6401,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6298 6401
6299 /* Enable Rx interrupt handling before sending the ramrod 6402 /* Enable Rx interrupt handling before sending the ramrod
6300 as it's completed on Rx FP queue */ 6403 as it's completed on Rx FP queue */
6301 for_each_queue(bp, i) 6404 bnx2x_napi_enable(bp);
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6405
6406 /* Enable interrupt handling */
6407 atomic_set(&bp->intr_sem, 0);
6303 6408
6304 rc = bnx2x_setup_leading(bp); 6409 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6410 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6411 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1; 6412 goto load_netif_stop;
6308#endif
6309 goto load_stop_netif;
6310 } 6413 }
6311 6414
6312 if (CHIP_IS_E1H(bp)) 6415 if (CHIP_IS_E1H(bp))
@@ -6319,13 +6422,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6319 for_each_nondefault_queue(bp, i) { 6422 for_each_nondefault_queue(bp, i) {
6320 rc = bnx2x_setup_multi(bp, i); 6423 rc = bnx2x_setup_multi(bp, i);
6321 if (rc) 6424 if (rc)
6322 goto load_stop_netif; 6425 goto load_netif_stop;
6323 } 6426 }
6324 6427
6325 if (CHIP_IS_E1(bp)) 6428 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6429 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6430 else
6328 bnx2x_set_mac_addr_e1h(bp); 6431 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6432
6330 if (bp->port.pmf) 6433 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6434 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6442,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6442 break;
6340 6443
6341 case LOAD_OPEN: 6444 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6445 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6446 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6447 if (bp->flags & USING_MSIX_FLAG)
@@ -6365,21 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6365 6467
6366 return 0; 6468 return 0;
6367 6469
6368load_stop_netif: 6470load_netif_stop:
6471 bnx2x_napi_disable(bp);
6472load_rings_free:
6473 /* Free SKBs, SGEs, TPA pool and driver internals */
6474 bnx2x_free_skbs(bp);
6369 for_each_queue(bp, i) 6475 for_each_queue(bp, i)
6370 napi_disable(&bnx2x_fp(bp, i, napi)); 6476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6371
6372load_int_disable: 6477load_int_disable:
6373 bnx2x_int_disable_sync(bp); 6478 bnx2x_int_disable_sync(bp);
6374
6375 /* Release IRQs */ 6479 /* Release IRQs */
6376 bnx2x_free_irq(bp); 6480 bnx2x_free_irq(bp);
6377
6378 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6481load_error:
6384 bnx2x_free_mem(bp); 6482 bnx2x_free_mem(bp);
6385 6483
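
The reshuffled error labels unwind in reverse order of acquisition: NAPI off, rings freed, interrupts and IRQs released, memory freed, with each goto target falling through into the next. A schematic of the idiom; the prints stand in for the real teardown calls:

#include <stdio.h>

static int nic_load(void)
{
	int rc;

	/* acquire in order: memory, irq, rings, napi */
	puts("alloc mem");
	puts("request irq");
	puts("init rings");
	puts("enable napi");

	rc = -1;			/* pretend setup_leading failed */
	if (rc)
		goto load_netif_stop;
	return 0;

	/* each label releases one layer and falls through to the next */
load_netif_stop:
	puts("disable napi");
load_rings_free:
	puts("free skbs and SGE ranges");
load_int_disable:
	puts("sync-disable interrupts, free irq");
load_error:
	puts("free mem");
	return rc;
}

int main(void)
{
	return nic_load() ? 1 : 0;
}
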
@@ -6394,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6394 6492
6395 /* halt the connection */ 6493 /* halt the connection */
6396 bp->fp[index].state = BNX2X_FP_STATE_HALTING; 6494 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); 6495 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6398 6496
6399 /* Wait for completion */ 6497 /* Wait for completion */
6400 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 6498 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6411,7 +6509,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6509 return rc;
6412} 6510}
6413 6511
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6512static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6513{
6416 u16 dsb_sp_prod_idx; 6514 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6515 /* if the other port is handling traffic,
@@ -6429,7 +6527,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6527 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6528 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6529 if (rc) /* timeout */
6432 return; 6530 return rc;
6433 6531
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6532 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6533
@@ -6441,20 +6539,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6441 so there is not much to do if this times out 6539 so there is not much to do if this times out
6442 */ 6540 */
6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { 6541 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6444 msleep(1);
6445 if (!cnt) { 6542 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " 6543 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", 6544 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx); 6545 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR 6546#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic(); 6547 bnx2x_panic();
6548#else
6549 rc = -EBUSY;
6451#endif 6550#endif
6452 break; 6551 break;
6453 } 6552 }
6454 cnt--; 6553 cnt--;
6554 msleep(1);
6455 } 6555 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6556 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6557 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6558
6559 return rc;
6458} 6560}
6459 6561
6460static void bnx2x_reset_func(struct bnx2x *bp) 6562static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6598,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 6598 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val) 6599 if (val)
6498 DP(NETIF_MSG_IFDOWN, 6600 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val); 6601 "BRB1 is not empty %d blocks are occupied\n", val);
6500 6602
6501 /* TODO: Close Doorbell port? */ 6603 /* TODO: Close Doorbell port? */
6502} 6604}
@@ -6536,43 +6638,35 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6536 } 6638 }
6537} 6639}
6538 6640
6539/* msut be called with rtnl_lock */ 6641/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 6642static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6541{ 6643{
6644 int port = BP_PORT(bp);
6542 u32 reset_code = 0; 6645 u32 reset_code = 0;
6543 int i, cnt; 6646 int i, cnt, rc;
6544 6647
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6648 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6649
6547 bp->rx_mode = BNX2X_RX_MODE_NONE; 6650 bp->rx_mode = BNX2X_RX_MODE_NONE;
6548 bnx2x_set_storm_rx_mode(bp); 6651 bnx2x_set_storm_rx_mode(bp);
6549 6652
6550 if (netif_running(bp->dev)) { 6653 bnx2x_netif_stop(bp);
6551 netif_tx_disable(bp->dev); 6654 if (!netif_running(bp->dev))
6552 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6655 bnx2x_napi_disable(bp);
6553 }
6554
6555 del_timer_sync(&bp->timer); 6656 del_timer_sync(&bp->timer);
6556 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6657 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6658 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6659 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6660
6560 /* Wait until all fast path tasks complete */ 6661 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6662 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6663 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6664
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6665 cnt = 1000;
6573 smp_rmb(); 6666 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6667 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6668
6669 bnx2x_tx_int(fp, 1000);
6576 if (!cnt) { 6670 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6671 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6672 i);
@@ -6584,40 +6678,68 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6678#endif
6585 } 6679 }
6586 cnt--; 6680 cnt--;
6681 msleep(1);
6587 smp_rmb(); 6682 smp_rmb();
6588 } 6683 }
6589 } 6684 }
6590 6685 /* Give HW time to discard old tx messages */
6591 /* Wait until all slow path tasks complete */ 6686 msleep(1);
6592 cnt = 1000;
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595
6596 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi));
6598 /* Disable interrupts after Tx and Rx are disabled on stack level */
6599 bnx2x_int_disable_sync(bp);
6600 6687
6601 /* Release IRQs */ 6688 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6689 bnx2x_free_irq(bp);
6603 6690
6604 if (bp->flags & NO_WOL_FLAG) 6691 if (CHIP_IS_E1(bp)) {
6692 struct mac_configuration_cmd *config =
6693 bnx2x_sp(bp, mcast_config);
6694
6695 bnx2x_set_mac_addr_e1(bp, 0);
6696
6697 for (i = 0; i < config->hdr.length_6b; i++)
6698 CAM_INVALIDATE(config->config_table[i]);
6699
6700 config->hdr.length_6b = i;
6701 if (CHIP_REV_IS_SLOW(bp))
6702 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6703 else
6704 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6705 config->hdr.client_id = BP_CL_ID(bp);
6706 config->hdr.reserved1 = 0;
6707
6708 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6709 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6710 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6711
6712 } else { /* E1H */
6713 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6714
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (unload_mode == UNLOAD_NORMAL)
6722 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6723
6724 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6725 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6726 if (CHIP_IS_E1H(bp))
6727 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6728
6607 else if (bp->wol) { 6729 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6730 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6731 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6732 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6733 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6734 preserve entry 0 which is used by the PMF */
6735 u8 entry = (BP_E1HVN(bp) + 1)*8;
6736
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6737 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6738 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6739
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6740 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6741 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6742 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6743
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6744 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6745
@@ -6630,23 +6752,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6630 if (bnx2x_stop_multi(bp, i)) 6752 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6753 goto unload_error;
6632 6754
6633 if (CHIP_IS_E1H(bp)) 6755 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6756 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6757 BNX2X_ERR("Stop leading failed!\n");
6758#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6759 return -EBUSY;
6642 } 6760#else
6761 goto unload_error;
6643#endif 6762#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6763 }
6651 6764
6652unload_error: 6765unload_error:
@@ -6656,12 +6769,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6769 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6770 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6771 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6772 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6773 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6774 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6775 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6776 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6777 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6778 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6779 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6780 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6794,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6794 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6795 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6796 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6797 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6798 bnx2x_free_mem(bp);
6687 6799
6688 bp->state = BNX2X_STATE_CLOSED; 6800 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,49 +6845,88 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6733 /* Check if it is the UNDI driver 6845 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7 6846 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */ 6847 */
6848 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6849 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6850 if (val == 0x7)
6851 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6852 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6853
6737 if (val == 0x7) { 6854 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6855 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */ 6856 /* save our func */
6740 int func = BP_FUNC(bp); 6857 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq; 6858 u32 swap_en;
6859 u32 swap_val;
6742 6860
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6861 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744 6862
6745 /* try unload UNDI on port 0 */ 6863 /* try unload UNDI on port 0 */
6746 bp->func = 0; 6864 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp, 6865 bp->fw_seq =
6748 func_mb[bp->func].drv_mb_header) & 6866 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK); 6867 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code); 6868 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753 6869
6754 /* if UNDI is loaded on the other port */ 6870 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 6871 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756 6872
6873 /* send "DONE" for previous unload */
6874 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6875
6876 /* unload UNDI on port 1 */
6757 bp->func = 1; 6877 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp, 6878 bp->fw_seq =
6759 func_mb[bp->func].drv_mb_header) & 6879 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK); 6880 DRV_MSG_SEQ_NUMBER_MASK);
6761 6881 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 bnx2x_fw_command(bp, 6882
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); 6883 bnx2x_fw_command(bp, reset_code);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 } 6884 }
6771 6885
6886 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6887 HC_REG_CONFIG_0), 0x1000);
6888
6889 /* close input traffic and wait for it */
6890 /* Do not rcv packets to BRB */
6891 REG_WR(bp,
6892 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6893 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6894 /* Do not direct rcv packets that are not for MCP to
6895 * the BRB */
6896 REG_WR(bp,
6897 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6898 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6899 /* clear AEU */
6900 REG_WR(bp,
6901 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6902 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6903 msleep(10);
6904
6905 /* save NIG port swap info */
6906 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6772 /* reset device */ 6908 /* reset device */
6773 REG_WR(bp, 6909 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6910 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f); 6911 0xd3ffffff);
6776 REG_WR(bp, 6912 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6913 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403); 6914 0x1403);
6915 /* take the NIG out of reset and restore swap values */
6916 REG_WR(bp,
6917 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6918 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6919 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6920 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6921
6922 /* send unload done to the MCP */
6923 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6924
6925 /* restore our func and fw_seq */
6926 bp->func = func;
6927 bp->fw_seq =
6928 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6929 DRV_MSG_SEQ_NUMBER_MASK);
6779 } 6930 }
6780 } 6931 }
6781} 6932}
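
The expanded UNDI path follows a save/reset/restore shape: quiesce BRB input, save the NIG port-swap straps, assert the chip reset, take the NIG back out of reset and rewrite the saved straps, and only then send UNLOAD_DONE. A toy sketch of the preserve-across-reset pattern, with registers modeled as an array:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* stand-ins for NIG swap/strap registers */

static uint32_t reg_rd(int i)             { return regs[i]; }
static void     reg_wr(int i, uint32_t v) { regs[i] = v; }

static void reset_with_state_preserved(void)
{
	/* save NIG port swap info before the block is reset */
	uint32_t swap_val = reg_rd(0);
	uint32_t swap_en  = reg_rd(1);

	puts("assert chip reset (clears the saved straps)");
	regs[0] = regs[1] = 0;

	puts("take block out of reset, restore straps");
	reg_wr(0, swap_val);
	reg_wr(1, swap_en);
}

int main(void)
{
	regs[0] = 1;
	regs[1] = 1;
	reset_with_state_preserved();
	printf("restored: %u %u\n", regs[0], regs[1]);
	return 0;
}
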
@@ -6783,6 +6934,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 6934static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{ 6935{
6785 u32 val, val2, val3, val4, id; 6936 u32 val, val2, val3, val4, id;
6937 u16 pmc;
6786 6938
6787 /* Get the chip revision id and number. */ 6939 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6940 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6992,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X," 6992 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val); 6993 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 } 6994 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n", 6995
6844 (bp->flags & NO_WOL_FLAG)? "Not " : ""); 6996 if (BP_E1HVN(bp) == 0) {
6997 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6998 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6999 } else {
7000 /* no WOL capability for E1HVN != 0 */
7001 bp->flags |= NO_WOL_FLAG;
7002 }
7003 BNX2X_DEV_INFO("%sWoL capable\n",
7004 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6845 7005
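
The WoL decision above now comes from the standard PCI power-management capability: the PMC register advertises whether the device can signal PME# from D3cold, and only vn 0 of an E1H part performs the read (the other vns get NO_WOL_FLAG unconditionally). A minimal stand-alone version of the check, assuming pm_cap was located earlier with pci_find_capability(pdev, PCI_CAP_ID_PM):

    #include <linux/pci.h>

    /* sketch: true when the device can assert PME# from D3cold,
     * i.e. when Wake-on-LAN can survive a powered-down state */
    static bool can_wake_from_d3cold(struct pci_dev *pdev, int pm_cap)
    {
            u16 pmc;

            pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &pmc);
            return (pmc & PCI_PM_CAP_PME_D3cold) != 0;
    }
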
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 7006 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 7007 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7434,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7274 bp->mf_config = 7434 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7435 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7276 7436
7277 val = 7437 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & 7438 FUNC_MF_CFG_E1HOV_TAG_MASK);
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7439 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7281 7440
7282 bp->e1hov = val; 7441 bp->e1hov = val;
@@ -7324,7 +7483,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7324 7483
7325 if (BP_NOMCP(bp)) { 7484 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */ 7485 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n"); 7486 BNX2X_ERR("warning random MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr); 7487 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 7488 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 } 7489 }
@@ -7337,8 +7496,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7337 int func = BP_FUNC(bp); 7496 int func = BP_FUNC(bp);
7338 int rc; 7497 int rc;
7339 7498
7340 if (nomcp) 7499 /* Disable interrupt handling until HW is initialized */
7341 bp->flags |= NO_MCP_FLAG; 7500 atomic_set(&bp->intr_sem, 1);
7342 7501
7343 mutex_init(&bp->port.phy_mutex); 7502 mutex_init(&bp->port.phy_mutex);
7344 7503
@@ -7377,8 +7536,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7377 bp->tx_ticks = 50; 7536 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25; 7537 bp->rx_ticks = 25;
7379 7538
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7539 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval); 7540 bp->current_interval = (poll ? poll : bp->timer_interval);
7384 7541
@@ -7628,25 +7785,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info) 7785 struct ethtool_drvinfo *info)
7629{ 7786{
7630 struct bnx2x *bp = netdev_priv(dev); 7787 struct bnx2x *bp = netdev_priv(dev);
7631 char phy_fw_ver[PHY_FW_VER_LEN]; 7788 u8 phy_fw_ver[PHY_FW_VER_LEN];
7632 7789
7633 strcpy(info->driver, DRV_MODULE_NAME); 7790 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION); 7791 strcpy(info->version, DRV_MODULE_VERSION);
7635 7792
7636 phy_fw_ver[0] = '\0'; 7793 phy_fw_ver[0] = '\0';
7637 if (bp->port.pmf) { 7794 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp); 7795 bnx2x_acquire_phy_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params, 7796 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED), 7797 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN); 7798 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp); 7799 bnx2x_release_phy_lock(bp);
7643 } 7800 }
7644 7801
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", 7802 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, 7803 (bp->common.bc_ver & 0xff0000) >> 16,
7647 BCM_5710_FW_REVISION_VERSION, 7804 (bp->common.bc_ver & 0xff00) >> 8,
7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, 7805 (bp->common.bc_ver & 0xff),
7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); 7806 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7650 strcpy(info->bus_info, pci_name(bp->pdev)); 7807 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS; 7808 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS; 7809 info->testinfo_len = BNX2X_NUM_TESTS;
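
fw_version is now derived from the bootcode version rather than the compiled-in BCM_5710_FW_* constants; bc_ver packs major, minor and revision one byte each, so 0x040200 renders as "BC:4.2.0". The decode in isolation:

    /* sketch: unpack the 24-bit bootcode version, one byte per field */
    u32 bc_ver = bp->common.bc_ver;         /* e.g. 0x040200 */

    snprintf(info->fw_version, 32, "BC:%d.%d.%d",
             (bc_ver & 0xff0000) >> 16,     /* major:    4 */
             (bc_ver & 0xff00) >> 8,        /* minor:    2 */
             (bc_ver & 0xff));              /* revision: 0 */
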
@@ -8097,7 +8254,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8097 if (eeprom->magic == 0x00504859) 8254 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) { 8255 if (bp->port.pmf) {
8099 8256
8100 bnx2x_phy_hw_lock(bp); 8257 bnx2x_acquire_phy_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp), 8258 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config, 8259 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED), 8260 (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8266,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8109 rc |= bnx2x_phy_init(&bp->link_params, 8266 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars); 8267 &bp->link_vars);
8111 } 8268 }
8112 bnx2x_phy_hw_unlock(bp); 8269 bnx2x_release_phy_lock(bp);
8113 8270
8114 } else /* Only the PMF can access the PHY */ 8271 } else /* Only the PMF can access the PHY */
8115 return -EINVAL; 8272 return -EINVAL;
@@ -8128,7 +8285,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
8128 8285
8129 coal->rx_coalesce_usecs = bp->rx_ticks; 8286 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks; 8287 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132 8288
8133 return 0; 8289 return 0;
8134} 8290}
@@ -8146,44 +8302,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
8146 if (bp->tx_ticks > 0x3000) 8302 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000; 8303 bp->tx_ticks = 0x3000;
8148 8304
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
8154 if (netif_running(dev)) 8305 if (netif_running(dev))
8155 bnx2x_update_coalesce(bp); 8306 bnx2x_update_coalesce(bp);
8156 8307
8157 return 0; 8308 return 0;
8158} 8309}
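
With the stats block parameter gone, set_coalesce only clamps and applies the rx/tx tick values; anything above 0x3000 microseconds is capped. The clamp written as a one-liner (an equivalent sketch for in-range values, not the driver's wording):

    /* sketch: both coalescing knobs are capped at 0x3000 us */
    bp->rx_ticks = min_t(u32, coal->rx_coalesce_usecs, 0x3000);
    bp->tx_ticks = min_t(u32, coal->tx_coalesce_usecs, 0x3000);
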
8159 8310
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
8187static void bnx2x_get_ringparam(struct net_device *dev, 8311static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering) 8312 struct ethtool_ringparam *ering)
8189{ 8313{
@@ -8266,7 +8390,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8266 8390
8267 if (epause->autoneg) { 8391 if (epause->autoneg) {
8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8392 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 8393 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8270 return -EINVAL; 8394 return -EINVAL;
8271 } 8395 }
8272 8396
@@ -8285,6 +8409,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8285 return 0; 8409 return 0;
8286} 8410}
8287 8411
8412static int bnx2x_set_flags(struct net_device *dev, u32 data)
8413{
8414 struct bnx2x *bp = netdev_priv(dev);
8415 int changed = 0;
8416 int rc = 0;
8417
8418 /* TPA requires Rx CSUM offloading */
8419 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8420 if (!(dev->features & NETIF_F_LRO)) {
8421 dev->features |= NETIF_F_LRO;
8422 bp->flags |= TPA_ENABLE_FLAG;
8423 changed = 1;
8424 }
8425
8426 } else if (dev->features & NETIF_F_LRO) {
8427 dev->features &= ~NETIF_F_LRO;
8428 bp->flags &= ~TPA_ENABLE_FLAG;
8429 changed = 1;
8430 }
8431
8432 if (changed && netif_running(dev)) {
8433 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8434 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8435 }
8436
8437 return rc;
8438}
8439
8288static u32 bnx2x_get_rx_csum(struct net_device *dev) 8440static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{ 8441{
8290 struct bnx2x *bp = netdev_priv(dev); 8442 struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8447,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) 8447static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{ 8448{
8297 struct bnx2x *bp = netdev_priv(dev); 8449 struct bnx2x *bp = netdev_priv(dev);
8450 int rc = 0;
8298 8451
8299 bp->rx_csum = data; 8452 bp->rx_csum = data;
8300 return 0; 8453
8454 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8455 TPA'ed packets will be discarded due to wrong TCP CSUM */
8456 if (!data) {
8457 u32 flags = ethtool_op_get_flags(dev);
8458
8459 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8460 }
8461
8462 return rc;
8301} 8463}
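
Taken together, bnx2x_set_flags and bnx2x_set_rx_csum enforce one invariant from both directions: TPA (exposed as LRO) can only be active while Rx checksum offload is active, since aggregated frames rely on hardware-verified TCP checksums. A schematic of the coupling (want_lro is an illustrative name, not a driver variable):

    /* sketch of the invariant, not actual driver code */
    if (want_lro && bp->rx_csum) {
            dev->features |= NETIF_F_LRO;   /* also sets TPA_ENABLE_FLAG */
    } else {
            dev->features &= ~NETIF_F_LRO;  /* also clears TPA_ENABLE_FLAG */
    }

    /* turning Rx CSUM off forces the LRO side of the invariant */
    if (!bp->rx_csum)
            bnx2x_set_flags(dev, ethtool_op_get_flags(dev) & ~ETH_FLAG_LRO);

Either transition unloads and reloads the NIC when the interface is running, which suggests the TPA configuration is only consumed at load time.
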
8302 8464
8303static int bnx2x_set_tso(struct net_device *dev, u32 data) 8465static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8497,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8335{ 8497{
8336 int idx, i, rc = -ENODEV; 8498 int idx, i, rc = -ENODEV;
8337 u32 wr_val = 0; 8499 u32 wr_val = 0;
8500 int port = BP_PORT(bp);
8338 static const struct { 8501 static const struct {
8339 u32 offset0; 8502 u32 offset0;
8340 u32 offset1; 8503 u32 offset1;
@@ -8400,7 +8563,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8400 8563
8401 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 8564 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8402 u32 offset, mask, save_val, val; 8565 u32 offset, mask, save_val, val;
8403 int port = BP_PORT(bp);
8404 8566
8405 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 8567 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8406 mask = reg_tbl[i].mask; 8568 mask = reg_tbl[i].mask;
@@ -8446,16 +8608,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8446 static const struct { 8608 static const struct {
8447 char *name; 8609 char *name;
8448 u32 offset; 8610 u32 offset;
8449 u32 mask; 8611 u32 e1_mask;
8612 u32 e1h_mask;
8450 } prty_tbl[] = { 8613 } prty_tbl[] = {
8451 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, 8614 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8452 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, 8615 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8453 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, 8616 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8454 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, 8617 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8455 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, 8618 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8456 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, 8619 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8457 8620
8458 { NULL, 0xffffffff, 0 } 8621 { NULL, 0xffffffff, 0, 0 }
8459 }; 8622 };
8460 8623
8461 if (!netif_running(bp->dev)) 8624 if (!netif_running(bp->dev))
@@ -8469,7 +8632,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8469 /* Check the parity status */ 8632 /* Check the parity status */
8470 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 8633 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8471 val = REG_RD(bp, prty_tbl[i].offset); 8634 val = REG_RD(bp, prty_tbl[i].offset);
8472 if (val & ~(prty_tbl[i].mask)) { 8635 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8636 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8473 DP(NETIF_MSG_HW, 8637 DP(NETIF_MSG_HW,
8474 "%s is 0x%x\n", prty_tbl[i].name, val); 8638 "%s is 0x%x\n", prty_tbl[i].name, val);
8475 goto test_mem_exit; 8639 goto test_mem_exit;
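
The parity table now carries one expected-bits mask per chip family, and the pass/fail test picks the mask that matches the silicon; any parity bit outside it fails the memory test. Reduced to its core:

    /* sketch: fail on any parity bit not expected for this chip */
    u32 mask = CHIP_IS_E1(bp) ? prty_tbl[i].e1_mask : prty_tbl[i].e1h_mask;
    u32 val = REG_RD(bp, prty_tbl[i].offset);

    if (val & ~mask)
            goto test_mem_exit;     /* unexpected parity error */
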
@@ -8482,34 +8646,6 @@ test_mem_exit:
8482 return rc; 8646 return rc;
8483} 8647}
8484 8648
8485static void bnx2x_netif_start(struct bnx2x *bp)
8486{
8487 int i;
8488
8489 if (atomic_dec_and_test(&bp->intr_sem)) {
8490 if (netif_running(bp->dev)) {
8491 bnx2x_int_enable(bp);
8492 for_each_queue(bp, i)
8493 napi_enable(&bnx2x_fp(bp, i, napi));
8494 if (bp->state == BNX2X_STATE_OPEN)
8495 netif_wake_queue(bp->dev);
8496 }
8497 }
8498}
8499
8500static void bnx2x_netif_stop(struct bnx2x *bp)
8501{
8502 int i;
8503
8504 if (netif_running(bp->dev)) {
8505 netif_tx_disable(bp->dev);
8506 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8507 for_each_queue(bp, i)
8508 napi_disable(&bnx2x_fp(bp, i, napi));
8509 }
8510 bnx2x_int_disable_sync(bp);
8511}
8512
8513static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) 8649static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8514{ 8650{
8515 int cnt = 1000; 8651 int cnt = 1000;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8539 8675
8540 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8676 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8541 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8677 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8542 bnx2x_phy_hw_lock(bp); 8678 bnx2x_acquire_phy_lock(bp);
8543 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8679 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8544 bnx2x_phy_hw_unlock(bp); 8680 bnx2x_release_phy_lock(bp);
8545 8681
8546 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8682 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8547 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8683 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8548 bnx2x_phy_hw_lock(bp); 8684 bnx2x_acquire_phy_lock(bp);
8549 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8685 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8550 bnx2x_phy_hw_unlock(bp); 8686 bnx2x_release_phy_lock(bp);
8551 /* wait until link state is restored */ 8687 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp, link_up); 8688 bnx2x_wait_for_link(bp, link_up);
8553 8689
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
8771 if (!netif_running(dev)) 8907 if (!netif_running(dev))
8772 return; 8908 return;
8773 8909
8774 /* offline tests are not suppoerted in MF mode */ 8910 /* offline tests are not supported in MF mode */
8775 if (IS_E1HMF(bp)) 8911 if (IS_E1HMF(bp))
8776 etest->flags &= ~ETH_TEST_FL_OFFLINE; 8912 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8777 8913
@@ -8827,76 +8963,99 @@ static const struct {
8827 long offset; 8963 long offset;
8828 int size; 8964 int size;
8829 u32 flags; 8965 u32 flags;
8830 char string[ETH_GSTRING_LEN]; 8966#define STATS_FLAGS_PORT 1
8967#define STATS_FLAGS_FUNC 2
8968 u8 string[ETH_GSTRING_LEN];
8831} bnx2x_stats_arr[BNX2X_NUM_STATS] = { 8969} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8832/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, 8970/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8833 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, 8971 8, STATS_FLAGS_FUNC, "rx_bytes" },
8834 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, 8972 { STATS_OFFSET32(error_bytes_received_hi),
8835 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, 8973 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8974 { STATS_OFFSET32(total_bytes_transmitted_hi),
8975 8, STATS_FLAGS_FUNC, "tx_bytes" },
8976 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8977 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8836 { STATS_OFFSET32(total_unicast_packets_received_hi), 8978 { STATS_OFFSET32(total_unicast_packets_received_hi),
8837 8, 1, "rx_ucast_packets" }, 8979 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8838 { STATS_OFFSET32(total_multicast_packets_received_hi), 8980 { STATS_OFFSET32(total_multicast_packets_received_hi),
8839 8, 1, "rx_mcast_packets" }, 8981 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8840 { STATS_OFFSET32(total_broadcast_packets_received_hi), 8982 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8841 8, 1, "rx_bcast_packets" }, 8983 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8842 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8984 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8843 8, 1, "tx_packets" }, 8985 8, STATS_FLAGS_FUNC, "tx_packets" },
8844 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8986 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8845 8, 0, "tx_mac_errors" }, 8987 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8846/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8988/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8847 8, 0, "tx_carrier_errors" }, 8989 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8848 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8990 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8849 8, 0, "rx_crc_errors" }, 8991 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8850 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8992 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8851 8, 0, "rx_align_errors" }, 8993 8, STATS_FLAGS_PORT, "rx_align_errors" },
8852 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8994 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8853 8, 0, "tx_single_collisions" }, 8995 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8854 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8996 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8855 8, 0, "tx_multi_collisions" }, 8997 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8856 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8998 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8857 8, 0, "tx_deferred" }, 8999 8, STATS_FLAGS_PORT, "tx_deferred" },
8858 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 9000 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8859 8, 0, "tx_excess_collisions" }, 9001 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8860 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 9002 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8861 8, 0, "tx_late_collisions" }, 9003 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8862 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 9004 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8863 8, 0, "tx_total_collisions" }, 9005 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8864 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 9006 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8865 8, 0, "rx_fragments" }, 9007 8, STATS_FLAGS_PORT, "rx_fragments" },
8866/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, 9008/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9009 8, STATS_FLAGS_PORT, "rx_jabbers" },
8867 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 9010 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8868 8, 0, "rx_undersize_packets" }, 9011 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8869 { STATS_OFFSET32(jabber_packets_received), 9012 { STATS_OFFSET32(jabber_packets_received),
8870 4, 1, "rx_oversize_packets" }, 9013 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8871 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 9014 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8872 8, 0, "tx_64_byte_packets" }, 9015 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8873 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 9016 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8874 8, 0, "tx_65_to_127_byte_packets" }, 9017 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8875 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 9018 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8876 8, 0, "tx_128_to_255_byte_packets" }, 9019 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8877 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 9020 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8878 8, 0, "tx_256_to_511_byte_packets" }, 9021 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8879 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 9022 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8880 8, 0, "tx_512_to_1023_byte_packets" }, 9023 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8881 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 9024 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8882 8, 0, "tx_1024_to_1522_byte_packets" }, 9025 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8883 { STATS_OFFSET32(etherstatspktsover1522octets_hi), 9026 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8884 8, 0, "tx_1523_to_9022_byte_packets" }, 9027 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8885/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), 9028/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8886 8, 0, "rx_xon_frames" }, 9029 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8887 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), 9030 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8888 8, 0, "rx_xoff_frames" }, 9031 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8889 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, 9032 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8890 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, 9033 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9034 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9035 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8891 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 9036 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8892 8, 0, "rx_mac_ctrl_frames" }, 9037 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8893 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, 9038 { STATS_OFFSET32(mac_filter_discard),
8894 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, 9039 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8895 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, 9040 { STATS_OFFSET32(no_buff_discard),
8896 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, 9041 4, STATS_FLAGS_FUNC, "rx_discards" },
8897/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } 9042 { STATS_OFFSET32(xxoverflow_discard),
9043 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9044 { STATS_OFFSET32(brb_drop_hi),
9045 8, STATS_FLAGS_PORT, "brb_discard" },
9046 { STATS_OFFSET32(brb_truncate_hi),
9047 8, STATS_FLAGS_PORT, "brb_truncate" },
9048/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9049 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9050 { STATS_OFFSET32(rx_skb_alloc_failed),
9051 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9052/* 42 */{ STATS_OFFSET32(hw_csum_err),
9053 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8898}; 9054};
8899 9055
9056#define IS_NOT_E1HMF_STAT(bp, i) \
9057 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9058
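
STATS_FLAGS_PORT marks counters kept per physical port (the MAC-level statistics), STATS_FLAGS_FUNC those kept per PCI function. In E1H multi-function mode one function must not report port-wide counters it shares with its siblings, which is exactly what IS_NOT_E1HMF_STAT filters out; the three users below all iterate the same way:

    /* sketch of the filtering loop shared by get_strings,
     * get_stats_count and get_ethtool_stats */
    for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
            if (IS_NOT_E1HMF_STAT(bp, i))
                    continue;       /* port stat, hidden in E1H MF mode */
            /* ... emit bnx2x_stats_arr[i] into output slot j ... */
            j++;
    }
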
8900static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 9059static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8901{ 9060{
8902 struct bnx2x *bp = netdev_priv(dev); 9061 struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8905 switch (stringset) { 9064 switch (stringset) {
8906 case ETH_SS_STATS: 9065 case ETH_SS_STATS:
8907 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9066 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8908 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9067 if (IS_NOT_E1HMF_STAT(bp, i))
8909 continue; 9068 continue;
8910 strcpy(buf + j*ETH_GSTRING_LEN, 9069 strcpy(buf + j*ETH_GSTRING_LEN,
8911 bnx2x_stats_arr[i].string); 9070 bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
8925 int i, num_stats = 0; 9084 int i, num_stats = 0;
8926 9085
8927 for (i = 0; i < BNX2X_NUM_STATS; i++) { 9086 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8928 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9087 if (IS_NOT_E1HMF_STAT(bp, i))
8929 continue; 9088 continue;
8930 num_stats++; 9089 num_stats++;
8931 } 9090 }
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
8940 int i, j; 9099 int i, j;
8941 9100
8942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9101 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8943 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9102 if (IS_NOT_E1HMF_STAT(bp, i))
8944 continue; 9103 continue;
8945 9104
8946 if (bnx2x_stats_arr[i].size == 0) { 9105 if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9057 PCI_PM_CTRL_PME_STATUS)); 9216 PCI_PM_CTRL_PME_STATUS));
9058 9217
9059 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 9218 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9060 /* delay required during transition out of D3hot */ 9219 /* delay required during transition out of D3hot */
9061 msleep(20); 9220 msleep(20);
9062 break; 9221 break;
9063 9222
@@ -9092,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9092 napi); 9251 napi);
9093 struct bnx2x *bp = fp->bp; 9252 struct bnx2x *bp = fp->bp;
9094 int work_done = 0; 9253 int work_done = 0;
9254 u16 rx_cons_sb;
9095 9255
9096#ifdef BNX2X_STOP_ON_ERROR 9256#ifdef BNX2X_STOP_ON_ERROR
9097 if (unlikely(bp->panic)) 9257 if (unlikely(bp->panic))
@@ -9104,17 +9264,22 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9104 9264
9105 bnx2x_update_fpsb_idx(fp); 9265 bnx2x_update_fpsb_idx(fp);
9106 9266
9107 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || 9267 if (BNX2X_HAS_TX_WORK(fp))
9108 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9109 bnx2x_tx_int(fp, budget); 9268 bnx2x_tx_int(fp, budget);
9110 9269
9111 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) 9270 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9271 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9272 rx_cons_sb++;
9273 if (BNX2X_HAS_RX_WORK(fp))
9112 work_done = bnx2x_rx_int(fp, budget); 9274 work_done = bnx2x_rx_int(fp, budget);
9113 9275
9114 rmb(); /* bnx2x_has_work() reads the status block */ 9276 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9277 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9278 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9279 rx_cons_sb++;
9115 9280
9116 /* must not complete if we consumed full budget */ 9281 /* must not complete if we consumed full budget */
9117 if ((work_done < budget) && !bnx2x_has_work(fp)) { 9282 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9118 9283
9119#ifdef BNX2X_STOP_ON_ERROR 9284#ifdef BNX2X_STOP_ON_ERROR
9120poll_panic: 9285poll_panic:
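
The repeated rx_cons_sb adjustment in the poll loop deals with the RCQ paging: the last slot of each completion-queue page holds a next-page element rather than a real completion, so a status-block consumer index that lands on that slot is stepped over before it is compared against the local consumer. In isolation:

    /* sketch: normalize the status-block RX index before each
     * has-work comparison; the last index of a page is a
     * next-page element, not a completion */
    u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

    if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
            rx_cons_sb++;

It is done twice, once before the RX pass and once after the rmb(), because the status block may have advanced while packets were being processed.
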
@@ -9131,7 +9296,7 @@ poll_panic:
9131 9296
9132 9297
9133/* we split the first BD into headers and data BDs 9298/* we split the first BD into headers and data BDs
9134 * to ease the pain of our fellow micocode engineers 9299 * to ease the pain of our fellow microcode engineers
9135 * we use one mapping for both BDs 9300 * we use one mapping for both BDs
9136 * So far this has only been observed to happen 9301 * So far this has only been observed to happen
9137 * in Other Operating Systems(TM) 9302 * in Other Operating Systems(TM)
@@ -9238,7 +9403,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9238 /* Check if LSO packet needs to be copied: 9403 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 9404 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size = MAX_FETCH_BD - 3; 9405 int wnd_size = MAX_FETCH_BD - 3;
9241 /* Number of widnows to check */ 9406 /* Number of windows to check */
9242 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 9407 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9243 int wnd_idx = 0; 9408 int wnd_idx = 0;
9244 int frag_idx = 0; 9409 int frag_idx = 0;
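
The linearization heuristic for LSO walks the fragment list in windows: per the comment above, three of the MAX_FETCH_BD descriptors are already spoken for (headers BD, PBD, last BD), leaving wnd_size data frags per window. A sketch of the bookkeeping set up here, with the per-window test (which lies outside this hunk) left abstract:

    /* sketch: window bookkeeping only; the condition evaluated per
     * window is not part of this hunk and is left abstract */
    int wnd_size = MAX_FETCH_BD - 3;
    int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;

    for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
            /* examine frags [wnd_idx, wnd_idx + wnd_size) and decide
             * whether the skb must be linearized before transmit */
    }
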
@@ -9327,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9327 fp_index = (smp_processor_id() % bp->num_queues); 9492 fp_index = (smp_processor_id() % bp->num_queues);
9328 fp = &bp->fp[fp_index]; 9493 fp = &bp->fp[fp_index];
9329 9494
9330 if (unlikely(bnx2x_tx_avail(bp->fp) < 9495 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9331 (skb_shinfo(skb)->nr_frags + 3))) {
9332 bp->eth_stats.driver_xoff++, 9496 bp->eth_stats.driver_xoff++,
9333 netif_stop_queue(dev); 9497 netif_stop_queue(dev);
9334 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 9498 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9340,7 +9504,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9340 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9504 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9341 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9505 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9342 9506
9343 /* First, check if we need to linearaize the skb 9507 /* First, check if we need to linearize the skb
9344 (due to FW restrictions) */ 9508 (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9509 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9346 /* Statistics of linearization */ 9510 /* Statistics of linearization */
@@ -9349,7 +9513,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9349 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " 9513 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9350 "silently dropping this SKB\n"); 9514 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb); 9515 dev_kfree_skb_any(skb);
9352 return 0; 9516 return NETDEV_TX_OK;
9353 } 9517 }
9354 } 9518 }
9355 9519
@@ -9372,7 +9536,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9372 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 9536 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9373 tx_bd->general_data = (UNICAST_ADDRESS << 9537 tx_bd->general_data = (UNICAST_ADDRESS <<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9538 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9375 tx_bd->general_data |= 1; /* header nbd */ 9539 /* header nbd */
9540 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9376 9541
9377 /* remember the first BD of the packet */ 9542 /* remember the first BD of the packet */
9378 tx_buf->first_bd = fp->tx_bd_prod; 9543 tx_buf->first_bd = fp->tx_bd_prod;
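
general_data on the TX BD is a packed byte: the old code OR-ed the header-BD count in as a bare 1, which is only correct while the nbds field starts at bit 0; the new form places it through its shift macro so it stays correct if the layout moves. The packing, spelled out as a sketch:

    /* sketch: general_data packs the address type and the number of
     * header BDs; position each field via its shift macro */
    tx_bd->general_data = (UNICAST_ADDRESS << ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) |
                          (1 << ETH_TX_BD_HDR_NBDS_SHIFT);  /* one header BD */
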
@@ -9390,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9390 tx_bd->vlan = cpu_to_le16(pkt_prod); 9555 tx_bd->vlan = cpu_to_le16(pkt_prod);
9391 9556
9392 if (xmit_type) { 9557 if (xmit_type) {
9393
9394 /* turn on parsing and get a BD */ 9558 /* turn on parsing and get a BD */
9395 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 9559 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9396 pbd = (void *)&fp->tx_desc_ring[bd_prod]; 9560 pbd = (void *)&fp->tx_desc_ring[bd_prod];
@@ -9451,7 +9615,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9451 9615
9452 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 9616 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9453 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 9617 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9454 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2); 9618 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9455 tx_bd->nbd = cpu_to_le16(nbd); 9619 tx_bd->nbd = cpu_to_le16(nbd);
9456 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 9620 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9457 9621
@@ -9721,9 +9885,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9721 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9885 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9722 if (netif_running(dev)) { 9886 if (netif_running(dev)) {
9723 if (CHIP_IS_E1(bp)) 9887 if (CHIP_IS_E1(bp))
9724 bnx2x_set_mac_addr_e1(bp); 9888 bnx2x_set_mac_addr_e1(bp, 1);
9725 else 9889 else
9726 bnx2x_set_mac_addr_e1h(bp); 9890 bnx2x_set_mac_addr_e1h(bp, 1);
9727 } 9891 }
9728 9892
9729 return 0; 9893 return 0;
@@ -9734,6 +9898,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{ 9898{
9735 struct mii_ioctl_data *data = if_mii(ifr); 9899 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct bnx2x *bp = netdev_priv(dev); 9900 struct bnx2x *bp = netdev_priv(dev);
9901 int port = BP_PORT(bp);
9737 int err; 9902 int err;
9738 9903
9739 switch (cmd) { 9904 switch (cmd) {
@@ -9749,7 +9914,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9749 return -EAGAIN; 9914 return -EAGAIN;
9750 9915
9751 mutex_lock(&bp->port.phy_mutex); 9916 mutex_lock(&bp->port.phy_mutex);
9752 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9917 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9753 DEFAULT_PHY_DEV_ADDR, 9918 DEFAULT_PHY_DEV_ADDR,
9754 (data->reg_num & 0x1f), &mii_regval); 9919 (data->reg_num & 0x1f), &mii_regval);
9755 data->val_out = mii_regval; 9920 data->val_out = mii_regval;
@@ -9765,7 +9930,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9765 return -EAGAIN; 9930 return -EAGAIN;
9766 9931
9767 mutex_lock(&bp->port.phy_mutex); 9932 mutex_lock(&bp->port.phy_mutex);
9768 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9933 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9769 DEFAULT_PHY_DEV_ADDR, 9934 DEFAULT_PHY_DEV_ADDR,
9770 (data->reg_num & 0x1f), data->val_in); 9935 (data->reg_num & 0x1f), data->val_in);
9771 mutex_unlock(&bp->port.phy_mutex); 9936 mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10306,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10141 10306
10142 netif_device_detach(dev); 10307 netif_device_detach(dev);
10143 10308
10144 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10309 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10145 10310
10146 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 10311 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10147 10312
@@ -10174,7 +10339,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
10174 bnx2x_set_power_state(bp, PCI_D0); 10339 bnx2x_set_power_state(bp, PCI_D0);
10175 netif_device_attach(dev); 10340 netif_device_attach(dev);
10176 10341
10177 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 10342 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10178 10343
10179 rtnl_unlock(); 10344 rtnl_unlock();
10180 10345