Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c | 1878
1 file changed, 1510 insertions(+), 368 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..57ff5b3bcce6 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-7"
-#define DRV_MODULE_RELDATE	"2010/02/28"
+#define DRV_MODULE_VERSION	"1.52.53-1"
+#define DRV_MODULE_RELDATE	"2010/18/04"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
 static int int_mode;
 module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
+				"(1 INT#x; 2 MSI)");
 
 static int dropless_fc;
 module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
 			       u32 addr, u32 len)
 {
+	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
 	int offset = 0;
 
-	while (len > DMAE_LEN32_WR_MAX) {
+	while (len > dmae_wr_max) {
 		bnx2x_write_dmae(bp, phys_addr + offset,
-				 addr + offset, DMAE_LEN32_WR_MAX);
-		offset += DMAE_LEN32_WR_MAX * 4;
-		len -= DMAE_LEN32_WR_MAX;
+				 addr + offset, dmae_wr_max);
+		offset += dmae_wr_max * 4;
+		len -= dmae_wr_max;
 	}
 
 	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
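For reference, a runnable user-space sketch of the chunking pattern this hunk parameterizes, splitting one large transfer into max-sized pieces and flushing the remainder (names are illustrative, not driver code):

	#include <stdio.h>
	#include <string.h>

	/* Copy `len` 32-bit words in chunks of at most `max_words`, mirroring
	 * how bnx2x_write_dmae_phys_len() splits one DMAE write into several. */
	static void copy_chunked(unsigned int *dst, const unsigned int *src,
				 unsigned int len, unsigned int max_words)
	{
		unsigned int offset = 0;	/* in words */

		while (len > max_words) {
			memcpy(dst + offset, src + offset,
			       max_words * sizeof(*dst));
			offset += max_words;
			len -= max_words;
		}
		memcpy(dst + offset, src + offset, len * sizeof(*dst));
	}

	int main(void)
	{
		unsigned int src[100], dst[100], i;

		for (i = 0; i < 100; i++)
			src[i] = i;
		copy_chunked(dst, src, 100, 32);
		printf("dst[99] = %u\n", dst[99]);	/* prints 99 */
		return 0;
	}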
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
 
 static void bnx2x_fw_dump(struct bnx2x *bp)
 {
+	u32 addr;
 	u32 mark, offset;
 	__be32 data[9];
 	int word;
 
-	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
-	mark = ((mark + 0x3) & ~0x3);
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+
+	addr = bp->common.shmem_base - 0x0800 + 4;
+	mark = REG_RD(bp, addr);
+	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
 	pr_err("begin fw dump (mark 0x%x)\n", mark);
 
 	pr_err("");
-	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
 		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
 		data[8] = 0x0;
 		pr_cont("%s", (char *)data);
 	}
-	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
 		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
 		data[8] = 0x0;
 		pr_cont("%s", (char *)data);
 	}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
-		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
-		  " spq_prod_idx(%u)\n",
+	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+		  " spq_prod_idx(0x%x)\n",
 		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
-			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
-			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
 			  i, fp->rx_bd_prod, fp->rx_bd_cons,
 			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
-			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
+		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
 			  fp->rx_sge_prod, fp->last_max_sge,
 			  le16_to_cpu(fp->fp_u_idx),
 			  fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
-			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+			  " *tx_cons_sb(0x%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
-			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
 			  fp->status_blk->c_status_block.status_block_index,
 			  fp->tx_db.data.prod);
 	}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  * General service functions
  */
 
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
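The helper above acquires a hardware lock by writing the resource bit to the control register's set port and reading the status back to confirm the bit stuck. A runnable user-space sketch of the same try-then-confirm idea, modelled with an atomic bitmask rather than device registers (illustrative only, not the hardware mechanism itself):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* One bit per resource; acquisition succeeds only if the bit was
	 * clear before we set it, i.e. the "read back" confirms ownership. */
	static _Atomic unsigned int lock_status;

	static bool trylock_resource(unsigned int resource)
	{
		unsigned int bit = 1u << resource;
		unsigned int old = atomic_fetch_or(&lock_status, bit);
		return (old & bit) == 0;
	}

	static void unlock_resource(unsigned int resource)
	{
		atomic_fetch_and(&lock_status, ~(1u << resource));
	}

	int main(void)
	{
		printf("first try:    %d\n", trylock_resource(3));	/* 1 */
		printf("second try:   %d\n", trylock_resource(3));	/* 0 */
		unlock_resource(3);
		printf("after unlock: %d\n", trylock_resource(3));	/* 1 */
		return 0;
	}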
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 		if (--nbd)
 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 
 	default:
 		BNX2X_ERR("unexpected MC reply (%d) "
-			  "fp->state is %x\n", command, fp->state);
+			  "fp[%d] state is %x\n",
+			  command, fp->index, fp->state);
 		break;
 	}
 	mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
 		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
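These hunks belong to a wholesale move from the legacy pci_map_*/pci_unmap_* wrappers to the generic DMA API. A hedged kernel-style sketch of the map, check, unmap cycle every RX buffer now goes through (the wrapper function is illustrative; the dma_* calls are the real API):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* `dev` is the struct device embedded in the PCI device (&pdev->dev). */
	static int example_dma_cycle(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t mapping;

		mapping = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, mapping)))
			return -ENOMEM;

		/* ... hand `mapping` to the hardware, wait for completion ... */

		dma_unmap_single(dev, mapping, len, DMA_FROM_DEVICE);
		return 0;
	}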
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 #else
 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		     max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages >
-	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 #ifdef BCM_VLAN
 			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
 			    (!is_not_hwaccel_vlan_cqe))
-				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
 						le16_to_cpu(cqe->fast_path_cqe.
-							    vlan_tag));
+							    vlan_tag), skb);
 			else
 #endif
-				netif_receive_skb(skb);
+				napi_gro_receive(&fp->napi, skb);
 		} else {
 			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 			   " - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		struct sw_rx_bd *rx_buf = NULL;
 		struct sk_buff *skb;
 		union eth_rx_cqe *cqe;
-		u8 cqe_fp_flags;
+		u8 cqe_fp_flags, cqe_fp_status_flags;
 		u16 len, pad;
 
 		comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+		cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
 
 		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
 		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
 			prefetch(skb);
-			prefetch((u8 *)skb + 256);
 			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
 			pad = cqe->fast_path_cqe.placement_offset;
 
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				}
 			}
 
-			pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+			dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
 						       pad + RX_COPY_THRESH,
-						       PCI_DMA_FROMDEVICE);
-			prefetch(skb);
+						       DMA_FROM_DEVICE);
 			prefetch(((char *)(skb)) + 128);
 
 			/* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			} else
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-				pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+				dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
 						 bp->rx_buf_size,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
 
@@ -1684,6 +1726,12 @@ reuse_rx:
 
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
+			if ((bp->dev->features & NETIF_F_RXHASH) &&
+			    (cqe_fp_status_flags &
+			     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+				skb->rxhash = le32_to_cpu(
+				    cqe->fast_path_cqe.rss_hash_result);
+
 			skb->ip_summed = CHECKSUM_NONE;
 			if (bp->rx_csum) {
 				if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
 			if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 			     PARSING_FLAGS_VLAN))
-				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-					le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
+					le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
 			else
 #endif
-				netif_receive_skb(skb);
+				napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
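The RX completion path now hands skbs to GRO instead of netif_receive_skb(), so the stack can coalesce TCP segments before they reach the IP layer. A minimal sketch of the delivery step inside a NAPI poll handler (only the kernel calls are real; the wrapper is illustrative):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* napi_gro_receive() replaces netif_receive_skb() in NAPI context,
	 * letting the stack aggregate segments before protocol processing. */
	static void example_rx_deliver(struct napi_struct *napi,
				       struct net_device *dev,
				       struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(napi, skb);
	}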
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 	}
 
-	if (status)
-		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 		   status);
 
 	return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 	int func = BP_FUNC(bp);
 	u32 hw_lock_control_reg;
 
+	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 		DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
-	u8 rc;
+	u8 rc = 0;
 
-	bnx2x_acquire_phy_lock(bp);
-	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_release_phy_lock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
 
 	return rc;
 }
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
 		   than zero */
 		m_fair_vn.vn_credit_delta =
-			max((u32)(vn_min_rate * (T_FAIR_COEF /
-						 (8 * bp->vn_weight_sum))),
-			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+						   (8 * bp->vn_weight_sum))),
+			      (bp->cmng.fair_vars.fair_threshold * 2));
+		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
 		   m_fair_vn.vn_credit_delta);
 	}
 
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
+	u32 prev_link_status = bp->link_vars.link_status;
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
-	/* indicate link status */
-	bnx2x_link_report(bp);
+	/* indicate link status only if link status actually changed */
+	if (prev_link_status != bp->link_vars.link_status)
+		bnx2x_link_report(bp);
 
 	if (IS_E1HMF(bp)) {
 		int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 	return rc;
 }
 
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
 static void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 {
 	struct eth_spe *spe;
 
-	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
-	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
-	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	/* CID needs port number to be encoded int it */
 	spe->hdr.conn_and_cmd_data =
-			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
-				     HW_CID(bp, cid)));
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
 	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
 	if (common)
 		spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	bp->spq_left--;
 
+	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
 	return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 /* acquire split MCP access lock register */
 static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
-	u32 i, j, val;
+	u32 j, val;
 	int rc = 0;
 
 	might_sleep();
-	i = 100;
-	for (j = 0; j < i*10; j++) {
+	for (j = 0; j < 1000; j++) {
 		val = (1UL << 31);
 		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
 		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
 /* release split MCP access lock register */
 static void bnx2x_release_alr(struct bnx2x *bp)
 {
-	u32 val = 0;
-
-	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 }
 
 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
 	   aeu_mask, asserted);
-	aeu_mask &= ~(asserted & 0xff);
+	aeu_mask &= ~(asserted & 0x3ff);
 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
 	REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
 					  bp->link_params.ext_phy_config);
 
 	/* log the failure */
-	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
-		   "Please contact Dell Support for assistance.\n");
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+		   " the driver to shutdown the card to prevent permanent"
+		   " damage. Please contact OEM Support for assistance\n");
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 	}
 }
 
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val |= (1 << 16);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+
+	return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+	if (idx)
+		pr_cont(", ");
+	pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+				_print_next_block(par_num++, "BRB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+				_print_next_block(par_num++, "PARSER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "TSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+				_print_next_block(par_num++, "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "TSEMI");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++, "PBCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "XSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				_print_next_block(par_num++, "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				_print_next_block(par_num++, "VAUX PCI CORE");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				_print_next_block(par_num++, "DEBUG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				_print_next_block(par_num++, "USDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "USEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				_print_next_block(par_num++, "UPB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "CSDM");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "CSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+				_print_next_block(par_num++, "PXP");
+				break;
+			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++,
+					"PXPPCICLOCKCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+				_print_next_block(par_num++, "CFC");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+				_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+				_print_next_block(par_num++, "IGU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+				_print_next_block(par_num++, "MISC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				_print_next_block(par_num++, "MCP ROM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				_print_next_block(par_num++, "MCP UMP RX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				_print_next_block(par_num++, "MCP UMP TX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				_print_next_block(par_num++, "MCP SCPAD");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+				     u32 sig2, u32 sig3)
+{
+	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+		int par_num = 0;
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+			"[0]:0x%08x [1]:0x%08x "
+			"[2]:0x%08x [3]:0x%08x\n",
+			sig0 & HW_PRTY_ASSERT_SET_0,
+			sig1 & HW_PRTY_ASSERT_SET_1,
+			sig2 & HW_PRTY_ASSERT_SET_2,
+			sig3 & HW_PRTY_ASSERT_SET_3);
+		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+		       bp->dev->name);
+		par_num = bnx2x_print_blocks_with_parity0(
+			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+		par_num = bnx2x_print_blocks_with_parity1(
+			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+		par_num = bnx2x_print_blocks_with_parity2(
+			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+		par_num = bnx2x_print_blocks_with_parity3(
+			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+		printk("\n");
+		return true;
+	} else
+		return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
 {
 	struct attn_route attn;
-	struct attn_route group_mask;
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+		port*4);
+	attn.sig[1] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+		port*4);
+	attn.sig[2] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+		port*4);
+	attn.sig[3] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+		port*4);
+
+	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+					attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
 	int port = BP_PORT(bp);
 	int index;
 	u32 reg_addr;
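The recovery helpers above pack a 16-bit load counter into the low bits of one shared MISC register and keep reset flags in the bits above it. A runnable sketch of that packing arithmetic, with the register replaced by a plain variable (the macro values mirror the patch; everything else is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define LOAD_COUNTER_BITS	16
	#define LOAD_COUNTER_MASK	(((uint32_t)1 << LOAD_COUNTER_BITS) - 1)
	#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)

	/* Increment the counter half while preserving the flag half. */
	static uint32_t inc_load_cnt(uint32_t reg)
	{
		uint32_t cnt = ((reg & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
		return (reg & RESET_DONE_FLAG_MASK) | cnt;
	}

	int main(void)
	{
		uint32_t reg = (1u << 16) | 0xffff; /* flag set, counter at max */

		reg = inc_load_cnt(reg);	/* counter wraps, flag survives */
		printf("flags=0x%04x counter=%u\n",
		       reg >> LOAD_COUNTER_BITS, reg & LOAD_COUNTER_MASK);
		return 0;
	}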
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
+	if (bnx2x_chk_parity_attn(bp)) {
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		bnx2x_set_reset_in_progress(bp);
+		schedule_delayed_work(&bp->reset_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		bnx2x_release_alr(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * other function would "see" parity errors.
+		 */
+		return;
+	}
+
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
 	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 		if (deasserted & (1 << index)) {
-			group_mask = bp->attn_group[index];
+			group_mask = &bp->attn_group[index];
 
 			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
-			   index, group_mask.sig[0], group_mask.sig[1],
-			   group_mask.sig[2], group_mask.sig[3]);
+			   index, group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3]);
 
 			bnx2x_attn_int_deasserted3(bp,
-					attn.sig[3] & group_mask.sig[3]);
+					attn.sig[3] & group_mask->sig[3]);
 			bnx2x_attn_int_deasserted1(bp,
-					attn.sig[1] & group_mask.sig[1]);
+					attn.sig[1] & group_mask->sig[1]);
 			bnx2x_attn_int_deasserted2(bp,
-					attn.sig[2] & group_mask.sig[2]);
+					attn.sig[2] & group_mask->sig[2]);
 			bnx2x_attn_int_deasserted0(bp,
-					attn.sig[0] & group_mask.sig[0]);
-
-			if ((attn.sig[0] & group_mask.sig[0] &
-						HW_PRTY_ASSERT_SET_0) ||
-			    (attn.sig[1] & group_mask.sig[1] &
-						HW_PRTY_ASSERT_SET_1) ||
-			    (attn.sig[2] & group_mask.sig[2] &
-						HW_PRTY_ASSERT_SET_2))
-				BNX2X_ERR("FATAL HW block parity attention\n");
+					attn.sig[0] & group_mask->sig[0]);
 		}
 	}
 
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
 	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
 	   aeu_mask, deasserted);
-	aeu_mask |= (deasserted & 0xff);
+	aeu_mask |= (deasserted & 0x3ff);
 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
 	REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 	u16 status;
 
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 
 	/* HW attentions */
-	if (status & 0x1)
+	if (status & 0x1) {
 		bnx2x_attn_int(bp);
+		status &= ~0x1;
+	}
+
+	/* CStorm events: STAT_QUERY */
+	if (status & 0x2) {
+		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+		status &= ~0x2;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
 
 	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
 		     IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 		     IGU_INT_NOP, 1);
 	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
 		     IGU_INT_ENABLE, 1);
-
 }
 
 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 		u32 lo;
 		u32 hi;
 	} diff;
-	u32 nig_timer_max;
 
 	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
 		bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
 
-	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
-	if (nig_timer_max != estats->nig_timer_max) {
-		estats->nig_timer_max = nig_timer_max;
-		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
 	}
 
 	return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-			   " xstorm counter (%d) != stats_counter (%d)\n",
+			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, xclient->stats_counter, bp->stats_counter);
 			return -1;
 		}
 		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-			   " tstorm counter (%d) != stats_counter (%d)\n",
+			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, tclient->stats_counter, bp->stats_counter);
 			return -2;
 		}
 		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-			   " ustorm counter (%d) != stats_counter (%d)\n",
+			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, uclient->stats_counter, bp->stats_counter);
 			return -4;
 		}
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 		       qstats->total_bytes_received_lo,
 		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
 
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
 		qstats->valid_bytes_received_hi =
 					qstats->total_bytes_received_hi;
 		qstats->valid_bytes_received_lo =
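SUB_64() adjusts 64-bit counters that the firmware reports as hi/lo 32-bit halves. A runnable sketch of the borrow propagation such a macro needs (this macro body is an assumption written for illustration; the driver's own definition lives in its headers):

	#include <stdint.h>
	#include <stdio.h>

	/* Subtract (a_hi:a_lo) from (s_hi:s_lo); a borrow out of the low
	 * half must be taken from the high half. */
	#define SUB_64(s_hi, a_hi, s_lo, a_lo)				\
		do {							\
			uint32_t _borrow = ((s_lo) < (a_lo)) ? 1 : 0;	\
			(s_lo) -= (a_lo);				\
			(s_hi) -= (a_hi) + _borrow;			\
		} while (0)

	int main(void)
	{
		uint32_t hi = 1, lo = 0;	/* 0x1_0000_0000 */

		SUB_64(hi, 0, lo, 1);		/* subtract 1 */
		printf("0x%08x%08x\n", hi, lo);	/* 0x00000000ffffffff */
		return 0;
	}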
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 	bnx2x_drv_stats_update(bp);
 
 	if (netif_msg_timer(bp)) {
-		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = bp->fp;
-		struct tstorm_per_client_stats *old_tclient =
-							&bp->fp->old_tclient;
-		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
-		struct net_device_stats *nstats = &bp->dev->stats;
 		int i;
 
-		netdev_printk(KERN_DEBUG, bp->dev, "\n");
-		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
-		       " tx pkt (%lx)\n",
-		       bnx2x_tx_avail(fp0_tx),
-		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
-		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
-		       " rx pkt (%lx)\n",
-		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
-		       fp0_rx->rx_comp_cons),
-		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
-		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
-		       "brb truncate %u\n",
-		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
-		       qstats->driver_xoff,
+		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+		       bp->dev->name,
 		       estats->brb_drop_lo, estats->brb_truncate_lo);
-		printk(KERN_DEBUG "tstats: checksum_discard %u "
-		       "packets_too_big_discard %lu no_buff_discard %lu "
-		       "mac_discard %u mac_filter_discard %u "
-		       "xxovrflow_discard %u brb_truncate_discard %u "
-		       "ttl0_discard %u\n",
-		       le32_to_cpu(old_tclient->checksum_discard),
-		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
-		       bnx2x_hilo(&qstats->no_buff_discard_hi),
-		       estats->mac_discard, estats->mac_filter_discard,
-		       estats->xxoverflow_discard, estats->brb_truncate_discard,
-		       le32_to_cpu(old_tclient->ttl0_discard));
 
 		for_each_queue(bp, i) {
-			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
-			       bnx2x_fp(bp, i, tx_pkt),
-			       bnx2x_fp(bp, i, rx_pkt),
-			       bnx2x_fp(bp, i, rx_calls));
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+			       " rx pkt(%lu) rx calls(%lu %lu)\n",
+			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+			       fp->rx_comp_cons),
+			       le16_to_cpu(*fp->rx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_received_hi),
+			       fp->rx_calls, fp->rx_pkt);
+		}
+
+		for_each_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+			struct netdev_queue *txq =
+				netdev_get_tx_queue(bp->dev, i);
+
+			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+			       " tx pkt(%lu) tx calls (%lu)"
+			       " %s (Xoff events %u)\n",
+			       fp->name, bnx2x_tx_avail(fp),
+			       le16_to_cpu(*fp->tx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_transmitted_hi),
+			       fp->tx_pkt,
+			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+			       qstats->driver_xoff);
 		}
 	}
 
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state = bp->stats_state;
 
+	if (unlikely(bp->panic))
+		return;
+
 	bnx2x_stats_stm[state][event].action(bp);
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		}
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 		fp->rx_bd_prod = ring_prod;
 		/* must not have more available CQEs than BDs */
-		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
-				       cqe_ring_prod);
+		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+					 cqe_ring_prod);
 		fp->rx_pkt = fp->rx_calls = 0;
 
 		/* Warning!
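Several hunks swap cast-laden min()/max() calls for min_t()/max_t(), which state the comparison type once. A runnable sketch of why the chosen type matters (simplified macro; the kernel's real min_t also guards against double evaluation):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified min_t(): cast both operands to `type`, then compare. */
	#define min_t(type, a, b) \
		((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		uint16_t ring_size = 5000;
		uint32_t cqe_prod = 65537;	/* wraps to 1 in u16 */

		printf("as u16: %u\n",
		       (unsigned)min_t(uint16_t, ring_size, cqe_prod)); /* 1 */
		printf("as u32: %u\n",
		       (unsigned)min_t(uint32_t, ring_size, cqe_prod)); /* 5000 */
		return 0;
	}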
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 			context->ustorm_st_context.common.flags |=
 				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
 			context->ustorm_st_context.common.sge_buff_size =
-				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
-					 (u32)0xffff);
+				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+					   0xffff);
 			context->ustorm_st_context.common.sge_page_base_hi =
 				U64_HI(fp->rx_sge_mapping);
 			context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	u32 offset;
 	u16 max_agg_size;
 
-	if (is_multi(bp)) {
-		tstorm_config.config_flags = MULTI_FLAGS(bp);
+	tstorm_config.config_flags = RSS_FLAGS(bp);
+
+	if (is_multi(bp))
 		tstorm_config.rss_result_mask = MULTI_MASK;
-	}
 
 	/* Enable TPA if needed */
 	if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
-	max_agg_size =
-		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
-			  SGE_PAGE_SIZE * PAGES_PER_SGE),
-		    (u32)0xffff);
+	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 
-	/* Store it to internal memory */
+	/* Store cmng structures to internal memory */
 	if (bp->port.pmf)
 		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
 			REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5658 6042
5659static int bnx2x_gunzip_init(struct bnx2x *bp) 6043static int bnx2x_gunzip_init(struct bnx2x *bp)
5660{ 6044{
5661 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE, 6045 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5662 &bp->gunzip_mapping); 6046 &bp->gunzip_mapping, GFP_KERNEL);
5663 if (bp->gunzip_buf == NULL) 6047 if (bp->gunzip_buf == NULL)
5664 goto gunzip_nomem1; 6048 goto gunzip_nomem1;
5665 6049
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
5679 bp->strm = NULL; 6063 bp->strm = NULL;
5680 6064
5681gunzip_nomem2: 6065gunzip_nomem2:
5682 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6066 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5683 bp->gunzip_mapping); 6067 bp->gunzip_mapping);
5684 bp->gunzip_buf = NULL; 6068 bp->gunzip_buf = NULL;
5685 6069
5686gunzip_nomem1: 6070gunzip_nomem1:
5687 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n"); 6071 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6072 " un-compression\n");
5688 return -ENOMEM; 6073 return -ENOMEM;
5689} 6074}
5690 6075
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
5696 bp->strm = NULL; 6081 bp->strm = NULL;
5697 6082
5698 if (bp->gunzip_buf) { 6083 if (bp->gunzip_buf) {
5699 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6084 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5700 bp->gunzip_mapping); 6085 bp->gunzip_mapping);
5701 bp->gunzip_buf = NULL; 6086 bp->gunzip_buf = NULL;
5702 } 6087 }
5703} 6088}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5735 6120
5736 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6121 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5737 if (bp->gunzip_outlen & 0x3) 6122 if (bp->gunzip_outlen & 0x3)
5738 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6123 netdev_err(bp->dev, "Firmware decompression error:"
5739 bp->gunzip_outlen); 6124 " gunzip_outlen (%d) not aligned\n",
6125 bp->gunzip_outlen);
5740 bp->gunzip_outlen >>= 2; 6126 bp->gunzip_outlen >>= 2;
5741 6127
5742 zlib_inflateEnd(bp->strm); 6128 zlib_inflateEnd(bp->strm);
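The gunzip-buffer hunks above are one instance of this patch's driver-wide move from the PCI-specific DMA wrappers to the generic DMA API. A minimal sketch of the equivalence, using hypothetical names (my_buf, my_mapping, my_alloc are illustrative, not driver fields); note that pci_alloc_consistent() implied GFP_ATOMIC, so passing GFP_KERNEL here is a deliberate relaxation that is only safe in process context:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static void *my_buf;
static dma_addr_t my_mapping;

static int my_alloc(struct pci_dev *pdev, size_t size)
{
	/* was: my_buf = pci_alloc_consistent(pdev, size, &my_mapping); */
	my_buf = dma_alloc_coherent(&pdev->dev, size, &my_mapping, GFP_KERNEL);
	return my_buf ? 0 : -ENOMEM;
}

static void my_free(struct pci_dev *pdev, size_t size)
{
	/* was: pci_free_consistent(pdev, size, my_buf, my_mapping); */
	dma_free_coherent(&pdev->dev, size, my_buf, my_mapping);
}

The same substitution recurs below in the BNX2X_PCI_ALLOC/BNX2X_PCI_FREE macros and in the pci_map_single()/pci_unmap_single() to dma_map_single()/dma_unmap_single() conversions.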
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
5962 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 6348 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5963} 6349}
5964 6350
6351static const struct {
6352 u32 addr;
6353 u32 mask;
6354} bnx2x_parity_mask[] = {
6355 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6356 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6357 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6358 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6359 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6360 {QM_REG_QM_PRTY_MASK, 0x0},
6361 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6362 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6363 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6364 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6365 {CDU_REG_CDU_PRTY_MASK, 0x0},
6366 {CFC_REG_CFC_PRTY_MASK, 0x0},
6367 {DBG_REG_DBG_PRTY_MASK, 0x0},
6368 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6369 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6370 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6371 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6372 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6373 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6374 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6376 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6377 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6378 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6379 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6380 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6381 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6382 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6383};
6384
6385static void enable_blocks_parity(struct bnx2x *bp)
6386{
6387 int i, mask_arr_len =
6388 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6389
6390 for (i = 0; i < mask_arr_len; i++)
6391 REG_WR(bp, bnx2x_parity_mask[i].addr,
6392 bnx2x_parity_mask[i].mask);
6393}
6394
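enable_blocks_parity() open-codes the element count as sizeof(arr)/sizeof(arr[0]); <linux/kernel.h> already provides ARRAY_SIZE() for exactly that, so an equivalent, slightly more idiomatic form would be (a sketch, not what the patch applies):

#include <linux/kernel.h>

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}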
5965 6395
5966static void bnx2x_reset_common(struct bnx2x *bp) 6396static void bnx2x_reset_common(struct bnx2x *bp)
5967{ 6397{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
5992 6422
5993static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6423static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5994{ 6424{
6425 int is_required;
5995 u32 val; 6426 u32 val;
5996 u8 port; 6427 int port;
5997 u8 is_required = 0; 6428
6429 if (BP_NOMCP(bp))
6430 return;
5998 6431
6432 is_required = 0;
5999 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6433 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6000 SHARED_HW_CFG_FAN_FAILURE_MASK; 6434 SHARED_HW_CFG_FAN_FAILURE_MASK;
6001 6435
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6034 /* set to active low mode */ 6468 /* set to active low mode */
6035 val = REG_RD(bp, MISC_REG_SPIO_INT); 6469 val = REG_RD(bp, MISC_REG_SPIO_INT);
6036 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6470 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6037 MISC_REGISTERS_SPIO_INT_OLD_SET_POS); 6471 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6038 REG_WR(bp, MISC_REG_SPIO_INT, val); 6472 REG_WR(bp, MISC_REG_SPIO_INT, val);
6039 6473
6040 /* enable interrupt to signal the IGU */ 6474 /* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6200 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 6634 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6201 6635
6202 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6636 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6203 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { 6637 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6204 REG_WR(bp, i, 0xc0cac01a); 6638 REG_WR(bp, i, random32());
6205 /* TODO: replace with something meaningful */
6206 }
6207 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 6639 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6208#ifdef BCM_CNIC 6640#ifdef BCM_CNIC
6209 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6641 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6221 6653
6222 if (sizeof(union cdu_context) != 1024) 6654 if (sizeof(union cdu_context) != 1024)
6223 /* we currently assume that a context is 1024 bytes */ 6655 /* we currently assume that a context is 1024 bytes */
6224 pr_alert("please adjust the size of cdu_context(%ld)\n", 6656 dev_alert(&bp->pdev->dev, "please adjust the size "
6657 "of cdu_context(%ld)\n",
6225 (long)sizeof(union cdu_context)); 6658 (long)sizeof(union cdu_context));
6226 6659
6227 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); 6660 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6305 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6738 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6306 6739
6307 enable_blocks_attention(bp); 6740 enable_blocks_attention(bp);
6741 if (CHIP_PARITY_SUPPORTED(bp))
6742 enable_blocks_parity(bp);
6308 6743
6309 if (!BP_NOMCP(bp)) { 6744 if (!BP_NOMCP(bp)) {
6310 bnx2x_acquire_phy_lock(bp); 6745 bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6323 u32 low, high; 6758 u32 low, high;
6324 u32 val; 6759 u32 val;
6325 6760
6326 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port); 6761 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6327 6762
6328 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 6763 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6329 6764
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6342 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6777 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6343 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6778 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6344#endif 6779#endif
6780
6345 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 6781 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6346 6782
6347 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 6783 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
6534 u32 addr, val; 6970 u32 addr, val;
6535 int i; 6971 int i;
6536 6972
6537 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func); 6973 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6538 6974
6539 /* set MSI reconfigure capability */ 6975 /* set MSI reconfigure capability */
6540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 6976 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6692#define BNX2X_PCI_FREE(x, y, size) \ 7128#define BNX2X_PCI_FREE(x, y, size) \
6693 do { \ 7129 do { \
6694 if (x) { \ 7130 if (x) { \
6695 pci_free_consistent(bp->pdev, size, x, y); \ 7131 dma_free_coherent(&bp->pdev->dev, size, x, y); \
6696 x = NULL; \ 7132 x = NULL; \
6697 y = 0; \ 7133 y = 0; \
6698 } \ 7134 } \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6773 7209
6774#define BNX2X_PCI_ALLOC(x, y, size) \ 7210#define BNX2X_PCI_ALLOC(x, y, size) \
6775 do { \ 7211 do { \
6776 x = pci_alloc_consistent(bp->pdev, size, y); \ 7212 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6777 if (x == NULL) \ 7213 if (x == NULL) \
6778 goto alloc_mem_err; \ 7214 goto alloc_mem_err; \
6779 memset(x, 0, size); \ 7215 memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6906 if (skb == NULL) 7342 if (skb == NULL)
6907 continue; 7343 continue;
6908 7344
6909 pci_unmap_single(bp->pdev, 7345 dma_unmap_single(&bp->pdev->dev,
6910 pci_unmap_addr(rx_buf, mapping), 7346 dma_unmap_addr(rx_buf, mapping),
6911 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 7347 bp->rx_buf_size, DMA_FROM_DEVICE);
6912 7348
6913 rx_buf->skb = NULL; 7349 rx_buf->skb = NULL;
6914 dev_kfree_skb(skb); 7350 dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6987 7423
6988 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 7424 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6989 BNX2X_NUM_QUEUES(bp) + offset); 7425 BNX2X_NUM_QUEUES(bp) + offset);
6990 if (rc) { 7426
7427 /*
7428 * reconfigure number of tx/rx queues according to available
7429 * MSI-X vectors
7430 */
7431 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7432 /* vectors available for FP */
7433 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7434
7435 DP(NETIF_MSG_IFUP,
7436 "Trying to use less MSI-X vectors: %d\n", rc);
7437
7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7439
7440 if (rc) {
7441 DP(NETIF_MSG_IFUP,
7442 "MSI-X is not attainable rc %d\n", rc);
7443 return rc;
7444 }
7445
7446 bp->num_queues = min(bp->num_queues, fp_vec);
7447
7448 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7449 bp->num_queues);
7450 } else if (rc) {
6991 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 7451 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6992 return rc; 7452 return rc;
6993 } 7453 }
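The new branch exploits the pci_enable_msix() contract of this era: a negative return is an error, zero is success, and a positive return is the number of vectors that could have been allocated. Stripped of the driver specifics, the retry idiom is (a sketch with illustrative names; later kernels fold this dance into pci_enable_msix_range()):

#include <linux/pci.h>

static int my_enable_msix(struct pci_dev *pdev,
			  struct msix_entry *table, int nvec)
{
	int rc = pci_enable_msix(pdev, table, nvec);

	if (rc > 0)		/* only rc vectors available: retry with rc */
		rc = pci_enable_msix(pdev, table, rc);
	return rc;		/* 0 on success, negative on failure */
}

The driver additionally refuses to retry below BNX2X_MIN_MSIX_VEC_CNT, since one vector is reserved for slowpath interrupts before any fastpath queue gets one.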
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7028 } 7488 }
7029 7489
7030 i = BNX2X_NUM_QUEUES(bp); 7490 i = BNX2X_NUM_QUEUES(bp);
7031 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 7491 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7032 bp->msix_table[0].vector, 7492 " ... fp[%d] %d\n",
7033 0, bp->msix_table[offset].vector, 7493 bp->msix_table[0].vector,
7034 i - 1, bp->msix_table[offset + i - 1].vector); 7494 0, bp->msix_table[offset].vector,
7495 i - 1, bp->msix_table[offset + i - 1].vector);
7035 7496
7036 return 0; 7497 return 0;
7037} 7498}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
7409 bp->num_queues = 1; 7870 bp->num_queues = 1;
7410 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7871 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7411 break; 7872 break;
7412
7413 case INT_MODE_MSIX:
7414 default: 7873 default:
7415 /* Set number of queues according to bp->multi_mode value */ 7874 /* Set number of queues according to bp->multi_mode value */
7416 bnx2x_set_num_queues_msix(bp); 7875 bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7656 if (bp->state == BNX2X_STATE_OPEN) 8115 if (bp->state == BNX2X_STATE_OPEN)
7657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 8116 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658#endif 8117#endif
8118 bnx2x_inc_load_cnt(bp);
7659 8119
7660 return 0; 8120 return 0;
7661 8121
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7843 } 8303 }
7844} 8304}
7845 8305
7846/* must be called with rtnl_lock */ 8306static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7847static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7848{ 8307{
7849 int port = BP_PORT(bp); 8308 int port = BP_PORT(bp);
7850 u32 reset_code = 0; 8309 u32 reset_code = 0;
7851 int i, cnt, rc; 8310 int i, cnt, rc;
7852 8311
7853#ifdef BCM_CNIC
7854 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855#endif
7856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7857
7858 /* Set "drop all" */
7859 bp->rx_mode = BNX2X_RX_MODE_NONE;
7860 bnx2x_set_storm_rx_mode(bp);
7861
7862 /* Disable HW interrupts, NAPI and Tx */
7863 bnx2x_netif_stop(bp, 1);
7864
7865 del_timer_sync(&bp->timer);
7866 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7867 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7868 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7869
7870 /* Release IRQs */
7871 bnx2x_free_irq(bp, false);
7872
7873 /* Wait until tx fastpath tasks complete */ 8312 /* Wait until tx fastpath tasks complete */
7874 for_each_queue(bp, i) { 8313 for_each_queue(bp, i) {
7875 struct bnx2x_fastpath *fp = &bp->fp[i]; 8314 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,70 @@ unload_error:
8010 if (!BP_NOMCP(bp)) 8449 if (!BP_NOMCP(bp))
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 8450 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012 8451
8452}
8453
8454static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8455{
8456 u32 val;
8457
8458 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8459
8460 if (CHIP_IS_E1(bp)) {
8461 int port = BP_PORT(bp);
8462 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8463 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8464
8465 val = REG_RD(bp, addr);
8466 val &= ~(0x300);
8467 REG_WR(bp, addr, val);
8468 } else if (CHIP_IS_E1H(bp)) {
8469 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8470 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8471 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8472 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8473 }
8474}
8475
8476/* must be called with rtnl_lock */
8477static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8478{
8479 int i;
8480
8481 if (bp->state == BNX2X_STATE_CLOSED) {
8482 /* Interface has been removed - nothing to recover */
8483 bp->recovery_state = BNX2X_RECOVERY_DONE;
8484 bp->is_leader = 0;
8485 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8486 smp_wmb();
8487
8488 return -EINVAL;
8489 }
8490
8491#ifdef BCM_CNIC
8492 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8493#endif
8494 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8495
8496 /* Set "drop all" */
8497 bp->rx_mode = BNX2X_RX_MODE_NONE;
8498 bnx2x_set_storm_rx_mode(bp);
8499
8500 /* Disable HW interrupts, NAPI and Tx */
8501 bnx2x_netif_stop(bp, 1);
8502 netif_carrier_off(bp->dev);
8503
8504 del_timer_sync(&bp->timer);
8505 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8506 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8507 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8508
8509 /* Release IRQs */
8510 bnx2x_free_irq(bp, false);
8511
8512 /* Cleanup the chip if needed */
8513 if (unload_mode != UNLOAD_RECOVERY)
8514 bnx2x_chip_cleanup(bp, unload_mode);
8515
8013 bp->port.pmf = 0; 8516 bp->port.pmf = 0;
8014 8517
8015 /* Free SKBs, SGEs, TPA pool and driver internals */ 8518 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8022,19 +8525,448 @@ unload_error:
8022 8525
8023 bp->state = BNX2X_STATE_CLOSED; 8526 bp->state = BNX2X_STATE_CLOSED;
8024 8527
 8025 netif_carrier_off(bp->dev); 8528 /* The last driver must disable the "close the gate" functionality
 8529 * if there is no parity attention or "process kill" pending.
8530 */
8531 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8532 bnx2x_reset_is_done(bp))
8533 bnx2x_disable_close_the_gate(bp);
8534
 8535 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8536 if (unload_mode == UNLOAD_RECOVERY)
8537 bp->fw_seq = 0;
8538
8539 return 0;
8540}
8541
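bnx2x_inc_load_cnt() and bnx2x_dec_load_cnt() (added elsewhere in this patch) maintain a per-ASIC count of loaded functions, which is how the unload path above knows whether it is the last user: the gate is reopened only when the count reaches zero, no parity attention is latched and no reset is in progress. Clearing bp->fw_seq on the UNLOAD_RECOVERY path presumably resynchronizes the driver with the MCP mailbox sequence, which restarts once the MCP itself is reset by the coming "process kill".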
8542/* Close gates #2, #3 and #4: */
8543static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8544{
8545 u32 val, addr;
8546
8547 /* Gates #2 and #4a are closed/opened for "not E1" only */
8548 if (!CHIP_IS_E1(bp)) {
8549 /* #4 */
8550 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8551 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8552 close ? (val | 0x1) : (val & (~(u32)1)));
8553 /* #2 */
8554 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8555 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8556 close ? (val | 0x1) : (val & (~(u32)1)));
8557 }
8558
8559 /* #3 */
8560 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8561 val = REG_RD(bp, addr);
8562 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8563
8564 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8565 close ? "closing" : "opening");
8566 mmiowb();
8567}
8568
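Note the inverted sense of gate #3: the PXP_REG_HST_DISCARD_* bits block traffic when set, while bit 0 of HC_REG_CONFIG_* is set to open the gate, hence the (!close) above. All three toggles are the same read-modify-write on bit 0; an illustrative helper (not part of the patch) would be:

static void reg_write_bit0(struct bnx2x *bp, u32 reg, bool set)
{
	u32 val = REG_RD(bp, reg);

	REG_WR(bp, reg, set ? (val | 0x1) : (val & ~(u32)0x1));
}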
8569#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8570
8571static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8572{
8573 /* Do some magic... */
8574 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8575 *magic_val = val & SHARED_MF_CLP_MAGIC;
8576 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8577}
8578
8579/* Restore the value of the `magic' bit.
8580 *
 8581 * @param bp Driver handle.
8582 * @param magic_val Old value of the `magic' bit.
8583 */
8584static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8585{
8586 /* Restore the `magic' bit value... */
8587 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8588 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8589 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8590 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8591 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8592 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8593}
8594
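The prep/done pair brackets an MCP reset so that the multi-function CLP configuration survives it. As wired up below in bnx2x_reset_mcp_prep() and bnx2x_reset_mcp_comp(), the usage reduces to this sketch:

	u32 magic;

	bnx2x_clp_reset_prep(bp, &magic);	/* force magic bit on, save old value */
	/* ... MCP is reset and polled back up here ... */
	bnx2x_clp_reset_done(bp, magic);	/* restore the saved bit */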
8595/* Prepares for MCP reset: takes care of CLP configurations.
8596 *
8597 * @param bp
8598 * @param magic_val Old value of 'magic' bit.
8599 */
8600static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8601{
8602 u32 shmem;
8603 u32 validity_offset;
8604
8605 DP(NETIF_MSG_HW, "Starting\n");
8606
8607 /* Set `magic' bit in order to save MF config */
8608 if (!CHIP_IS_E1(bp))
8609 bnx2x_clp_reset_prep(bp, magic_val);
8610
8611 /* Get shmem offset */
8612 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8613 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8614
8615 /* Clear validity map flags */
8616 if (shmem > 0)
8617 REG_WR(bp, shmem + validity_offset, 0);
8618}
8619
8620#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8621#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8622
8623/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8624 * depending on the HW type.
8625 *
8626 * @param bp
8627 */
8628static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8629{
 8630 /* special handling for emulation and FPGA:
 8631 * wait 10 times longer */
8632 if (CHIP_REV_IS_SLOW(bp))
8633 msleep(MCP_ONE_TIMEOUT*10);
8634 else
8635 msleep(MCP_ONE_TIMEOUT);
8636}
8637
8638static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8639{
8640 u32 shmem, cnt, validity_offset, val;
8641 int rc = 0;
8642
8643 msleep(100);
8644
8645 /* Get shmem offset */
8646 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8647 if (shmem == 0) {
8648 BNX2X_ERR("Shmem 0 return failure\n");
8649 rc = -ENOTTY;
8650 goto exit_lbl;
8651 }
8652
8653 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8654
8655 /* Wait for MCP to come up */
8656 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
 8657 /* TBD: it's best to check the validity map of the last port;
 8658 * currently this checks port 0.
8659 */
8660 val = REG_RD(bp, shmem + validity_offset);
8661 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8662 shmem + validity_offset, val);
8663
8664 /* check that shared memory is valid. */
8665 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8666 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8667 break;
8668
8669 bnx2x_mcp_wait_one(bp);
8670 }
8671
8672 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8673
8674 /* Check that shared memory is valid. This indicates that MCP is up. */
8675 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8676 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8677 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8678 rc = -ENOTTY;
8679 goto exit_lbl;
8680 }
8681
8682exit_lbl:
8683 /* Restore the `magic' bit value */
8684 if (!CHIP_IS_E1(bp))
8685 bnx2x_clp_reset_done(bp, magic_val);
8686
8687 return rc;
8688}
8689
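Worked out, the poll above runs at most MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 iterations of 100 ms each, i.e. up to 5 s on real silicon (and up to 50 s on emulation/FPGA, where bnx2x_mcp_wait_one() sleeps ten times longer), before giving up on the MCP reposting both validity signatures.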
8690static void bnx2x_pxp_prep(struct bnx2x *bp)
8691{
8692 if (!CHIP_IS_E1(bp)) {
8693 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8694 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8695 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8696 mmiowb();
8697 }
8698}
8699
8700/*
8701 * Reset the whole chip except for:
8702 * - PCIE core
8703 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8704 * one reset bit)
8705 * - IGU
8706 * - MISC (including AEU)
8707 * - GRC
8708 * - RBCN, RBCP
8709 */
8710static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8711{
8712 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8713
8714 not_reset_mask1 =
8715 MISC_REGISTERS_RESET_REG_1_RST_HC |
8716 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8717 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8718
8719 not_reset_mask2 =
8720 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8721 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8723 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8724 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8725 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8726 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8727 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8728
8729 reset_mask1 = 0xffffffff;
8730
8731 if (CHIP_IS_E1(bp))
8732 reset_mask2 = 0xffff;
8733 else
8734 reset_mask2 = 0x1ffff;
8735
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8737 reset_mask1 & (~not_reset_mask1));
8738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8739 reset_mask2 & (~not_reset_mask2));
8740
8741 barrier();
8742 mmiowb();
8743
8744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8746 mmiowb();
8747}
8748
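Following the convention visible in bnx2x_reset_common(), a write to a MISC_REGISTERS_RESET_REG_*_CLEAR register asserts reset for the bits written, and the matching _SET register releases them. So the first two writes put every block except the keep-lists (HC, PXP and the PXP vector on register 1; MDIO, the EMAC hard cores, MISC core, RBCN, GRC and the MCP hard-core bits on register 2) into reset, and the unconditional _SET writes that follow bring all blocks back out together - exactly the scope described in the comment above the function.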
8749static int bnx2x_process_kill(struct bnx2x *bp)
8750{
8751 int cnt = 1000;
8752 u32 val = 0;
8753 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8754
8755
8756 /* Empty the Tetris buffer, wait for 1s */
8757 do {
8758 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8759 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8760 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8761 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8762 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8763 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8764 ((port_is_idle_0 & 0x1) == 0x1) &&
8765 ((port_is_idle_1 & 0x1) == 0x1) &&
8766 (pgl_exp_rom2 == 0xffffffff))
8767 break;
8768 msleep(1);
8769 } while (cnt-- > 0);
8770
8771 if (cnt <= 0) {
8772 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8773 " are still"
8774 " outstanding read requests after 1s!\n");
8775 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8776 " port_is_idle_0=0x%08x,"
8777 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8778 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8779 pgl_exp_rom2);
8780 return -EAGAIN;
8781 }
8782
8783 barrier();
8784
8785 /* Close gates #2, #3 and #4 */
8786 bnx2x_set_234_gates(bp, true);
8787
8788 /* TBD: Indicate that "process kill" is in progress to MCP */
8789
8790 /* Clear "unprepared" bit */
8791 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8792 barrier();
8793
8794 /* Make sure all is written to the chip before the reset */
8795 mmiowb();
8796
8797 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8798 * PSWHST, GRC and PSWRD Tetris buffer.
8799 */
8800 msleep(1);
8801
 8802 /* Prepare for chip reset: */
8803 /* MCP */
8804 bnx2x_reset_mcp_prep(bp, &val);
8805
8806 /* PXP */
8807 bnx2x_pxp_prep(bp);
8808 barrier();
8809
8810 /* reset the chip */
8811 bnx2x_process_kill_chip_reset(bp);
8812 barrier();
8813
8814 /* Recover after reset: */
8815 /* MCP */
8816 if (bnx2x_reset_mcp_comp(bp, val))
8817 return -EAGAIN;
8818
8819 /* PXP */
8820 bnx2x_pxp_prep(bp);
8821
8822 /* Open the gates #2, #3 and #4 */
8823 bnx2x_set_234_gates(bp, false);
8824
 8825 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
8826 * reset state, re-enable attentions. */
8026 8827
8027 return 0; 8828 return 0;
8028} 8829}
8029 8830
8831static int bnx2x_leader_reset(struct bnx2x *bp)
8832{
8833 int rc = 0;
8834 /* Try to recover after the failure */
8835 if (bnx2x_process_kill(bp)) {
 8836 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8837 bp->dev->name);
8838 rc = -EAGAIN;
8839 goto exit_leader_reset;
8840 }
8841
8842 /* Clear "reset is in progress" bit and update the driver state */
8843 bnx2x_set_reset_done(bp);
8844 bp->recovery_state = BNX2X_RECOVERY_DONE;
8845
8846exit_leader_reset:
8847 bp->is_leader = 0;
8848 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8849 smp_wmb();
8850 return rc;
8851}
8852
8853static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8854
8855/* Assumption: runs under rtnl lock. This together with the fact
8856 * that it's called only from bnx2x_reset_task() ensure that it
8857 * will never be called when netif_running(bp->dev) is false.
8858 */
8859static void bnx2x_parity_recover(struct bnx2x *bp)
8860{
8861 DP(NETIF_MSG_HW, "Handling parity\n");
8862 while (1) {
8863 switch (bp->recovery_state) {
8864 case BNX2X_RECOVERY_INIT:
8865 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8866 /* Try to get a LEADER_LOCK HW lock */
8867 if (bnx2x_trylock_hw_lock(bp,
8868 HW_LOCK_RESOURCE_RESERVED_08))
8869 bp->is_leader = 1;
8870
8871 /* Stop the driver */
8872 /* If interface has been removed - break */
8873 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8874 return;
8875
8876 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8877 /* Ensure "is_leader" and "recovery_state"
 8878 * updates are seen on other CPUs
8879 */
8880 smp_wmb();
8881 break;
8882
8883 case BNX2X_RECOVERY_WAIT:
8884 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8885 if (bp->is_leader) {
8886 u32 load_counter = bnx2x_get_load_cnt(bp);
8887 if (load_counter) {
 8888 /* Wait until all other functions go
 8889 * down.
8890 */
8891 schedule_delayed_work(&bp->reset_task,
8892 HZ/10);
8893 return;
8894 } else {
 8895 /* If all other functions are down -
8896 * try to bring the chip back to
8897 * normal. In any case it's an exit
8898 * point for a leader.
8899 */
8900 if (bnx2x_leader_reset(bp) ||
8901 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8902 printk(KERN_ERR"%s: Recovery "
8903 "has failed. Power cycle is "
8904 "needed.\n", bp->dev->name);
8905 /* Disconnect this device */
8906 netif_device_detach(bp->dev);
8907 /* Block ifup for all function
8908 * of this ASIC until
8909 * "process kill" or power
8910 * cycle.
8911 */
8912 bnx2x_set_reset_in_progress(bp);
8913 /* Shut down the power */
8914 bnx2x_set_power_state(bp,
8915 PCI_D3hot);
8916 return;
8917 }
8918
8919 return;
8920 }
8921 } else { /* non-leader */
8922 if (!bnx2x_reset_is_done(bp)) {
8923 /* Try to get a LEADER_LOCK HW lock as
8924 * long as a former leader may have
8925 * been unloaded by the user or
 8926 * released leadership for another
 8927 * reason.
8928 */
8929 if (bnx2x_trylock_hw_lock(bp,
8930 HW_LOCK_RESOURCE_RESERVED_08)) {
8931 /* I'm a leader now! Restart a
8932 * switch case.
8933 */
8934 bp->is_leader = 1;
8935 break;
8936 }
8937
8938 schedule_delayed_work(&bp->reset_task,
8939 HZ/10);
8940 return;
8941
8942 } else { /* A leader has completed
8943 * the "process kill". It's an exit
8944 * point for a non-leader.
8945 */
8946 bnx2x_nic_load(bp, LOAD_NORMAL);
8947 bp->recovery_state =
8948 BNX2X_RECOVERY_DONE;
8949 smp_wmb();
8950 return;
8951 }
8952 }
8953 default:
8954 return;
8955 }
8956 }
8957}
8958
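In outline, bnx2x_parity_recover() is a two-state machine re-entered from the reset task. In BNX2X_RECOVERY_INIT every function races for the leader lock, unloads itself with UNLOAD_RECOVERY and moves to BNX2X_RECOVERY_WAIT. In the WAIT state the leader polls the shared load counter (rescheduling itself every HZ/10) until all functions are down, then runs the "process kill" reset and reloads; a non-leader either waits for the leader to finish and reloads, or promotes itself if the leader lock has been dropped mid-recovery. An unrecoverable failure detaches the netdev and drops the device into D3hot until a power cycle.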
8959/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 8960 * scheduled on a generic queue in order to prevent a deadlock.
8961 */
8030static void bnx2x_reset_task(struct work_struct *work) 8962static void bnx2x_reset_task(struct work_struct *work)
8031{ 8963{
8032 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); 8964 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8033 8965
8034#ifdef BNX2X_STOP_ON_ERROR 8966#ifdef BNX2X_STOP_ON_ERROR
8035 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 8967 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8036 " so reset not done to allow debug dump,\n" 8968 " so reset not done to allow debug dump,\n"
8037 " you will need to reboot when done\n"); 8969 KERN_ERR " you will need to reboot when done\n");
8038 return; 8970 return;
8039#endif 8971#endif
8040 8972
@@ -8043,8 +8975,12 @@ static void bnx2x_reset_task(struct work_struct *work)
8043 if (!netif_running(bp->dev)) 8975 if (!netif_running(bp->dev))
8044 goto reset_task_exit; 8976 goto reset_task_exit;
8045 8977
8046 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 8978 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8047 bnx2x_nic_load(bp, LOAD_NORMAL); 8979 bnx2x_parity_recover(bp);
8980 else {
8981 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8982 bnx2x_nic_load(bp, LOAD_NORMAL);
8983 }
8048 8984
8049reset_task_exit: 8985reset_task_exit:
8050 rtnl_unlock(); 8986 rtnl_unlock();
@@ -8264,7 +9200,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8264 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9200 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8265 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9201 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8266 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9202 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267 BNX2X_ERR("BAD MCP validity signature\n"); 9203 BNX2X_ERROR("BAD MCP validity signature\n");
8268 9204
8269 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9205 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8270 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9206 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9224,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8288 if (val < BNX2X_BC_VER) { 9224 if (val < BNX2X_BC_VER) {
8289 /* for now only warn 9225 /* for now only warn
8290 * later we might need to enforce this */ 9226 * later we might need to enforce this */
8291 BNX2X_ERR("This driver needs bc_ver %X but found %X," 9227 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
8292 " please upgrade BC\n", BNX2X_BC_VER, val); 9228 "please upgrade BC\n", BNX2X_BC_VER, val);
8293 } 9229 }
8294 bp->link_params.feature_config_flags |= 9230 bp->link_params.feature_config_flags |=
8295 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 9231 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9246,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9246 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9247 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8312 9248
8313 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4); 9249 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9250 val, val2, val3, val4);
8314} 9251}
8315 9252
8316static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9253static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9525,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8588 bp->port.advertising = (ADVERTISED_10baseT_Full | 9525 bp->port.advertising = (ADVERTISED_10baseT_Full |
8589 ADVERTISED_TP); 9526 ADVERTISED_TP);
8590 } else { 9527 } else {
8591 BNX2X_ERR("NVRAM config error. " 9528 BNX2X_ERROR("NVRAM config error. "
8592 "Invalid link_config 0x%x" 9529 "Invalid link_config 0x%x"
8593 " speed_cap_mask 0x%x\n", 9530 " speed_cap_mask 0x%x\n",
8594 bp->port.link_config, 9531 bp->port.link_config,
8595 bp->link_params.speed_cap_mask); 9532 bp->link_params.speed_cap_mask);
8596 return; 9533 return;
8597 } 9534 }
8598 break; 9535 break;
@@ -8604,11 +9541,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 bp->port.advertising = (ADVERTISED_10baseT_Half | 9541 bp->port.advertising = (ADVERTISED_10baseT_Half |
8605 ADVERTISED_TP); 9542 ADVERTISED_TP);
8606 } else { 9543 } else {
8607 BNX2X_ERR("NVRAM config error. " 9544 BNX2X_ERROR("NVRAM config error. "
8608 "Invalid link_config 0x%x" 9545 "Invalid link_config 0x%x"
8609 " speed_cap_mask 0x%x\n", 9546 " speed_cap_mask 0x%x\n",
8610 bp->port.link_config, 9547 bp->port.link_config,
8611 bp->link_params.speed_cap_mask); 9548 bp->link_params.speed_cap_mask);
8612 return; 9549 return;
8613 } 9550 }
8614 break; 9551 break;
@@ -8619,11 +9556,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8619 bp->port.advertising = (ADVERTISED_100baseT_Full | 9556 bp->port.advertising = (ADVERTISED_100baseT_Full |
8620 ADVERTISED_TP); 9557 ADVERTISED_TP);
8621 } else { 9558 } else {
8622 BNX2X_ERR("NVRAM config error. " 9559 BNX2X_ERROR("NVRAM config error. "
8623 "Invalid link_config 0x%x" 9560 "Invalid link_config 0x%x"
8624 " speed_cap_mask 0x%x\n", 9561 " speed_cap_mask 0x%x\n",
8625 bp->port.link_config, 9562 bp->port.link_config,
8626 bp->link_params.speed_cap_mask); 9563 bp->link_params.speed_cap_mask);
8627 return; 9564 return;
8628 } 9565 }
8629 break; 9566 break;
@@ -8635,11 +9572,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8635 bp->port.advertising = (ADVERTISED_100baseT_Half | 9572 bp->port.advertising = (ADVERTISED_100baseT_Half |
8636 ADVERTISED_TP); 9573 ADVERTISED_TP);
8637 } else { 9574 } else {
8638 BNX2X_ERR("NVRAM config error. " 9575 BNX2X_ERROR("NVRAM config error. "
8639 "Invalid link_config 0x%x" 9576 "Invalid link_config 0x%x"
8640 " speed_cap_mask 0x%x\n", 9577 " speed_cap_mask 0x%x\n",
8641 bp->port.link_config, 9578 bp->port.link_config,
8642 bp->link_params.speed_cap_mask); 9579 bp->link_params.speed_cap_mask);
8643 return; 9580 return;
8644 } 9581 }
8645 break; 9582 break;
@@ -8650,11 +9587,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8650 bp->port.advertising = (ADVERTISED_1000baseT_Full | 9587 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651 ADVERTISED_TP); 9588 ADVERTISED_TP);
8652 } else { 9589 } else {
8653 BNX2X_ERR("NVRAM config error. " 9590 BNX2X_ERROR("NVRAM config error. "
8654 "Invalid link_config 0x%x" 9591 "Invalid link_config 0x%x"
8655 " speed_cap_mask 0x%x\n", 9592 " speed_cap_mask 0x%x\n",
8656 bp->port.link_config, 9593 bp->port.link_config,
8657 bp->link_params.speed_cap_mask); 9594 bp->link_params.speed_cap_mask);
8658 return; 9595 return;
8659 } 9596 }
8660 break; 9597 break;
@@ -8665,11 +9602,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8665 bp->port.advertising = (ADVERTISED_2500baseX_Full | 9602 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666 ADVERTISED_TP); 9603 ADVERTISED_TP);
8667 } else { 9604 } else {
8668 BNX2X_ERR("NVRAM config error. " 9605 BNX2X_ERROR("NVRAM config error. "
8669 "Invalid link_config 0x%x" 9606 "Invalid link_config 0x%x"
8670 " speed_cap_mask 0x%x\n", 9607 " speed_cap_mask 0x%x\n",
8671 bp->port.link_config, 9608 bp->port.link_config,
8672 bp->link_params.speed_cap_mask); 9609 bp->link_params.speed_cap_mask);
8673 return; 9610 return;
8674 } 9611 }
8675 break; 9612 break;
@@ -8682,19 +9619,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8682 bp->port.advertising = (ADVERTISED_10000baseT_Full | 9619 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683 ADVERTISED_FIBRE); 9620 ADVERTISED_FIBRE);
8684 } else { 9621 } else {
8685 BNX2X_ERR("NVRAM config error. " 9622 BNX2X_ERROR("NVRAM config error. "
8686 "Invalid link_config 0x%x" 9623 "Invalid link_config 0x%x"
8687 " speed_cap_mask 0x%x\n", 9624 " speed_cap_mask 0x%x\n",
8688 bp->port.link_config, 9625 bp->port.link_config,
8689 bp->link_params.speed_cap_mask); 9626 bp->link_params.speed_cap_mask);
8690 return; 9627 return;
8691 } 9628 }
8692 break; 9629 break;
8693 9630
8694 default: 9631 default:
8695 BNX2X_ERR("NVRAM config error. " 9632 BNX2X_ERROR("NVRAM config error. "
8696 "BAD link speed link_config 0x%x\n", 9633 "BAD link speed link_config 0x%x\n",
8697 bp->port.link_config); 9634 bp->port.link_config);
8698 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 9635 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8699 bp->port.advertising = bp->port.supported; 9636 bp->port.advertising = bp->port.supported;
8700 break; 9637 break;
@@ -8823,7 +9760,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8823 9760
8824 bp->e1hov = 0; 9761 bp->e1hov = 0;
8825 bp->e1hmf = 0; 9762 bp->e1hmf = 0;
8826 if (CHIP_IS_E1H(bp)) { 9763 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
8827 bp->mf_config = 9764 bp->mf_config =
8828 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 9765 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8829 9766
@@ -8844,14 +9781,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8844 "(0x%04x)\n", 9781 "(0x%04x)\n",
8845 func, bp->e1hov, bp->e1hov); 9782 func, bp->e1hov, bp->e1hov);
8846 } else { 9783 } else {
8847 BNX2X_ERR("!!! No valid E1HOV for func %d," 9784 BNX2X_ERROR("No valid E1HOV for func %d,"
8848 " aborting\n", func); 9785 " aborting\n", func);
8849 rc = -EPERM; 9786 rc = -EPERM;
8850 } 9787 }
8851 } else { 9788 } else {
8852 if (BP_E1HVN(bp)) { 9789 if (BP_E1HVN(bp)) {
8853 BNX2X_ERR("!!! VN %d in single function mode," 9790 BNX2X_ERROR("VN %d in single function mode,"
8854 " aborting\n", BP_E1HVN(bp)); 9791 " aborting\n", BP_E1HVN(bp));
8855 rc = -EPERM; 9792 rc = -EPERM;
8856 } 9793 }
8857 } 9794 }
@@ -8887,7 +9824,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8887 9824
8888 if (BP_NOMCP(bp)) { 9825 if (BP_NOMCP(bp)) {
8889 /* only supposed to happen on emulation/FPGA */ 9826 /* only supposed to happen on emulation/FPGA */
8890 BNX2X_ERR("warning random MAC workaround active\n"); 9827 BNX2X_ERROR("warning: random MAC workaround active\n");
8891 random_ether_addr(bp->dev->dev_addr); 9828 random_ether_addr(bp->dev->dev_addr);
8892 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 9829 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893 } 9830 }
@@ -8895,6 +9832,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8895 return rc; 9832 return rc;
8896} 9833}
8897 9834
9835static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9836{
9837 int cnt, i, block_end, rodi;
9838 char vpd_data[BNX2X_VPD_LEN+1];
9839 char str_id_reg[VENDOR_ID_LEN+1];
9840 char str_id_cap[VENDOR_ID_LEN+1];
9841 u8 len;
9842
9843 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9844 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9845
9846 if (cnt < BNX2X_VPD_LEN)
9847 goto out_not_found;
9848
9849 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9850 PCI_VPD_LRDT_RO_DATA);
9851 if (i < 0)
9852 goto out_not_found;
9853
9854
9855 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9856 pci_vpd_lrdt_size(&vpd_data[i]);
9857
9858 i += PCI_VPD_LRDT_TAG_SIZE;
9859
9860 if (block_end > BNX2X_VPD_LEN)
9861 goto out_not_found;
9862
9863 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9864 PCI_VPD_RO_KEYWORD_MFR_ID);
9865 if (rodi < 0)
9866 goto out_not_found;
9867
9868 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9869
9870 if (len != VENDOR_ID_LEN)
9871 goto out_not_found;
9872
9873 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9874
9875 /* vendor specific info */
9876 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9877 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9878 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9879 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9880
9881 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9882 PCI_VPD_RO_KEYWORD_VENDOR0);
9883 if (rodi >= 0) {
9884 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9885
9886 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9887
9888 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9889 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9890 bp->fw_ver[len] = ' ';
9891 }
9892 }
9893 return;
9894 }
9895out_not_found:
9896 return;
9897}
9898
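bnx2x_read_fwinfo() walks the PCI VPD with the generic helpers from <linux/pci.h>. The same pattern works for any read-only keyword; a condensed sketch (vpd_read_keyword() is a hypothetical helper, not part of the patch):

#include <linux/pci.h>
#include <linux/string.h>

/* Copy the data of read-only keyword @kw into @buf; returns the
 * data length, or -ENOENT if the keyword cannot be located safely. */
static int vpd_read_keyword(const u8 *vpd, int vpd_len, const char *kw,
			    u8 *buf, int buf_len)
{
	int i, block_end, rodi, len;

	i = pci_vpd_find_tag(vpd, 0, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -ENOENT;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&vpd[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;

	rodi = pci_vpd_find_info_keyword(vpd, i, block_end, kw);
	if (rodi < 0)
		return -ENOENT;

	len = pci_vpd_info_field_size(&vpd[rodi]);
	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > buf_len || rodi + len > vpd_len)
		return -ENOENT;

	memcpy(buf, &vpd[rodi], len);
	return len;
}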
8898static int __devinit bnx2x_init_bp(struct bnx2x *bp) 9899static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899{ 9900{
8900 int func = BP_FUNC(bp); 9901 int func = BP_FUNC(bp);
@@ -8912,29 +9913,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8912#endif 9913#endif
8913 9914
8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 9915 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9916 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8916 9917
8917 rc = bnx2x_get_hwinfo(bp); 9918 rc = bnx2x_get_hwinfo(bp);
8918 9919
9920 bnx2x_read_fwinfo(bp);
8919 /* need to reset chip if undi was active */ 9921 /* need to reset chip if undi was active */
8920 if (!BP_NOMCP(bp)) 9922 if (!BP_NOMCP(bp))
8921 bnx2x_undi_unload(bp); 9923 bnx2x_undi_unload(bp);
8922 9924
8923 if (CHIP_REV_IS_FPGA(bp)) 9925 if (CHIP_REV_IS_FPGA(bp))
8924 pr_err("FPGA detected\n"); 9926 dev_err(&bp->pdev->dev, "FPGA detected\n");
8925 9927
8926 if (BP_NOMCP(bp) && (func == 0)) 9928 if (BP_NOMCP(bp) && (func == 0))
8927 pr_err("MCP disabled, must load devices in order!\n"); 9929 dev_err(&bp->pdev->dev, "MCP disabled, "
9930 "must load devices in order!\n");
8928 9931
8929 /* Set multi queue mode */ 9932 /* Set multi queue mode */
8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 9933 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 9934 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n"); 9935 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9936 "requested is not MSI-X\n");
8933 multi_mode = ETH_RSS_MODE_DISABLED; 9937 multi_mode = ETH_RSS_MODE_DISABLED;
8934 } 9938 }
8935 bp->multi_mode = multi_mode; 9939 bp->multi_mode = multi_mode;
8936 9940
8937 9941
9942 bp->dev->features |= NETIF_F_GRO;
9943
8938 /* Set TPA flags */ 9944 /* Set TPA flags */
8939 if (disable_tpa) { 9945 if (disable_tpa) {
8940 bp->flags &= ~TPA_ENABLE_FLAG; 9946 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10310,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
9304 bnx2x_release_phy_lock(bp); 10310 bnx2x_release_phy_lock(bp);
9305 } 10311 }
9306 10312
9307 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", 10313 strncpy(info->fw_version, bp->fw_ver, 32);
10314 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10315 "bc %d.%d.%d%s%s",
9308 (bp->common.bc_ver & 0xff0000) >> 16, 10316 (bp->common.bc_ver & 0xff0000) >> 16,
9309 (bp->common.bc_ver & 0xff00) >> 8, 10317 (bp->common.bc_ver & 0xff00) >> 8,
9310 (bp->common.bc_ver & 0xff), 10318 (bp->common.bc_ver & 0xff),
9311 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); 10319 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
9312 strcpy(info->bus_info, pci_name(bp->pdev)); 10320 strcpy(info->bus_info, pci_name(bp->pdev));
9313 info->n_stats = BNX2X_NUM_STATS; 10321 info->n_stats = BNX2X_NUM_STATS;
9314 info->testinfo_len = BNX2X_NUM_TESTS; 10322 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10850,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
9842 return 0; 10850 return 0;
9843} 10851}
9844 10852
9845#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9846static int bnx2x_set_coalesce(struct net_device *dev, 10853static int bnx2x_set_coalesce(struct net_device *dev,
9847 struct ethtool_coalesce *coal) 10854 struct ethtool_coalesce *coal)
9848{ 10855{
9849 struct bnx2x *bp = netdev_priv(dev); 10856 struct bnx2x *bp = netdev_priv(dev);
9850 10857
9851 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 10858 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9852 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT) 10859 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9853 bp->rx_ticks = BNX2X_MAX_COALES_TOUT; 10860 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9854 10861
9855 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 10862 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9856 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT) 10863 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9857 bp->tx_ticks = BNX2X_MAX_COALES_TOUT; 10864 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9858 10865
9859 if (netif_running(dev)) 10866 if (netif_running(dev))
9860 bnx2x_update_coalesce(bp); 10867 bnx2x_update_coalesce(bp);
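The hand-rolled clamp above could equally use min_t() from <linux/kernel.h>. Because the value is truncated to u16 either way and BNX2X_MAX_COALESCE_TOUT (0xf0 * 12 = 2880, per the definition removed above) fits in 16 bits, this sketch is behavior-equivalent:

	bp->rx_ticks = min_t(u16, coal->rx_coalesce_usecs,
			     BNX2X_MAX_COALESCE_TOUT);
	bp->tx_ticks = min_t(u16, coal->tx_coalesce_usecs,
			     BNX2X_MAX_COALESCE_TOUT);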
@@ -9885,6 +10892,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
9885 struct bnx2x *bp = netdev_priv(dev); 10892 struct bnx2x *bp = netdev_priv(dev);
9886 int rc = 0; 10893 int rc = 0;
9887 10894
10895 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10896 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10897 return -EAGAIN;
10898 }
10899
9888 if ((ering->rx_pending > MAX_RX_AVAIL) || 10900 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9889 (ering->tx_pending > MAX_TX_AVAIL) || 10901 (ering->tx_pending > MAX_TX_AVAIL) ||
9890 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 10902 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10982,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9970 int changed = 0; 10982 int changed = 0;
9971 int rc = 0; 10983 int rc = 0;
9972 10984
10985 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10986 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10987 return -EAGAIN;
10988 }
10989
9973 /* TPA requires Rx CSUM offloading */ 10990 /* TPA requires Rx CSUM offloading */
9974 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 10991 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9975 if (!disable_tpa) { 10992 if (!disable_tpa) {
@@ -9986,6 +11003,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9986 changed = 1; 11003 changed = 1;
9987 } 11004 }
9988 11005
11006 if (data & ETH_FLAG_RXHASH)
11007 dev->features |= NETIF_F_RXHASH;
11008 else
11009 dev->features &= ~NETIF_F_RXHASH;
11010
9989 if (changed && netif_running(dev)) { 11011 if (changed && netif_running(dev)) {
9990 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 11012 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9991 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 11013 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11028,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006 struct bnx2x *bp = netdev_priv(dev); 11028 struct bnx2x *bp = netdev_priv(dev);
10007 int rc = 0; 11029 int rc = 0;
10008 11030
11031 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11032 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11033 return -EAGAIN;
11034 }
11035
10009 bp->rx_csum = data; 11036 bp->rx_csum = data;
10010 11037
10011 /* Disable TPA, when Rx CSUM is disabled. Otherwise all 11038 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11077,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10050 u32 wr_val = 0; 11077 u32 wr_val = 0;
10051 int port = BP_PORT(bp); 11078 int port = BP_PORT(bp);
10052 static const struct { 11079 static const struct {
10053 u32 offset0; 11080 u32 offset0;
10054 u32 offset1; 11081 u32 offset1;
10055 u32 mask; 11082 u32 mask;
10056 } reg_tbl[] = { 11083 } reg_tbl[] = {
10057/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, 11084/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10058 { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, 11085 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11146,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10119 11146
10120 save_val = REG_RD(bp, offset); 11147 save_val = REG_RD(bp, offset);
10121 11148
10122 REG_WR(bp, offset, wr_val); 11149 REG_WR(bp, offset, (wr_val & mask));
10123 val = REG_RD(bp, offset); 11150 val = REG_RD(bp, offset);
10124 11151
10125 /* Restore the original register's value */ 11152 /* Restore the original register's value */
10126 REG_WR(bp, offset, save_val); 11153 REG_WR(bp, offset, save_val);
10127 11154
10128 /* verify that value is as expected value */ 11155 /* verify value is as expected */
10129 if ((val & mask) != (wr_val & mask)) 11156 if ((val & mask) != (wr_val & mask)) {
11157 DP(NETIF_MSG_PROBE,
11158 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11159 offset, val, wr_val, mask);
10130 goto test_reg_exit; 11160 goto test_reg_exit;
11161 }
10131 } 11162 }
10132 } 11163 }
10133 11164
@@ -10267,8 +11298,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10267 11298
10268 bd_prod = TX_BD(fp_tx->tx_bd_prod); 11299 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10269 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; 11300 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10270 mapping = pci_map_single(bp->pdev, skb->data, 11301 mapping = dma_map_single(&bp->pdev->dev, skb->data,
10271 skb_headlen(skb), PCI_DMA_TODEVICE); 11302 skb_headlen(skb), DMA_TO_DEVICE);
10272 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 11303 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10273 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 11304 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10274 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 11305 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11375,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10344{ 11375{
10345 int rc = 0, res; 11376 int rc = 0, res;
10346 11377
11378 if (BP_NOMCP(bp))
11379 return rc;
11380
10347 if (!netif_running(bp->dev)) 11381 if (!netif_running(bp->dev))
10348 return BNX2X_LOOPBACK_FAILED; 11382 return BNX2X_LOOPBACK_FAILED;
10349 11383
@@ -10391,6 +11425,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
10391 int i, rc; 11425 int i, rc;
10392 u32 magic, crc; 11426 u32 magic, crc;
10393 11427
11428 if (BP_NOMCP(bp))
11429 return 0;
11430
10394 rc = bnx2x_nvram_read(bp, 0, data, 4); 11431 rc = bnx2x_nvram_read(bp, 0, data, 4);
10395 if (rc) { 11432 if (rc) {
10396 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); 11433 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11505,12 @@ static void bnx2x_self_test(struct net_device *dev,
10468{ 11505{
10469 struct bnx2x *bp = netdev_priv(dev); 11506 struct bnx2x *bp = netdev_priv(dev);
10470 11507
11508 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11509 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11510 etest->flags |= ETH_TEST_FL_FAILED;
11511 return;
11512 }
11513
10471 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 11514 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10472 11515
10473 if (!netif_running(dev)) 11516 if (!netif_running(dev))
@@ -10556,7 +11599,11 @@ static const struct {
10556 11599
10557/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, 11600/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10558 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11601 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10559 8, "[%d]: tx_packets" } 11602 8, "[%d]: tx_ucast_packets" },
11603 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11604 8, "[%d]: tx_mcast_packets" },
11605 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11606 8, "[%d]: tx_bcast_packets" }
10560}; 11607};
10561 11608
10562static const struct { 11609static const struct {
@@ -10618,16 +11665,20 @@ static const struct {
10618 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 11665 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10619 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 11666 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10620 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11667 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10621 8, STATS_FLAGS_BOTH, "tx_packets" }, 11668 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11669 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11670 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11671 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11672 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
10622 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 11673 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10623 8, STATS_FLAGS_PORT, "tx_mac_errors" }, 11674 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10624 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 11675 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10625 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, 11676 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10626 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 11677/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10627 8, STATS_FLAGS_PORT, "tx_single_collisions" }, 11678 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10628 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 11679 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10629 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, 11680 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10630/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 11681 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10631 8, STATS_FLAGS_PORT, "tx_deferred" }, 11682 8, STATS_FLAGS_PORT, "tx_deferred" },
10632 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 11683 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10633 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, 11684 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11694,11 @@ static const struct {
10643 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, 11694 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10644 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 11695 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10645 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, 11696 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10646 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 11697/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10647 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, 11698 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10648 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 11699 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10649 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, 11700 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10650/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 11701 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
10651 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, 11702 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10652 { STATS_OFFSET32(pause_frames_sent_hi), 11703 { STATS_OFFSET32(pause_frames_sent_hi),
10653 8, STATS_FLAGS_PORT, "tx_pause_frames" } 11704 8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11715,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664 struct bnx2x *bp = netdev_priv(dev); 11715 struct bnx2x *bp = netdev_priv(dev);
10665 int i, num_stats; 11716 int i, num_stats;
10666 11717
10667 switch(stringset) { 11718 switch (stringset) {
10668 case ETH_SS_STATS: 11719 case ETH_SS_STATS:
10669 if (is_multi(bp)) { 11720 if (is_multi(bp)) {
10670 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 11721 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11944,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10893 break; 11944 break;
10894 11945
10895 case PCI_D3hot: 11946 case PCI_D3hot:
 11947 /* If there are other clients above, don't
 11948 * shut down the power */
11949 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11950 return 0;
11951 /* Don't shut down the power for emulation and FPGA */
11952 if (CHIP_REV_IS_SLOW(bp))
11953 return 0;
11954
10896 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 11955 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10897 pmcsr |= 3; 11956 pmcsr |= 3;
10898 11957
@@ -11182,6 +12241,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int i;
 	u8 hlen = 0;
 	__le16 pkt_size = 0;
+	struct ethhdr *eth;
+	u8 mac_type = UNICAST_ADDRESS;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -11205,6 +12266,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 
+	eth = (struct ethhdr *)skb->data;
+
+	/* set flag according to packet type (UNICAST_ADDRESS is default) */
+	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+		if (is_broadcast_ether_addr(eth->h_dest))
+			mac_type = BROADCAST_ADDRESS;
+		else
+			mac_type = MULTICAST_ADDRESS;
+	}
+
 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 	/* First, check if we need to linearize the skb (due to FW
 	   restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12309,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	tx_start_bd->general_data = (UNICAST_ADDRESS <<
+	tx_start_bd->general_data = (mac_type <<
 				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
 	/* header nbd */
 	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
 
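The mac_type plumbing above replaces the hard-coded UNICAST_ADDRESS in the start BD with the actual destination class of each frame. For reference, a minimal standalone C sketch of the same classification rule — the I/G bit (bit 0 of the first octet) marks multicast, and the all-ones address is the broadcast special case. The enum names mirror the driver's; the concrete values and everything else here are illustrative:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	enum mac_type { UNICAST_ADDRESS, MULTICAST_ADDRESS, BROADCAST_ADDRESS };

	/* Same tests as is_multicast_ether_addr()/is_broadcast_ether_addr() */
	static enum mac_type classify_mac(const uint8_t dest[6])
	{
		static const uint8_t bcast[6] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

		if (dest[0] & 1)	/* I/G bit set -> group address */
			return memcmp(dest, bcast, 6) ? MULTICAST_ADDRESS
						      : BROADCAST_ADDRESS;
		return UNICAST_ADDRESS;
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

		printf("%d\n", classify_mac(mc));	/* prints 1: MULTICAST_ADDRESS */
		return 0;
	}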
@@ -11314,8 +12385,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12443,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
 
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
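Both mapping hunks belong to the mechanical migration from the pci_map_*()/PCI_DMA_* wrappers to the generic DMA API: each call maps one-to-one, with the struct pci_dev argument replaced by its embedded struct device and the PCI_DMA_* direction replaced by the generic DMA_* one. A condensed before/after fragment (kernel context assumed, not a standalone program; buf and len are placeholders):

	/* old PCI DMA wrappers */
	mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);

	/* generic DMA API equivalents, as used in the hunks above */
	mapping = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);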
@@ -11452,6 +12524,40 @@ static int bnx2x_open(struct net_device *dev)
 
 	bnx2x_set_power_state(bp, PCI_D0);
 
+	if (!bnx2x_reset_is_done(bp)) {
+		do {
+			/* Reset the MCP mailbox sequence if there is an
+			 * ongoing recovery
+			 */
+			bp->fw_seq = 0;
+
+			/* If it's the first function to load and reset done
+			 * is still not cleared, a recovery may be pending. We
+			 * don't check the attention state here because it may
+			 * have already been cleared by a "common" reset but we
+			 * shall proceed with "process kill" anyway.
+			 */
+			if ((bnx2x_get_load_cnt(bp) == 0) &&
+			    bnx2x_trylock_hw_lock(bp,
+					HW_LOCK_RESOURCE_RESERVED_08) &&
+			    (!bnx2x_leader_reset(bp))) {
+				DP(NETIF_MSG_HW, "Recovered in open\n");
+				break;
+			}
+
+			bnx2x_set_power_state(bp, PCI_D3hot);
+
+			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
+			       " completed yet. Try again later. If you still"
+			       " see this message after a few retries then a"
+			       " power cycle is required.\n", bp->dev->name);
+
+			return -EAGAIN;
+		} while (0);
+	}
+
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+
 	return bnx2x_nic_load(bp, LOAD_OPEN);
 }
 
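The open-time gate added here is a small leader election: the first function to load tries to take the recovery lock and run the reset itself; everyone else powers back down and returns -EAGAIN so userspace can simply retry. A standalone sketch of just that control flow, with hypothetical stubs standing in for bnx2x_reset_is_done(), bnx2x_get_load_cnt(), bnx2x_trylock_hw_lock() and bnx2x_leader_reset():

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the driver helpers used above */
	static bool reset_is_done(void)           { return false; }
	static int  load_count(void)              { return 0; }
	static bool trylock_recovery_leader(void) { return true; }
	static int  leader_reset(void)            { return 0; }  /* 0 = success */

	static int open_recovery_gate(void)
	{
		if (reset_is_done())
			return 0;

		/* Only the first loader that wins the lock runs the reset */
		if (load_count() == 0 && trylock_recovery_leader() &&
		    leader_reset() == 0) {
			printf("Recovered in open\n");
			return 0;
		}

		fprintf(stderr, "Recovery not completed yet, try again later\n");
		return -11;	/* -EAGAIN */
	}

	int main(void)
	{
		return open_recovery_gate() ? 1 : 0;
	}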
@@ -11462,9 +12568,7 @@ static int bnx2x_close(struct net_device *dev)
 
 	/* Unload the driver, release IRQs */
 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
-	if (atomic_read(&bp->pdev->enable_cnt) == 1)
-		if (!CHIP_REV_IS_SLOW(bp))
-			bnx2x_set_power_state(bp, PCI_D3hot);
+	bnx2x_set_power_state(bp, PCI_D3hot);
 
 	return 0;
 }
@@ -11494,21 +12598,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 	else { /* some multicasts */
 		if (CHIP_IS_E1(bp)) {
 			int i, old, offset;
-			struct dev_mc_list *mclist;
+			struct netdev_hw_addr *ha;
 			struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
 
 			i = 0;
-			netdev_for_each_mc_addr(mclist, dev) {
+			netdev_for_each_mc_addr(ha, dev) {
 				config->config_table[i].
 					cam_entry.msb_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[0]);
+					swab16(*(u16 *)&ha->addr[0]);
 				config->config_table[i].
 					cam_entry.middle_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[2]);
+					swab16(*(u16 *)&ha->addr[2]);
 				config->config_table[i].
 					cam_entry.lsb_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[4]);
+					swab16(*(u16 *)&ha->addr[4]);
 				config->config_table[i].cam_entry.flags =
 							cpu_to_le16(port);
 				config->config_table[i].
@@ -11562,18 +12666,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 						      0);
 		} else { /* E1H */
 			/* Accept one or more multicasts */
-			struct dev_mc_list *mclist;
+			struct netdev_hw_addr *ha;
 			u32 mc_filter[MC_HASH_SIZE];
 			u32 crc, bit, regidx;
 			int i;
 
 			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
 
-			netdev_for_each_mc_addr(mclist, dev) {
+			netdev_for_each_mc_addr(ha, dev) {
 				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   mclist->dmi_addr);
+				   ha->addr);
 
-				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+				crc = crc32c_le(0, ha->addr, ETH_ALEN);
 				bit = (crc >> 24) & 0xff;
 				regidx = bit >> 5;
 				bit &= 0x1f;
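The E1H branch implements a classic 256-bin multicast hash filter: the top byte of the crc32c of each MAC selects a bin, which splits into a 32-bit register index (bit >> 5) and a bit position within it (bit & 0x1f). A standalone sketch of the binning arithmetic — MC_HASH_SIZE = 8 is an assumption (8 x 32-bit registers = 256 bins), and the CRC value is supplied rather than computed:

	#include <stdint.h>
	#include <stdio.h>

	#define MC_HASH_SIZE 8	/* assumed: 8 x 32-bit registers, 256 bins */

	static void mc_filter_add(uint32_t mc_filter[MC_HASH_SIZE], uint32_t crc)
	{
		uint32_t bit = (crc >> 24) & 0xff;	/* bin 0..255 */
		uint32_t regidx = bit >> 5;		/* which register */

		mc_filter[regidx] |= 1u << (bit & 0x1f);	/* which bit */
	}

	int main(void)
	{
		uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
		int i;

		mc_filter_add(mc_filter, 0xdeadbeef);	/* e.g. crc32c of a MAC */
		for (i = 0; i < MC_HASH_SIZE; i++)
			printf("mc_filter[%d] = 0x%08x\n", i, mc_filter[i]);
		return 0;
	}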
@@ -11690,6 +12794,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc = 0;
 
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
 	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
 	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
 		return -EINVAL;
@@ -11717,7 +12826,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
 	bnx2x_panic();
 #endif
 	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_work(&bp->reset_task);
+	schedule_delayed_work(&bp->reset_task, 0);
 }
 
 #ifdef BCM_VLAN
@@ -11789,18 +12898,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		pr_err("Cannot enable PCI device, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot enable PCI device, aborting\n");
 		goto err_out;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		pr_err("Cannot find PCI device base address, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
 
 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-		pr_err("Cannot find second PCI device base address, aborting\n");
+		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+			" base address, aborting\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
@@ -11808,7 +12920,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	if (atomic_read(&pdev->enable_cnt) == 1) {
 		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 		if (rc) {
-			pr_err("Cannot obtain PCI resources, aborting\n");
+			dev_err(&bp->pdev->dev,
+				"Cannot obtain PCI resources, aborting\n");
 			goto err_out_disable;
 		}
 
@@ -11818,28 +12931,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 	if (bp->pm_cap == 0) {
-		pr_err("Cannot find power management capability, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot find power management capability, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
 
 	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (bp->pcie_cap == 0) {
-		pr_err("Cannot find PCI Express capability, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI Express capability, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
 		bp->flags |= USING_DAC_FLAG;
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+				" failed, aborting\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-		pr_err("System does not support DMA, aborting\n");
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&bp->pdev->dev,
+			"System does not support DMA, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
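The mask setup above is the canonical probe-time pattern: try a 64-bit streaming mask, pin the coherent mask to the same width on success, and fall back to 32-bit otherwise. A condensed schematic of the same flow (kernel context assumed, not a standalone program):

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		/* coherent mask must not be wider than the streaming mask */
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
			return -EIO;
	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		return -EIO;	/* no usable DMA configuration */
	}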
@@ -11852,7 +12969,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	bp->regview = pci_ioremap_bar(pdev, 0);
 	if (!bp->regview) {
-		pr_err("Cannot map register space, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot map register space, aborting\n");
 		rc = -ENOMEM;
 		goto err_out_release;
 	}
@@ -11861,7 +12979,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 					min_t(u64, BNX2X_DB_SIZE,
 					      pci_resource_len(pdev, 2)));
 	if (!bp->doorbells) {
-		pr_err("Cannot map doorbell space, aborting\n");
+		dev_err(&bp->pdev->dev,
+			"Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
 		goto err_out_unmap;
 	}
@@ -11876,6 +12995,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
 
+	/* Reset the load counter */
+	bnx2x_clear_load_cnt(bp);
+
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13085,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 		offset = be32_to_cpu(sections[i].offset);
 		len = be32_to_cpu(sections[i].len);
 		if (offset + len > firmware->size) {
-			pr_err("Section %d length is out of bounds\n", i);
+			dev_err(&bp->pdev->dev,
+				"Section %d length is out of bounds\n", i);
 			return -EINVAL;
 		}
 	}
@@ -11975,7 +13098,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 
 	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
 		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
-			pr_err("Section offset %d is out of bounds\n", i);
+			dev_err(&bp->pdev->dev,
+				"Section offset %d is out of bounds\n", i);
 			return -EINVAL;
 		}
 	}
@@ -11987,7 +13111,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
 	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
 	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
-		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+		dev_err(&bp->pdev->dev,
+			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
 			fw_ver[0], fw_ver[1], fw_ver[2],
 			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
 			BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13147,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 	for (i = 0, j = 0; i < n/8; i++, j += 2) {
 		tmp = be32_to_cpu(source[j]);
 		target[i].op = (tmp >> 24) & 0xff;
 		target[i].offset = tmp & 0xffffff;
-		target[i].raw_data = be32_to_cpu(source[j+1]);
+		target[i].raw_data = be32_to_cpu(source[j + 1]);
 	}
 }
 
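bnx2x_prep_ops() unpacks the firmware's big-endian init-op stream: every 8 bytes yield one op, with the opcode in the top byte of the first word, a 24-bit offset in the remaining bits, and the raw data in the second word. A standalone sketch of the same unpacking, using ntohl() as a stand-in for be32_to_cpu() and an illustrative struct layout (the driver's actual raw_op uses bitfields):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* ntohl() as a stand-in for be32_to_cpu() */

	struct raw_op {
		uint8_t  op;		/* top byte of the first word */
		uint32_t offset;	/* low 24 bits of the first word */
		uint32_t raw_data;	/* second word, byte-swapped */
	};

	static void prep_ops(const uint32_t *source, struct raw_op *target,
			     uint32_t n)
	{
		uint32_t i, j, tmp;

		for (i = 0, j = 0; i < n / 8; i++, j += 2) {
			tmp = ntohl(source[j]);
			target[i].op = (tmp >> 24) & 0xff;
			target[i].offset = tmp & 0xffffff;
			target[i].raw_data = ntohl(source[j + 1]);
		}
	}

	int main(void)
	{
		const uint32_t fw[2] = { htonl(0x12000034), htonl(0xabcdef01) };
		struct raw_op op;

		prep_ops(fw, &op, sizeof(fw));
		printf("op=0x%02x offset=0x%06x data=0x%08x\n",
		       op.op, op.offset, op.raw_data);
		return 0;
	}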
@@ -12057,20 +13182,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
 
 	if (CHIP_IS_E1(bp))
 		fw_file_name = FW_FILE_NAME_E1;
-	else
+	else if (CHIP_IS_E1H(bp))
 		fw_file_name = FW_FILE_NAME_E1H;
+	else {
+		dev_err(dev, "Unsupported chip revision\n");
+		return -EINVAL;
+	}
 
-	pr_info("Loading %s\n", fw_file_name);
+	dev_info(dev, "Loading %s\n", fw_file_name);
 
 	rc = request_firmware(&bp->firmware, fw_file_name, dev);
 	if (rc) {
-		pr_err("Can't load firmware file %s\n", fw_file_name);
+		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
 		goto request_firmware_exit;
 	}
 
 	rc = bnx2x_check_firmware(bp);
 	if (rc) {
-		pr_err("Corrupt firmware file %s\n", fw_file_name);
+		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
 		goto request_firmware_exit;
 	}
 
@@ -12129,7 +13258,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
 	if (!dev) {
-		pr_err("Cannot allocate net device\n");
+		dev_err(&pdev->dev, "Cannot allocate net device\n");
 		return -ENOMEM;
 	}
 
@@ -12151,7 +13280,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* Set init arrays */
 	rc = bnx2x_init_firmware(bp, &pdev->dev);
 	if (rc) {
-		pr_err("Error loading firmware\n");
+		dev_err(&pdev->dev, "Error loading firmware\n");
 		goto init_one_exit;
 	}
 
@@ -12162,11 +13291,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	}
 
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
-		    board_info[ent->driver_data].name,
+	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+		    " IRQ %d, ", board_info[ent->driver_data].name,
 		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
-		    dev->base_addr, bp->pdev->irq, dev->dev_addr);
+		    dev->base_addr, bp->pdev->irq);
+	pr_cont("node addr %pM\n", dev->dev_addr);
 
 	return 0;
 
@@ -12194,13 +13324,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 	struct bnx2x *bp;
 
 	if (!dev) {
-		pr_err("BAD net device from bnx2x_init_one\n");
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 		return;
 	}
 	bp = netdev_priv(dev);
 
 	unregister_netdev(dev);
 
+	/* Make sure RESET task is not scheduled before continuing */
+	cancel_delayed_work_sync(&bp->reset_task);
+
 	kfree(bp->init_ops_offsets);
 	kfree(bp->init_ops);
 	kfree(bp->init_data);
@@ -12227,7 +13360,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct bnx2x *bp;
 
 	if (!dev) {
-		pr_err("BAD net device from bnx2x_init_one\n");
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 		return -ENODEV;
 	}
 	bp = netdev_priv(dev);
@@ -12259,11 +13392,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	int rc;
 
 	if (!dev) {
-		pr_err("BAD net device from bnx2x_init_one\n");
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 		return -ENODEV;
 	}
 	bp = netdev_priv(dev);
 
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
 	rtnl_lock();
 
 	pci_restore_state(pdev);
@@ -12292,6 +13430,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
 	bnx2x_netif_stop(bp, 0);
+	netif_carrier_off(bp->dev);
 
 	del_timer_sync(&bp->timer);
 	bp->stats_state = STATS_STATE_DISABLED;
@@ -12318,8 +13457,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
 	bp->state = BNX2X_STATE_CLOSED;
 
-	netif_carrier_off(bp->dev);
-
 	return 0;
 }
 
@@ -12430,6 +13567,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2x *bp = netdev_priv(dev);
 
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return;
+	}
+
 	rtnl_lock();
 
 	bnx2x_eeh_recover(bp);