Diffstat (limited to 'drivers/net/bnx2x_main.c')
 -rw-r--r--  drivers/net/bnx2x_main.c | 1846
 1 file changed, 1491 insertions(+), 355 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..0c6dba24e37e 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-7"
-#define DRV_MODULE_RELDATE	"2010/02/28"
+#define DRV_MODULE_VERSION	"1.52.53-1"
+#define DRV_MODULE_RELDATE	"2010/18/04"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
 static int int_mode;
 module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
+				"(1 INT#x; 2 MSI)");
 
 static int dropless_fc;
 module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
 {
+	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;
 
-	while (len > DMAE_LEN32_WR_MAX) {
+	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
-				 addr + offset, DMAE_LEN32_WR_MAX);
-		offset += DMAE_LEN32_WR_MAX * 4;
-		len -= DMAE_LEN32_WR_MAX;
+				 addr + offset, dmae_wr_max);
+		offset += dmae_wr_max * 4;
+		len -= dmae_wr_max;
	}
 
	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
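The hunk above replaces the compile-time constant DMAE_LEN32_WR_MAX with a per-chip value read at run time; the loop itself is the usual pattern for splitting a transfer larger than the controller's maximum DMAE length. A minimal user-space sketch of the same chunking pattern follows — write_block() and all values are illustrative stand-ins, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Stub back-end standing in for bnx2x_write_dmae(): writes 'len'
 * 32-bit words starting at source offset 'src' to device offset 'dst'. */
static void write_block(uint64_t src, uint32_t dst, uint32_t len)
{
	printf("write %u dwords: src=0x%llx dst=0x%x\n",
	       len, (unsigned long long)src, dst);
}

/* Split an arbitrary-length write into chunks of at most 'max' dwords,
 * mirroring bnx2x_write_dmae_phys_len().  Note the asymmetry: 'len'
 * counts dwords, so the byte offset advances by max * 4 per chunk. */
static void write_chunked(uint64_t phys, uint32_t addr, uint32_t len,
			  uint32_t max)
{
	uint32_t offset = 0;

	while (len > max) {
		write_block(phys + offset, addr + offset, max);
		offset += max * 4;
		len -= max;
	}
	write_block(phys + offset, addr + offset, len);	/* tail chunk */
}

int main(void)
{
	write_chunked(0x1000, 0x2000, 300, 128);	/* 128 + 128 + 44 */
	return 0;
}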
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
 
 static void bnx2x_fw_dump(struct bnx2x *bp)
 {
+	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
 
-	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
-	mark = ((mark + 0x3) & ~0x3);
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+
+	addr = bp->common.shmem_base - 0x0800 + 4;
+	mark = REG_RD(bp, addr);
+	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);
 
	pr_err("");
-	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
-	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
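The rewritten dump derives the cursor 'mark' from shmem instead of a hard-coded scratchpad offset and then walks the MCP log as a circular buffer: first from mark to the end of the region, then from the start of the region back up to mark. A reduced, self-contained sketch of that two-loop wrap-around traversal (the buffer and its contents are made up for illustration):

#include <stdio.h>

/* Fake scratch RAM standing in for the MCP scratchpad. */
static char scratch[32] = "YYYYYYYYYYYYXXXXXXXXXXXXXXXXXXX";

/* Dump a circular log whose newest bytes start at 'mark': first from
 * mark to the end of the buffer, then from the start back up to mark -
 * the same shape as the two loops in the reworked bnx2x_fw_dump(). */
static void dump_circular(int size, int mark)
{
	int off;

	for (off = mark; off < size; off++)
		putchar(scratch[off]);
	for (off = 0; off < mark; off++)
		putchar(scratch[off]);
	putchar('\n');
}

int main(void)
{
	dump_circular(31, 12);	/* prints the X run, then the Y run */
	return 0;
}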
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
	/* Indices */
	/* Common */
-	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
-		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
-		  " spq_prod_idx(%u)\n",
+	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
-			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
-			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
-			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
+		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
-			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
-			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  * General service functions
  */
 
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
 {
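Unlike the sleeping acquire path, the new helper makes exactly one attempt: it writes the resource bit to the "set" register and reads the control register back to see whether the bit stuck. One quirk worth flagging: the out-of-range branch returns -EINVAL from a function declared bool, which C collapses to true, so an invalid resource number looks like a successful acquisition to any caller relying on true/false semantics. A reduced, standalone model of that branch (not the driver code):

#include <stdbool.h>
#include <stdio.h>

#define EINVAL 22

/* Reduced model of bnx2x_trylock_hw_lock()'s out-of-range branch. */
static bool trylock(unsigned int resource, unsigned int max)
{
	if (resource > max)
		return -EINVAL;	/* non-zero converts to true in a bool! */
	return false;		/* pretend the lock was not granted */
}

int main(void)
{
	/* An invalid resource reports "lock acquired" to the caller. */
	printf("invalid resource -> %d\n", trylock(99, 7));	/* prints 1 */
	return 0;
}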
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@
 
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 
	default:
		BNX2X_ERR("unexpected MC reply (%d) "
-			  "fp->state is %x\n", command, fp->state);
+			  "fp[%d] state is %x\n",
+			  command, fp->index, fp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
	if (!page)
		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
	if (unlikely(page == NULL))
		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}
 
	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
	if (unlikely(skb == NULL))
		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
 
	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
 }
 
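These hunks are part of a mechanical migration from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_single(bp->pdev, ...) becomes dma_map_single(&bp->pdev->dev, ...), PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, and pci_unmap_addr*() becomes dma_unmap_addr*(). The shape of the conversion, as a schematic fragment only (map_rx_buffer_old/new are hypothetical names; the dma_*/pci_* calls are the real kernel APIs):

/* Before: legacy PCI wrappers take the struct pci_dev and PCI_DMA_* flags */
dma_addr_t map_rx_buffer_old(struct bnx2x *bp, struct sk_buff *skb)
{
	return pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
			      PCI_DMA_FROMDEVICE);
}

/* After: the generic DMA API takes the underlying struct device and
 * enum dma_data_direction values; mapping failures are probed explicitly. */
dma_addr_t map_rx_buffer_new(struct bnx2x *bp, struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping))
		return 0;	/* caller frees the skb, as in the patch */
	return mapping;
}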
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 #else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
					      max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages >
-	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		}
 
		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 #ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
-				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-						le16_to_cpu(cqe->fast_path_cqe.
-							    vlan_tag));
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
+						 le16_to_cpu(cqe->fast_path_cqe.
+							     vlan_tag), skb);
			else
 #endif
-				netif_receive_skb(skb);
+				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
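Here and in the plain receive path below, completed skbs are handed to napi_gro_receive()/vlan_gro_receive() instead of netif_receive_skb()/vlan_hwaccel_receive_skb(), so they pass through the GRO engine attached to the queue's NAPI context before reaching the stack. A schematic poll routine showing where that call sits (my_poll and my_next_completed_rx are hypothetical; the napi_* calls are the real kernel API the patch switches to):

/* Schematic NAPI poll routine - not a buildable unit on its own. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = my_next_completed_rx(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* try GRO merge, else pass up */
		work++;
	}
	if (work < budget)
		napi_complete(napi);		/* done: re-enable interrupts */
	return work;
}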
@@ -1620,10 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
			}
		}
 
-		pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+		dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
		prefetch(skb);
		prefetch(((char *)(skb)) + 128);
 
@@ -1665,10 +1708,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
		} else
		if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
			skb_reserve(skb, pad);
			skb_put(skb, len);
 
@@ -1699,11 +1742,11 @@ reuse_rx:
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
-			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+			vlan_gro_receive(&fp->napi, bp->vlgrp,
+				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
 #endif
-			netif_receive_skb(skb);
+			napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
@@ -1831,8 +1874,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
		return IRQ_HANDLED;
	}
 
-	if (status)
-		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);
 
	return IRQ_HANDLED;
@@ -1900,6 +1943,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
 
+	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
@@ -2254,11 +2299,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
-	u8 rc;
+	u8 rc = 0;
 
-	bnx2x_acquire_phy_lock(bp);
-	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_release_phy_lock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
 
	return rc;
 }
@@ -2387,10 +2435,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
-			max((u32)(vn_min_rate * (T_FAIR_COEF /
-				 (8 * bp->vn_weight_sum))),
-			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+					   (8 * bp->vn_weight_sum))),
+			      (bp->cmng.fair_vars.fair_threshold * 2));
+		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}
 
@@ -2410,6 +2458,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
+	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
@@ -2442,8 +2491,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
 
-	/* indicate link status */
-	bnx2x_link_report(bp);
+	/* indicate link status only if link status actually changed */
+	if (prev_link_status != bp->link_vars.link_status)
+		bnx2x_link_report(bp);
 
	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
@@ -2560,7 +2610,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
	return rc;
 }
 
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
 static void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -2696,12 +2745,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 {
	struct eth_spe *spe;
 
-	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
-	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
-	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
@@ -2720,8 +2763,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
-			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
-				     HW_CID(bp, cid)));
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
@@ -2732,6 +2775,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
	bp->spq_left--;
 
+	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
@@ -2740,12 +2790,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 /* acquire split MCP access lock register */
 static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
-	u32 i, j, val;
+	u32 j, val;
	int rc = 0;
 
	might_sleep();
-	i = 100;
-	for (j = 0; j < i*10; j++) {
+	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
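The rewrite folds the indirect "i = 100; for (j = 0; j < i*10; j++)" construction into a plain 1000-iteration bound; the body is the classic write-then-poll acquisition of a hardware lock bit. A standalone sketch of that shape, with fake registers standing in for GRCBASE_MCP + 0x9c:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for the MCP lock register */

static void reg_write(uint32_t val) { fake_reg |= val; }
static uint32_t reg_read(void) { return fake_reg; }

/* Request a lock bit and poll until the hardware grants it or the
 * attempt budget runs out - the shape of the reworked bnx2x_acquire_alr(). */
static bool acquire_bit(uint32_t bit, int attempts)
{
	while (attempts--) {
		reg_write(bit);
		if (reg_read() & bit)
			return true;	/* granted */
		/* a real driver sleeps briefly here before retrying */
	}
	return false;			/* timed out */
}

int main(void)
{
	printf("acquired: %d\n", acquire_bit(1u << 31, 1000));
	return 0;
}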
@@ -2765,9 +2814,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
 /* release split MCP access lock register */
 static void bnx2x_release_alr(struct bnx2x *bp)
 {
-	u32 val = 0;
-
-	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 }
 
 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2870,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
		DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
		   aeu_mask, asserted);
-		aeu_mask &= ~(asserted & 0xff);
+		aeu_mask &= ~(asserted & 0x3ff);
		DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
		REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2957,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
	       bp->link_params.ext_phy_config);
 
	/* log the failure */
-	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
-		   "Please contact Dell Support for assistance.\n");
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+		   " the driver to shutdown the card to prevent permanent"
+		   " damage. Please contact OEM Support for assistance\n");
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3152,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
	}
 }
 
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val |= (1 << 16);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+
+	return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+	if (idx)
+		pr_cont(", ");
+	pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+				_print_next_block(par_num++, "BRB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+				_print_next_block(par_num++, "PARSER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "TSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+				_print_next_block(par_num++, "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "TSEMI");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++, "PBCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "XSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				_print_next_block(par_num++, "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				_print_next_block(par_num++, "VAUX PCI CORE");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				_print_next_block(par_num++, "DEBUG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				_print_next_block(par_num++, "USDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "USEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				_print_next_block(par_num++, "UPB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "CSDM");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "CSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+				_print_next_block(par_num++, "PXP");
+				break;
+			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++,
+					"PXPPCICLOCKCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+				_print_next_block(par_num++, "CFC");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+				_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+				_print_next_block(par_num++, "IGU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+				_print_next_block(par_num++, "MISC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				_print_next_block(par_num++, "MCP ROM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				_print_next_block(par_num++, "MCP UMP RX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				_print_next_block(par_num++, "MCP UMP TX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				_print_next_block(par_num++, "MCP SCPAD");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+				     u32 sig2, u32 sig3)
+{
+	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+		int par_num = 0;
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+		   "[0]:0x%08x [1]:0x%08x "
+		   "[2]:0x%08x [3]:0x%08x\n",
+		   sig0 & HW_PRTY_ASSERT_SET_0,
+		   sig1 & HW_PRTY_ASSERT_SET_1,
+		   sig2 & HW_PRTY_ASSERT_SET_2,
+		   sig3 & HW_PRTY_ASSERT_SET_3);
+		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+		       bp->dev->name);
+		par_num = bnx2x_print_blocks_with_parity0(
+			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+		par_num = bnx2x_print_blocks_with_parity1(
+			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+		par_num = bnx2x_print_blocks_with_parity2(
+			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+		par_num = bnx2x_print_blocks_with_parity3(
+			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+		printk("\n");
+		return true;
+	} else
+		return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
 {
	struct attn_route attn;
-	struct attn_route group_mask;
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp,
+			     MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+			     port*4);
+	attn.sig[1] = REG_RD(bp,
+			     MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+			     port*4);
+	attn.sig[2] = REG_RD(bp,
+			     MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+			     port*4);
+	attn.sig[3] = REG_RD(bp,
+			     MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+			     port*4);
+
+	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+				 attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
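The new helpers above multiplex two pieces of cross-function state into one always-on scratch register (MISC_REG_GENERIC_POR_1): a 16-bit count of functions that currently have the driver loaded, and a "reset in progress" flag in the bits above it. The encode/decode arithmetic, reduced to a self-contained user-space sketch built from the patch's own macros:

#include <stdint.h>
#include <stdio.h>

#define LOAD_COUNTER_BITS	16
#define LOAD_COUNTER_MASK	(((uint32_t)1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS

static uint32_t gen_reg;	/* stands in for BNX2X_MISC_GEN_REG */

static void inc_load_cnt(void)
{
	uint32_t cnt = ((gen_reg & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;

	/* preserve the flag bits, replace only the counter field */
	gen_reg = (gen_reg & RESET_DONE_FLAG_MASK) | cnt;
}

static void set_reset_in_progress(void)
{
	gen_reg |= 1 << RESET_DONE_FLAG_SHIFT;	/* flag lives above counter */
}

static int reset_is_done(void)
{
	/* any bit above the counter means a reset is still in flight */
	return (gen_reg & RESET_DONE_FLAG_MASK) == 0;
}

int main(void)
{
	inc_load_cnt();
	set_reset_in_progress();
	printf("cnt=%u done=%d\n", gen_reg & LOAD_COUNTER_MASK,
	       reset_is_done());	/* cnt=1 done=0 */
	return 0;
}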
@@ -3118,6 +3467,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
	   try to handle this event */
	bnx2x_acquire_alr(bp);
 
+	if (bnx2x_chk_parity_attn(bp)) {
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		bnx2x_set_reset_in_progress(bp);
+		schedule_delayed_work(&bp->reset_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		bnx2x_release_alr(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * other function would "see" parity errors.
+		 */
+		return;
+	}
+
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3489,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
-			group_mask = bp->attn_group[index];
+			group_mask = &bp->attn_group[index];
 
			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
-			   index, group_mask.sig[0], group_mask.sig[1],
-			   group_mask.sig[2], group_mask.sig[3]);
+			   index, group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3]);
 
			bnx2x_attn_int_deasserted3(bp,
-					attn.sig[3] & group_mask.sig[3]);
+					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
-					attn.sig[1] & group_mask.sig[1]);
+					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
-					attn.sig[2] & group_mask.sig[2]);
+					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
-					attn.sig[0] & group_mask.sig[0]);
-
-			if ((attn.sig[0] & group_mask.sig[0] &
-						HW_PRTY_ASSERT_SET_0) ||
-			    (attn.sig[1] & group_mask.sig[1] &
-						HW_PRTY_ASSERT_SET_1) ||
-			    (attn.sig[2] & group_mask.sig[2] &
-						HW_PRTY_ASSERT_SET_2))
-				BNX2X_ERR("FATAL HW block parity attention\n");
+					attn.sig[0] & group_mask->sig[0]);
		}
	}
@@ -3172,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
-	aeu_mask |= (deasserted & 0xff);
+	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
	REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3570,6 @@ static void bnx2x_sp_task(struct work_struct *work)
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;
 
-
	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3580,23 @@ static void bnx2x_sp_task(struct work_struct *work)
 /* if (status == 0) */
 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 
	/* HW attentions */
-	if (status & 0x1)
+	if (status & 0x1) {
		bnx2x_attn_int(bp);
+		status &= ~0x1;
+	}
+
+	/* CStorm events: STAT_QUERY */
+	if (status & 0x2) {
+		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+		status &= ~0x2;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
 
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
@@ -3243,7 +3608,6 @@ static void bnx2x_sp_task(struct work_struct *work)
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
-
 }
 
 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4311,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
		u32 lo;
		u32 hi;
	} diff;
-	u32 nig_timer_max;
 
	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4341,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
 
-	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
-	if (nig_timer_max != estats->nig_timer_max) {
-		estats->nig_timer_max = nig_timer_max;
-		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
	}
 
	return 0;
@@ -4025,21 +4392,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-			   " xstorm counter (%d) != stats_counter (%d)\n",
+			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-			   " tstorm counter (%d) != stats_counter (%d)\n",
+			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-			   " ustorm counter (%d) != stats_counter (%d)\n",
+			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}
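The comparisons above rely on unsigned 16-bit wraparound: after a completed update the storm firmware's counter should be exactly one behind bp->stats_counter, so the driver tests (u16)(counter + 1) != stats_counter, which stays correct when the counter rolls over from 0xffff to 0. A self-contained illustration of that test:

#include <stdint.h>
#include <stdio.h>

/* True if 'fw' is exactly one behind 'drv', modulo 2^16 - the check the
 * storm-stats code performs before trusting a snapshot. */
static int one_behind(uint16_t fw, uint16_t drv)
{
	return (uint16_t)(fw + 1) == drv;
}

int main(void)
{
	printf("%d\n", one_behind(41, 42));	/* 1: in sync */
	printf("%d\n", one_behind(0xffff, 0));	/* 1: wraparound handled */
	printf("%d\n", one_behind(40, 42));	/* 0: snapshot is stale */
	return 0;
}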
@@ -4059,6 +4426,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4059 qstats->total_bytes_received_lo, 4426 qstats->total_bytes_received_lo,
4060 le32_to_cpu(tclient->rcv_unicast_bytes.lo)); 4427 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4061 4428
4429 SUB_64(qstats->total_bytes_received_hi,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4431 qstats->total_bytes_received_lo,
4432 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4433
4434 SUB_64(qstats->total_bytes_received_hi,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4436 qstats->total_bytes_received_lo,
4437 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4438
4439 SUB_64(qstats->total_bytes_received_hi,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4441 qstats->total_bytes_received_lo,
4442 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4443
4062 qstats->valid_bytes_received_hi = 4444 qstats->valid_bytes_received_hi =
4063 qstats->total_bytes_received_hi; 4445 qstats->total_bytes_received_hi;
4064 qstats->valid_bytes_received_lo = 4446 qstats->valid_bytes_received_lo =
@@ -4307,47 +4689,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4307 bnx2x_drv_stats_update(bp); 4689 bnx2x_drv_stats_update(bp);
4308 4690
4309 if (netif_msg_timer(bp)) { 4691 if (netif_msg_timer(bp)) {
4310 struct bnx2x_fastpath *fp0_rx = bp->fp;
4311 struct bnx2x_fastpath *fp0_tx = bp->fp;
4312 struct tstorm_per_client_stats *old_tclient =
4313 &bp->fp->old_tclient;
4314 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4315 struct bnx2x_eth_stats *estats = &bp->eth_stats; 4692 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4316 struct net_device_stats *nstats = &bp->dev->stats;
4317 int i; 4693 int i;
4318 4694
4319 netdev_printk(KERN_DEBUG, bp->dev, "\n"); 4695 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4320 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" 4696 bp->dev->name,
4321 " tx pkt (%lx)\n",
4322 bnx2x_tx_avail(fp0_tx),
4323 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4324 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4325 " rx pkt (%lx)\n",
4326 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4327 fp0_rx->rx_comp_cons),
4328 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4329 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4330 "brb truncate %u\n",
4331 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4332 qstats->driver_xoff,
4333 estats->brb_drop_lo, estats->brb_truncate_lo); 4697 estats->brb_drop_lo, estats->brb_truncate_lo);
4334 printk(KERN_DEBUG "tstats: checksum_discard %u "
4335 "packets_too_big_discard %lu no_buff_discard %lu "
4336 "mac_discard %u mac_filter_discard %u "
4337 "xxovrflow_discard %u brb_truncate_discard %u "
4338 "ttl0_discard %u\n",
4339 le32_to_cpu(old_tclient->checksum_discard),
4340 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4341 bnx2x_hilo(&qstats->no_buff_discard_hi),
4342 estats->mac_discard, estats->mac_filter_discard,
4343 estats->xxoverflow_discard, estats->brb_truncate_discard,
4344 le32_to_cpu(old_tclient->ttl0_discard));
4345 4698
4346 for_each_queue(bp, i) { 4699 for_each_queue(bp, i) {
4347 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i, 4700 struct bnx2x_fastpath *fp = &bp->fp[i];
4348 bnx2x_fp(bp, i, tx_pkt), 4701 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4349 bnx2x_fp(bp, i, rx_pkt), 4702
4350 bnx2x_fp(bp, i, rx_calls)); 4703 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4704 " rx pkt(%lu) rx calls(%lu %lu)\n",
4705 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4706 fp->rx_comp_cons),
4707 le16_to_cpu(*fp->rx_cons_sb),
4708 bnx2x_hilo(&qstats->
4709 total_unicast_packets_received_hi),
4710 fp->rx_calls, fp->rx_pkt);
4711 }
4712
4713 for_each_queue(bp, i) {
4714 struct bnx2x_fastpath *fp = &bp->fp[i];
4715 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4716 struct netdev_queue *txq =
4717 netdev_get_tx_queue(bp->dev, i);
4718
4719 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4720 " tx pkt(%lu) tx calls (%lu)"
4721 " %s (Xoff events %u)\n",
4722 fp->name, bnx2x_tx_avail(fp),
4723 le16_to_cpu(*fp->tx_cons_sb),
4724 bnx2x_hilo(&qstats->
4725 total_unicast_packets_transmitted_hi),
4726 fp->tx_pkt,
4727 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4728 qstats->driver_xoff);
4351 } 4729 }
4352 } 4730 }
4353 4731
@@ -4468,6 +4846,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4468{ 4846{
4469 enum bnx2x_stats_state state = bp->stats_state; 4847 enum bnx2x_stats_state state = bp->stats_state;
4470 4848
4849 if (unlikely(bp->panic))
4850 return;
4851
4471 bnx2x_stats_stm[state][event].action(bp); 4852 bnx2x_stats_stm[state][event].action(bp);
4472 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 4853 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4473 4854
@@ -4940,9 +5321,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4940 } 5321 }
4941 5322
4942 if (fp->tpa_state[i] == BNX2X_TPA_START) 5323 if (fp->tpa_state[i] == BNX2X_TPA_START)
4943 pci_unmap_single(bp->pdev, 5324 dma_unmap_single(&bp->pdev->dev,
4944 pci_unmap_addr(rx_buf, mapping), 5325 dma_unmap_addr(rx_buf, mapping),
4945 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5326 bp->rx_buf_size, DMA_FROM_DEVICE);
4946 5327
4947 dev_kfree_skb(skb); 5328 dev_kfree_skb(skb);
4948 rx_buf->skb = NULL; 5329 rx_buf->skb = NULL;
@@ -4978,7 +5359,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4978 fp->disable_tpa = 1; 5359 fp->disable_tpa = 1;
4979 break; 5360 break;
4980 } 5361 }
4981 pci_unmap_addr_set((struct sw_rx_bd *) 5362 dma_unmap_addr_set((struct sw_rx_bd *)
4982 &bp->fp->tpa_pool[i], 5363 &bp->fp->tpa_pool[i],
4983 mapping, 0); 5364 mapping, 0);
4984 fp->tpa_state[i] = BNX2X_TPA_STOP; 5365 fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5453,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
5072 5453
5073 fp->rx_bd_prod = ring_prod; 5454 fp->rx_bd_prod = ring_prod;
5074 /* must not have more available CQEs than BDs */ 5455 /* must not have more available CQEs than BDs */
5075 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT), 5456 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5076 cqe_ring_prod); 5457 cqe_ring_prod);
5077 fp->rx_pkt = fp->rx_calls = 0; 5458 fp->rx_pkt = fp->rx_calls = 0;
5078 5459
5079 /* Warning! 5460 /* Warning!
@@ -5179,8 +5560,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
5179 context->ustorm_st_context.common.flags |= 5560 context->ustorm_st_context.common.flags |=
5180 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; 5561 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5181 context->ustorm_st_context.common.sge_buff_size = 5562 context->ustorm_st_context.common.sge_buff_size =
5182 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE, 5563 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5183 (u32)0xffff); 5564 0xffff);
5184 context->ustorm_st_context.common.sge_page_base_hi = 5565 context->ustorm_st_context.common.sge_page_base_hi =
5185 U64_HI(fp->rx_sge_mapping); 5566 U64_HI(fp->rx_sge_mapping);
5186 context->ustorm_st_context.common.sge_page_base_lo = 5567 context->ustorm_st_context.common.sge_page_base_lo =
@@ -5477,10 +5858,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5477 } 5858 }
5478 5859
5479 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ 5860 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5480 max_agg_size = 5861 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5481 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * 5862 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5482 SGE_PAGE_SIZE * PAGES_PER_SGE),
5483 (u32)0xffff);
5484 for_each_queue(bp, i) { 5863 for_each_queue(bp, i) {
5485 struct bnx2x_fastpath *fp = &bp->fp[i]; 5864 struct bnx2x_fastpath *fp = &bp->fp[i];
5486 5865
@@ -5566,7 +5945,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5566 } 5945 }
5567 5946
5568 5947
5569 /* Store it to internal memory */ 5948 /* Store cmng structures to internal memory */
5570 if (bp->port.pmf) 5949 if (bp->port.pmf)
5571 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) 5950 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5572 REG_WR(bp, BAR_XSTRORM_INTMEM + 5951 REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6037,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5658 6037
5659static int bnx2x_gunzip_init(struct bnx2x *bp) 6038static int bnx2x_gunzip_init(struct bnx2x *bp)
5660{ 6039{
5661 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE, 6040 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5662 &bp->gunzip_mapping); 6041 &bp->gunzip_mapping, GFP_KERNEL);
5663 if (bp->gunzip_buf == NULL) 6042 if (bp->gunzip_buf == NULL)
5664 goto gunzip_nomem1; 6043 goto gunzip_nomem1;
5665 6044
@@ -5679,12 +6058,13 @@ gunzip_nomem3:
5679 bp->strm = NULL; 6058 bp->strm = NULL;
5680 6059
5681gunzip_nomem2: 6060gunzip_nomem2:
5682 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6061 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5683 bp->gunzip_mapping); 6062 bp->gunzip_mapping);
5684 bp->gunzip_buf = NULL; 6063 bp->gunzip_buf = NULL;
5685 6064
5686gunzip_nomem1: 6065gunzip_nomem1:
5687 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n"); 6066 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6067 " un-compression\n");
5688 return -ENOMEM; 6068 return -ENOMEM;
5689} 6069}
5690 6070
@@ -5696,8 +6076,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
5696 bp->strm = NULL; 6076 bp->strm = NULL;
5697 6077
5698 if (bp->gunzip_buf) { 6078 if (bp->gunzip_buf) {
5699 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6079 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5700 bp->gunzip_mapping); 6080 bp->gunzip_mapping);
5701 bp->gunzip_buf = NULL; 6081 bp->gunzip_buf = NULL;
5702 } 6082 }
5703} 6083}
@@ -5735,8 +6115,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5735 6115
5736 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6116 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5737 if (bp->gunzip_outlen & 0x3) 6117 if (bp->gunzip_outlen & 0x3)
5738 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6118 netdev_err(bp->dev, "Firmware decompression error:"
5739 bp->gunzip_outlen); 6119 " gunzip_outlen (%d) not aligned\n",
6120 bp->gunzip_outlen);
5740 bp->gunzip_outlen >>= 2; 6121 bp->gunzip_outlen >>= 2;
5741 6122
5742 zlib_inflateEnd(bp->strm); 6123 zlib_inflateEnd(bp->strm);
@@ -5962,6 +6343,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
5962 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 6343 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5963} 6344}
5964 6345
6346static const struct {
6347 u32 addr;
6348 u32 mask;
6349} bnx2x_parity_mask[] = {
6350 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6351 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6352 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6353 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6354 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6355 {QM_REG_QM_PRTY_MASK, 0x0},
6356 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6357 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6358 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6359 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6360 {CDU_REG_CDU_PRTY_MASK, 0x0},
6361 {CFC_REG_CFC_PRTY_MASK, 0x0},
6362 {DBG_REG_DBG_PRTY_MASK, 0x0},
6363 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6364 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6365 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6366 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6367 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6369 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6370 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6371 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6372 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6373 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6374 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6375 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6376 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6377 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6378};
6379
6380static void enable_blocks_parity(struct bnx2x *bp)
6381{
6382 int i, mask_arr_len =
6383 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6384
6385 for (i = 0; i < mask_arr_len; i++)
6386 REG_WR(bp, bnx2x_parity_mask[i].addr,
6387 bnx2x_parity_mask[i].mask);
6388}
6389
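
enable_blocks_parity() open-codes the array-length division; with the kernel's ARRAY_SIZE() helper the same loop reads as below (a behavior-preserving sketch, not part of the patch):

	static void enable_blocks_parity(struct bnx2x *bp)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
			REG_WR(bp, bnx2x_parity_mask[i].addr,
			       bnx2x_parity_mask[i].mask);
	}
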
5965 6390
5966static void bnx2x_reset_common(struct bnx2x *bp) 6391static void bnx2x_reset_common(struct bnx2x *bp)
5967{ 6392{
@@ -5992,10 +6417,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
5992 6417
5993static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6418static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5994{ 6419{
6420 int is_required;
5995 u32 val; 6421 u32 val;
5996 u8 port; 6422 int port;
5997 u8 is_required = 0; 6423
6424 if (BP_NOMCP(bp))
6425 return;
5998 6426
6427 is_required = 0;
5999 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6428 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6000 SHARED_HW_CFG_FAN_FAILURE_MASK; 6429 SHARED_HW_CFG_FAN_FAILURE_MASK;
6001 6430
@@ -6034,7 +6463,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6034 /* set to active low mode */ 6463 /* set to active low mode */
6035 val = REG_RD(bp, MISC_REG_SPIO_INT); 6464 val = REG_RD(bp, MISC_REG_SPIO_INT);
6036 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6465 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6037 MISC_REGISTERS_SPIO_INT_OLD_SET_POS); 6466 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6038 REG_WR(bp, MISC_REG_SPIO_INT, val); 6467 REG_WR(bp, MISC_REG_SPIO_INT, val);
6039 6468
6040 /* enable interrupt to signal the IGU */ 6469 /* enable interrupt to signal the IGU */
@@ -6221,7 +6650,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6221 6650
6222 if (sizeof(union cdu_context) != 1024) 6651 if (sizeof(union cdu_context) != 1024)
6223 /* we currently assume that a context is 1024 bytes */ 6652 /* we currently assume that a context is 1024 bytes */
6224 pr_alert("please adjust the size of cdu_context(%ld)\n", 6653 dev_alert(&bp->pdev->dev, "please adjust the size "
6654 "of cdu_context(%ld)\n",
6225 (long)sizeof(union cdu_context)); 6655 (long)sizeof(union cdu_context));
6226 6656
6227 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); 6657 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6735,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6305 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6735 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6306 6736
6307 enable_blocks_attention(bp); 6737 enable_blocks_attention(bp);
6738 if (CHIP_PARITY_SUPPORTED(bp))
6739 enable_blocks_parity(bp);
6308 6740
6309 if (!BP_NOMCP(bp)) { 6741 if (!BP_NOMCP(bp)) {
6310 bnx2x_acquire_phy_lock(bp); 6742 bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6755,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6323 u32 low, high; 6755 u32 low, high;
6324 u32 val; 6756 u32 val;
6325 6757
6326 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port); 6758 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6327 6759
6328 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 6760 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6329 6761
@@ -6342,6 +6774,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6342 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6774 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6343 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6775 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6344#endif 6776#endif
6777
6345 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 6778 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6346 6779
6347 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 6780 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6967,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
6534 u32 addr, val; 6967 u32 addr, val;
6535 int i; 6968 int i;
6536 6969
6537 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func); 6970 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6538 6971
6539 /* set MSI reconfigure capability */ 6972 /* set MSI reconfigure capability */
6540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 6973 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7125,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6692#define BNX2X_PCI_FREE(x, y, size) \ 7125#define BNX2X_PCI_FREE(x, y, size) \
6693 do { \ 7126 do { \
6694 if (x) { \ 7127 if (x) { \
6695 pci_free_consistent(bp->pdev, size, x, y); \ 7128 dma_free_coherent(&bp->pdev->dev, size, x, y); \
6696 x = NULL; \ 7129 x = NULL; \
6697 y = 0; \ 7130 y = 0; \
6698 } \ 7131 } \
@@ -6773,7 +7206,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6773 7206
6774#define BNX2X_PCI_ALLOC(x, y, size) \ 7207#define BNX2X_PCI_ALLOC(x, y, size) \
6775 do { \ 7208 do { \
6776 x = pci_alloc_consistent(bp->pdev, size, y); \ 7209 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6777 if (x == NULL) \ 7210 if (x == NULL) \
6778 goto alloc_mem_err; \ 7211 goto alloc_mem_err; \
6779 memset(x, 0, size); \ 7212 memset(x, 0, size); \
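
dma_alloc_coherent() makes no zero-fill guarantee, hence the explicit memset kept inside BNX2X_PCI_ALLOC. A usage sketch (the field names are patterned on this driver's status-block allocation as recalled, and may differ):

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));
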
@@ -6906,9 +7339,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6906 if (skb == NULL) 7339 if (skb == NULL)
6907 continue; 7340 continue;
6908 7341
6909 pci_unmap_single(bp->pdev, 7342 dma_unmap_single(&bp->pdev->dev,
6910 pci_unmap_addr(rx_buf, mapping), 7343 dma_unmap_addr(rx_buf, mapping),
6911 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 7344 bp->rx_buf_size, DMA_FROM_DEVICE);
6912 7345
6913 rx_buf->skb = NULL; 7346 rx_buf->skb = NULL;
6914 dev_kfree_skb(skb); 7347 dev_kfree_skb(skb);
@@ -6987,7 +7420,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6987 7420
6988 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 7421 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6989 BNX2X_NUM_QUEUES(bp) + offset); 7422 BNX2X_NUM_QUEUES(bp) + offset);
6990 if (rc) { 7423
7424 /*
7425 * reconfigure number of tx/rx queues according to available
7426 * MSI-X vectors
7427 */
7428 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7429 /* vectors available for FP */
7430 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7431
7432 DP(NETIF_MSG_IFUP,
7433 "Trying to use less MSI-X vectors: %d\n", rc);
7434
7435 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7436
7437 if (rc) {
7438 DP(NETIF_MSG_IFUP,
7439 "MSI-X is not attainable rc %d\n", rc);
7440 return rc;
7441 }
7442
7443 bp->num_queues = min(bp->num_queues, fp_vec);
7444
7445 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7446 bp->num_queues);
7447 } else if (rc) {
6991 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 7448 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6992 return rc; 7449 return rc;
6993 } 7450 }
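
The fallback above relies on the pci_enable_msix() contract of this era: 0 on success, a negative errno on failure, or a positive count of vectors the platform can still grant. The general shape, as a sketch with illustrative nvec/minvec names:

	rc = pci_enable_msix(pdev, msix_table, nvec);
	if (rc >= minvec)
		/* can't get all nvec vectors - retry with the count
		 * the platform reported it can grant */
		rc = pci_enable_msix(pdev, msix_table, rc);
	if (rc)
		return rc;	/* caller falls back to MSI or INT#x */
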
@@ -7028,10 +7485,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7028 } 7485 }
7029 7486
7030 i = BNX2X_NUM_QUEUES(bp); 7487 i = BNX2X_NUM_QUEUES(bp);
7031 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 7488 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7032 bp->msix_table[0].vector, 7489 " ... fp[%d] %d\n",
7033 0, bp->msix_table[offset].vector, 7490 bp->msix_table[0].vector,
7034 i - 1, bp->msix_table[offset + i - 1].vector); 7491 0, bp->msix_table[offset].vector,
7492 i - 1, bp->msix_table[offset + i - 1].vector);
7035 7493
7036 return 0; 7494 return 0;
7037} 7495}
@@ -7409,8 +7867,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
7409 bp->num_queues = 1; 7867 bp->num_queues = 1;
7410 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7868 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7411 break; 7869 break;
7412
7413 case INT_MODE_MSIX:
7414 default: 7870 default:
7415 /* Set number of queues according to bp->multi_mode value */ 7871 /* Set number of queues according to bp->multi_mode value */
7416 bnx2x_set_num_queues_msix(bp); 7872 bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8112,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7656 if (bp->state == BNX2X_STATE_OPEN) 8112 if (bp->state == BNX2X_STATE_OPEN)
7657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 8113 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658#endif 8114#endif
8115 bnx2x_inc_load_cnt(bp);
7659 8116
7660 return 0; 8117 return 0;
7661 8118
@@ -7843,33 +8300,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7843 } 8300 }
7844} 8301}
7845 8302
7846/* must be called with rtnl_lock */ 8303static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7847static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7848{ 8304{
7849 int port = BP_PORT(bp); 8305 int port = BP_PORT(bp);
7850 u32 reset_code = 0; 8306 u32 reset_code = 0;
7851 int i, cnt, rc; 8307 int i, cnt, rc;
7852 8308
7853#ifdef BCM_CNIC
7854 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855#endif
7856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7857
7858 /* Set "drop all" */
7859 bp->rx_mode = BNX2X_RX_MODE_NONE;
7860 bnx2x_set_storm_rx_mode(bp);
7861
7862 /* Disable HW interrupts, NAPI and Tx */
7863 bnx2x_netif_stop(bp, 1);
7864
7865 del_timer_sync(&bp->timer);
7866 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7867 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7868 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7869
7870 /* Release IRQs */
7871 bnx2x_free_irq(bp, false);
7872
7873 /* Wait until tx fastpath tasks complete */ 8309 /* Wait until tx fastpath tasks complete */
7874 for_each_queue(bp, i) { 8310 for_each_queue(bp, i) {
7875 struct bnx2x_fastpath *fp = &bp->fp[i]; 8311 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8446,69 @@ unload_error:
8010 if (!BP_NOMCP(bp)) 8446 if (!BP_NOMCP(bp))
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 8447 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012 8448
8449}
8450
8451static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8452{
8453 u32 val;
8454
8455 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8456
8457 if (CHIP_IS_E1(bp)) {
8458 int port = BP_PORT(bp);
8459 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8461
8462 val = REG_RD(bp, addr);
8463 val &= ~(0x300);
8464 REG_WR(bp, addr, val);
8465 } else if (CHIP_IS_E1H(bp)) {
8466 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8467 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8468 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8469 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8470 }
8471}
8472
8473/* must be called with rtnl_lock */
8474static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8475{
8476 int i;
8477
8478 if (bp->state == BNX2X_STATE_CLOSED) {
8479 /* Interface has been removed - nothing to recover */
8480 bp->recovery_state = BNX2X_RECOVERY_DONE;
8481 bp->is_leader = 0;
8482 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8483 smp_wmb();
8484
8485 return -EINVAL;
8486 }
8487
8488#ifdef BCM_CNIC
8489 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8490#endif
8491 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8492
8493 /* Set "drop all" */
8494 bp->rx_mode = BNX2X_RX_MODE_NONE;
8495 bnx2x_set_storm_rx_mode(bp);
8496
8497 /* Disable HW interrupts, NAPI and Tx */
8498 bnx2x_netif_stop(bp, 1);
8499
8500 del_timer_sync(&bp->timer);
8501 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8502 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8503 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8504
8505 /* Release IRQs */
8506 bnx2x_free_irq(bp, false);
8507
8508 /* Clean up the chip if needed */
8509 if (unload_mode != UNLOAD_RECOVERY)
8510 bnx2x_chip_cleanup(bp, unload_mode);
8511
8013 bp->port.pmf = 0; 8512 bp->port.pmf = 0;
8014 8513
8015 /* Free SKBs, SGEs, TPA pool and driver internals */ 8514 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8024,17 +8523,448 @@ unload_error:
8024 8523
8025 netif_carrier_off(bp->dev); 8524 netif_carrier_off(bp->dev);
8026 8525
8526 /* The last driver must disable "close the gates" if there is no
8527 * parity attention or "process kill" pending.
8528 */
8529 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8530 bnx2x_reset_is_done(bp))
8531 bnx2x_disable_close_the_gate(bp);
8532
8533 /* Reset MCP mailbox sequence if there is an ongoing recovery */
8534 if (unload_mode == UNLOAD_RECOVERY)
8535 bp->fw_seq = 0;
8536
8027 return 0; 8537 return 0;
8028} 8538}
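
nic_load now ends with bnx2x_inc_load_cnt() and unload with bnx2x_dec_load_cnt(); the helpers themselves are outside this hunk, but presumably keep the count somewhere all functions of the ASIC can see it (a shared register or shmem). The resulting pattern, sketched:

	/* on successful load */
	bnx2x_inc_load_cnt(bp);

	/* on unload: only the last function down - and only with no
	 * parity attention and no "process kill" pending - reopens
	 * the HW gates for good */
	if (!bnx2x_dec_load_cnt(bp) && !bnx2x_chk_parity_attn(bp) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);
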
8029 8539
8540/* Close gates #2, #3 and #4: */
8541static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8542{
8543 u32 val, addr;
8544
8545 /* Gates #2 and #4a are closed/opened for "not E1" only */
8546 if (!CHIP_IS_E1(bp)) {
8547 /* #4 */
8548 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8549 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8550 close ? (val | 0x1) : (val & (~(u32)1)));
8551 /* #2 */
8552 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8553 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8554 close ? (val | 0x1) : (val & (~(u32)1)));
8555 }
8556
8557 /* #3 */
8558 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8559 val = REG_RD(bp, addr);
8560 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8561
8562 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8563 close ? "closing" : "opening");
8564 mmiowb();
8565}
8566
8567#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8568
8569static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8570{
8571 /* Do some magic... */
8572 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8573 *magic_val = val & SHARED_MF_CLP_MAGIC;
8574 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8575}
8576
8577/* Restore the value of the `magic' bit.
8578 *
8579 * @param bp Driver handle.
8580 * @param magic_val Old value of the `magic' bit.
8581 */
8582static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8583{
8584 /* Restore the `magic' bit value... */
8585 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8586 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8587 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8590 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8591}
8592
8593/* Prepares for MCP reset: takes care of CLP configurations.
8594 *
8595 * @param bp
8596 * @param magic_val Old value of 'magic' bit.
8597 */
8598static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8599{
8600 u32 shmem;
8601 u32 validity_offset;
8602
8603 DP(NETIF_MSG_HW, "Starting\n");
8604
8605 /* Set `magic' bit in order to save MF config */
8606 if (!CHIP_IS_E1(bp))
8607 bnx2x_clp_reset_prep(bp, magic_val);
8608
8609 /* Get shmem offset */
8610 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8611 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8612
8613 /* Clear validity map flags */
8614 if (shmem > 0)
8615 REG_WR(bp, shmem + validity_offset, 0);
8616}
8617
8618#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8619#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8620
8621/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8622 * depending on the HW type.
8623 *
8624 * @param bp
8625 */
8626static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8627{
8628 /* special handling for emulation and FPGA,
8629 wait 10 times longer */
8630 if (CHIP_REV_IS_SLOW(bp))
8631 msleep(MCP_ONE_TIMEOUT*10);
8632 else
8633 msleep(MCP_ONE_TIMEOUT);
8634}
8635
8636static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8637{
8638 u32 shmem, cnt, validity_offset, val;
8639 int rc = 0;
8640
8641 msleep(100);
8642
8643 /* Get shmem offset */
8644 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8645 if (shmem == 0) {
8646 BNX2X_ERR("Shmem 0 return failure\n");
8647 rc = -ENOTTY;
8648 goto exit_lbl;
8649 }
8650
8651 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8652
8653 /* Wait for MCP to come up */
8654 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8655 /* TBD: it's best to check the validity map of the last port;
8656 * currently this checks port 0.
8657 */
8658 val = REG_RD(bp, shmem + validity_offset);
8659 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8660 shmem + validity_offset, val);
8661
8662 /* check that shared memory is valid. */
8663 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8664 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8665 break;
8666
8667 bnx2x_mcp_wait_one(bp);
8668 }
8669
8670 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8671
8672 /* Check that shared memory is valid. This indicates that MCP is up. */
8673 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8674 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8675 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8676 rc = -ENOTTY;
8677 goto exit_lbl;
8678 }
8679
8680exit_lbl:
8681 /* Restore the `magic' bit value */
8682 if (!CHIP_IS_E1(bp))
8683 bnx2x_clp_reset_done(bp, magic_val);
8684
8685 return rc;
8686}
8687
8688static void bnx2x_pxp_prep(struct bnx2x *bp)
8689{
8690 if (!CHIP_IS_E1(bp)) {
8691 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8692 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8693 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8694 mmiowb();
8695 }
8696}
8697
8698/*
8699 * Reset the whole chip except for:
8700 * - PCIE core
8701 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8702 * one reset bit)
8703 * - IGU
8704 * - MISC (including AEU)
8705 * - GRC
8706 * - RBCN, RBCP
8707 */
8708static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8709{
8710 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8711
8712 not_reset_mask1 =
8713 MISC_REGISTERS_RESET_REG_1_RST_HC |
8714 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8715 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8716
8717 not_reset_mask2 =
8718 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8719 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8721 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8723 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8724 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8726
8727 reset_mask1 = 0xffffffff;
8728
8729 if (CHIP_IS_E1(bp))
8730 reset_mask2 = 0xffff;
8731 else
8732 reset_mask2 = 0x1ffff;
8733
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8735 reset_mask1 & (~not_reset_mask1));
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8737 reset_mask2 & (~not_reset_mask2));
8738
8739 barrier();
8740 mmiowb();
8741
8742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8744 mmiowb();
8745}
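
The register pair used here follows the convention seen elsewhere in the driver (an inference, not spelled out in the patch): writing a bit to MISC_REGISTERS_RESET_REG_x_CLEAR asserts reset for the matching block, and writing it to ..._SET releases it, so the function pulses reset on everything outside the not_reset masks. In miniature:

	/* put the HC block into reset ... */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       MISC_REGISTERS_RESET_REG_1_RST_HC);
	/* ... and take it out again */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       MISC_REGISTERS_RESET_REG_1_RST_HC);
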
8746
8747static int bnx2x_process_kill(struct bnx2x *bp)
8748{
8749 int cnt = 1000;
8750 u32 val = 0;
8751 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8752
8753
8754 /* Empty the Tetris buffer, wait for 1s */
8755 do {
8756 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8757 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8758 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8759 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8760 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8761 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8762 ((port_is_idle_0 & 0x1) == 0x1) &&
8763 ((port_is_idle_1 & 0x1) == 0x1) &&
8764 (pgl_exp_rom2 == 0xffffffff))
8765 break;
8766 msleep(1);
8767 } while (cnt-- > 0);
8768
8769 if (cnt <= 0) {
8770 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8771 " are still"
8772 " outstanding read requests after 1s!\n");
8773 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8774 " port_is_idle_0=0x%08x,"
8775 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8776 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8777 pgl_exp_rom2);
8778 return -EAGAIN;
8779 }
8780
8781 barrier();
8782
8783 /* Close gates #2, #3 and #4 */
8784 bnx2x_set_234_gates(bp, true);
8785
8786 /* TBD: Indicate that "process kill" is in progress to MCP */
8787
8788 /* Clear "unprepared" bit */
8789 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8790 barrier();
8791
8792 /* Make sure all is written to the chip before the reset */
8793 mmiowb();
8794
8795 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8796 * PSWHST, GRC and PSWRD Tetris buffer.
8797 */
8798 msleep(1);
8799
8800 /* Prepare for chip reset: */
8801 /* MCP */
8802 bnx2x_reset_mcp_prep(bp, &val);
8803
8804 /* PXP */
8805 bnx2x_pxp_prep(bp);
8806 barrier();
8807
8808 /* reset the chip */
8809 bnx2x_process_kill_chip_reset(bp);
8810 barrier();
8811
8812 /* Recover after reset: */
8813 /* MCP */
8814 if (bnx2x_reset_mcp_comp(bp, val))
8815 return -EAGAIN;
8816
8817 /* PXP */
8818 bnx2x_pxp_prep(bp);
8819
8820 /* Open the gates #2, #3 and #4 */
8821 bnx2x_set_234_gates(bp, false);
8822
8823 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8824 * reset state, re-enable attentions. */
8825
8826 return 0;
8827}
8828
8829static int bnx2x_leader_reset(struct bnx2x *bp)
8830{
8831 int rc = 0;
8832 /* Try to recover after the failure */
8833 if (bnx2x_process_kill(bp)) {
8834 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8835 bp->dev->name);
8836 rc = -EAGAIN;
8837 goto exit_leader_reset;
8838 }
8839
8840 /* Clear "reset is in progress" bit and update the driver state */
8841 bnx2x_set_reset_done(bp);
8842 bp->recovery_state = BNX2X_RECOVERY_DONE;
8843
8844exit_leader_reset:
8845 bp->is_leader = 0;
8846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8847 smp_wmb();
8848 return rc;
8849}
8850
8851static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8852
8853/* Assumption: runs under rtnl lock. This together with the fact
8854 * that it's called only from bnx2x_reset_task() ensures that it
8855 * will never be called when netif_running(bp->dev) is false.
8856 */
8857static void bnx2x_parity_recover(struct bnx2x *bp)
8858{
8859 DP(NETIF_MSG_HW, "Handling parity\n");
8860 while (1) {
8861 switch (bp->recovery_state) {
8862 case BNX2X_RECOVERY_INIT:
8863 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8864 /* Try to get a LEADER_LOCK HW lock */
8865 if (bnx2x_trylock_hw_lock(bp,
8866 HW_LOCK_RESOURCE_RESERVED_08))
8867 bp->is_leader = 1;
8868
8869 /* Stop the driver */
8870 /* If interface has been removed - break */
8871 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8872 return;
8873
8874 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8875 /* Ensure "is_leader" and "recovery_state"
8876 * update values are seen on other CPUs
8877 */
8878 smp_wmb();
8879 break;
8880
8881 case BNX2X_RECOVERY_WAIT:
8882 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8883 if (bp->is_leader) {
8884 u32 load_counter = bnx2x_get_load_cnt(bp);
8885 if (load_counter) {
8886 /* Wait until all other functions are
8887 * down.
8888 */
8889 schedule_delayed_work(&bp->reset_task,
8890 HZ/10);
8891 return;
8892 } else {
8893 /* If all other functions are down -
8894 * try to bring the chip back to
8895 * normal. In any case it's an exit
8896 * point for a leader.
8897 */
8898 if (bnx2x_leader_reset(bp) ||
8899 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8900 printk(KERN_ERR "%s: Recovery "
8901 "has failed. Power cycle is "
8902 "needed.\n", bp->dev->name);
8903 /* Disconnect this device */
8904 netif_device_detach(bp->dev);
8905 /* Block ifup for all functions
8906 * of this ASIC until
8907 * "process kill" or power
8908 * cycle.
8909 */
8910 bnx2x_set_reset_in_progress(bp);
8911 /* Shut down the power */
8912 bnx2x_set_power_state(bp,
8913 PCI_D3hot);
8914 return;
8915 }
8916
8917 return;
8918 }
8919 } else { /* non-leader */
8920 if (!bnx2x_reset_is_done(bp)) {
8921 /* Try to get a LEADER_LOCK HW lock:
8922 * the former leader may have been
8923 * unloaded by the user or may have
8924 * released leadership for another
8925 * reason.
8926 */
8927 if (bnx2x_trylock_hw_lock(bp,
8928 HW_LOCK_RESOURCE_RESERVED_08)) {
8929 /* I'm the leader now! Restart the
8930 * switch statement.
8931 */
8932 bp->is_leader = 1;
8933 break;
8934 }
8935
8936 schedule_delayed_work(&bp->reset_task,
8937 HZ/10);
8938 return;
8939
8940 } else { /* A leader has completed
8941 * the "process kill". It's an exit
8942 * point for a non-leader.
8943 */
8944 bnx2x_nic_load(bp, LOAD_NORMAL);
8945 bp->recovery_state =
8946 BNX2X_RECOVERY_DONE;
8947 smp_wmb();
8948 return;
8949 }
8950 }
8951 default:
8952 return;
8953 }
8954 }
8955}
8956
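
bnx2x_parity_recover() reduces to a small per-function state machine, with leadership decided by a trylock on a reserved HW lock. A compressed sketch (rescheduling and error paths omitted):

	switch (bp->recovery_state) {
	case BNX2X_RECOVERY_INIT:
		/* stop the NIC; the first function to take the
		 * reserved HW lock becomes the leader */
		if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08))
			bp->is_leader = 1;
		bnx2x_nic_unload(bp, UNLOAD_RECOVERY);
		bp->recovery_state = BNX2X_RECOVERY_WAIT;
		break;
	case BNX2X_RECOVERY_WAIT:
		if (bp->is_leader && !bnx2x_get_load_cnt(bp)) {
			/* last function standing: reset the chip */
			bnx2x_leader_reset(bp);
			bnx2x_nic_load(bp, LOAD_NORMAL);
		} else if (!bp->is_leader && bnx2x_reset_is_done(bp)) {
			/* the leader finished "process kill" */
			bnx2x_nic_load(bp, LOAD_NORMAL);
			bp->recovery_state = BNX2X_RECOVERY_DONE;
		}
		break;
	default:
		break;
	}
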
8957/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8958 * scheduled on a general queue in order to prevent a deadlock.
8959 */
8030static void bnx2x_reset_task(struct work_struct *work) 8960static void bnx2x_reset_task(struct work_struct *work)
8031{ 8961{
8032 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); 8962 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8033 8963
8034#ifdef BNX2X_STOP_ON_ERROR 8964#ifdef BNX2X_STOP_ON_ERROR
8035 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 8965 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8036 " so reset not done to allow debug dump,\n" 8966 " so reset not done to allow debug dump,\n"
8037 " you will need to reboot when done\n"); 8967 KERN_ERR " you will need to reboot when done\n");
8038 return; 8968 return;
8039#endif 8969#endif
8040 8970
@@ -8043,8 +8973,12 @@ static void bnx2x_reset_task(struct work_struct *work)
8043 if (!netif_running(bp->dev)) 8973 if (!netif_running(bp->dev))
8044 goto reset_task_exit; 8974 goto reset_task_exit;
8045 8975
8046 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 8976 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8047 bnx2x_nic_load(bp, LOAD_NORMAL); 8977 bnx2x_parity_recover(bp);
8978 else {
8979 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8980 bnx2x_nic_load(bp, LOAD_NORMAL);
8981 }
8048 8982
8049reset_task_exit: 8983reset_task_exit:
8050 rtnl_unlock(); 8984 rtnl_unlock();
@@ -8264,7 +9198,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8264 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9198 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8265 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9199 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8266 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9200 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267 BNX2X_ERR("BAD MCP validity signature\n"); 9201 BNX2X_ERROR("BAD MCP validity signature\n");
8268 9202
8269 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9203 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8270 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9204 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9222,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8288 if (val < BNX2X_BC_VER) { 9222 if (val < BNX2X_BC_VER) {
8289 /* for now only warn 9223 /* for now only warn
8290 * later we might need to enforce this */ 9224 * later we might need to enforce this */
8291 BNX2X_ERR("This driver needs bc_ver %X but found %X," 9225 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
8292 " please upgrade BC\n", BNX2X_BC_VER, val); 9226 "please upgrade BC\n", BNX2X_BC_VER, val);
8293 } 9227 }
8294 bp->link_params.feature_config_flags |= 9228 bp->link_params.feature_config_flags |=
8295 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 9229 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9244,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9244 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9245 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8312 9246
8313 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4); 9247 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9248 val, val2, val3, val4);
8314} 9249}
8315 9250
8316static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9251static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9523,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8588 bp->port.advertising = (ADVERTISED_10baseT_Full | 9523 bp->port.advertising = (ADVERTISED_10baseT_Full |
8589 ADVERTISED_TP); 9524 ADVERTISED_TP);
8590 } else { 9525 } else {
8591 BNX2X_ERR("NVRAM config error. " 9526 BNX2X_ERROR("NVRAM config error. "
8592 "Invalid link_config 0x%x" 9527 "Invalid link_config 0x%x"
8593 " speed_cap_mask 0x%x\n", 9528 " speed_cap_mask 0x%x\n",
8594 bp->port.link_config, 9529 bp->port.link_config,
8595 bp->link_params.speed_cap_mask); 9530 bp->link_params.speed_cap_mask);
8596 return; 9531 return;
8597 } 9532 }
8598 break; 9533 break;
@@ -8604,11 +9539,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 bp->port.advertising = (ADVERTISED_10baseT_Half | 9539 bp->port.advertising = (ADVERTISED_10baseT_Half |
8605 ADVERTISED_TP); 9540 ADVERTISED_TP);
8606 } else { 9541 } else {
8607 BNX2X_ERR("NVRAM config error. " 9542 BNX2X_ERROR("NVRAM config error. "
8608 "Invalid link_config 0x%x" 9543 "Invalid link_config 0x%x"
8609 " speed_cap_mask 0x%x\n", 9544 " speed_cap_mask 0x%x\n",
8610 bp->port.link_config, 9545 bp->port.link_config,
8611 bp->link_params.speed_cap_mask); 9546 bp->link_params.speed_cap_mask);
8612 return; 9547 return;
8613 } 9548 }
8614 break; 9549 break;
@@ -8619,11 +9554,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8619 bp->port.advertising = (ADVERTISED_100baseT_Full | 9554 bp->port.advertising = (ADVERTISED_100baseT_Full |
8620 ADVERTISED_TP); 9555 ADVERTISED_TP);
8621 } else { 9556 } else {
8622 BNX2X_ERR("NVRAM config error. " 9557 BNX2X_ERROR("NVRAM config error. "
8623 "Invalid link_config 0x%x" 9558 "Invalid link_config 0x%x"
8624 " speed_cap_mask 0x%x\n", 9559 " speed_cap_mask 0x%x\n",
8625 bp->port.link_config, 9560 bp->port.link_config,
8626 bp->link_params.speed_cap_mask); 9561 bp->link_params.speed_cap_mask);
8627 return; 9562 return;
8628 } 9563 }
8629 break; 9564 break;
@@ -8635,11 +9570,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8635 bp->port.advertising = (ADVERTISED_100baseT_Half | 9570 bp->port.advertising = (ADVERTISED_100baseT_Half |
8636 ADVERTISED_TP); 9571 ADVERTISED_TP);
8637 } else { 9572 } else {
8638 BNX2X_ERR("NVRAM config error. " 9573 BNX2X_ERROR("NVRAM config error. "
8639 "Invalid link_config 0x%x" 9574 "Invalid link_config 0x%x"
8640 " speed_cap_mask 0x%x\n", 9575 " speed_cap_mask 0x%x\n",
8641 bp->port.link_config, 9576 bp->port.link_config,
8642 bp->link_params.speed_cap_mask); 9577 bp->link_params.speed_cap_mask);
8643 return; 9578 return;
8644 } 9579 }
8645 break; 9580 break;
@@ -8650,11 +9585,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8650 bp->port.advertising = (ADVERTISED_1000baseT_Full | 9585 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651 ADVERTISED_TP); 9586 ADVERTISED_TP);
8652 } else { 9587 } else {
8653 BNX2X_ERR("NVRAM config error. " 9588 BNX2X_ERROR("NVRAM config error. "
8654 "Invalid link_config 0x%x" 9589 "Invalid link_config 0x%x"
8655 " speed_cap_mask 0x%x\n", 9590 " speed_cap_mask 0x%x\n",
8656 bp->port.link_config, 9591 bp->port.link_config,
8657 bp->link_params.speed_cap_mask); 9592 bp->link_params.speed_cap_mask);
8658 return; 9593 return;
8659 } 9594 }
8660 break; 9595 break;
@@ -8665,11 +9600,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8665 bp->port.advertising = (ADVERTISED_2500baseX_Full | 9600 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666 ADVERTISED_TP); 9601 ADVERTISED_TP);
8667 } else { 9602 } else {
8668 BNX2X_ERR("NVRAM config error. " 9603 BNX2X_ERROR("NVRAM config error. "
8669 "Invalid link_config 0x%x" 9604 "Invalid link_config 0x%x"
8670 " speed_cap_mask 0x%x\n", 9605 " speed_cap_mask 0x%x\n",
8671 bp->port.link_config, 9606 bp->port.link_config,
8672 bp->link_params.speed_cap_mask); 9607 bp->link_params.speed_cap_mask);
8673 return; 9608 return;
8674 } 9609 }
8675 break; 9610 break;
@@ -8682,19 +9617,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8682 bp->port.advertising = (ADVERTISED_10000baseT_Full | 9617 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683 ADVERTISED_FIBRE); 9618 ADVERTISED_FIBRE);
8684 } else { 9619 } else {
8685 BNX2X_ERR("NVRAM config error. " 9620 BNX2X_ERROR("NVRAM config error. "
8686 "Invalid link_config 0x%x" 9621 "Invalid link_config 0x%x"
8687 " speed_cap_mask 0x%x\n", 9622 " speed_cap_mask 0x%x\n",
8688 bp->port.link_config, 9623 bp->port.link_config,
8689 bp->link_params.speed_cap_mask); 9624 bp->link_params.speed_cap_mask);
8690 return; 9625 return;
8691 } 9626 }
8692 break; 9627 break;
8693 9628
8694 default: 9629 default:
8695 BNX2X_ERR("NVRAM config error. " 9630 BNX2X_ERROR("NVRAM config error. "
8696 "BAD link speed link_config 0x%x\n", 9631 "BAD link speed link_config 0x%x\n",
8697 bp->port.link_config); 9632 bp->port.link_config);
8698 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 9633 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8699 bp->port.advertising = bp->port.supported; 9634 bp->port.advertising = bp->port.supported;
8700 break; 9635 break;
@@ -8823,7 +9758,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8823 9758
8824 bp->e1hov = 0; 9759 bp->e1hov = 0;
8825 bp->e1hmf = 0; 9760 bp->e1hmf = 0;
8826 if (CHIP_IS_E1H(bp)) { 9761 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
8827 bp->mf_config = 9762 bp->mf_config =
8828 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 9763 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8829 9764
@@ -8844,14 +9779,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8844 "(0x%04x)\n", 9779 "(0x%04x)\n",
8845 func, bp->e1hov, bp->e1hov); 9780 func, bp->e1hov, bp->e1hov);
8846 } else { 9781 } else {
8847 BNX2X_ERR("!!! No valid E1HOV for func %d," 9782 BNX2X_ERROR("No valid E1HOV for func %d,"
8848 " aborting\n", func); 9783 " aborting\n", func);
8849 rc = -EPERM; 9784 rc = -EPERM;
8850 } 9785 }
8851 } else { 9786 } else {
8852 if (BP_E1HVN(bp)) { 9787 if (BP_E1HVN(bp)) {
8853 BNX2X_ERR("!!! VN %d in single function mode," 9788 BNX2X_ERROR("VN %d in single function mode,"
8854 " aborting\n", BP_E1HVN(bp)); 9789 " aborting\n", BP_E1HVN(bp));
8855 rc = -EPERM; 9790 rc = -EPERM;
8856 } 9791 }
8857 } 9792 }
@@ -8887,7 +9822,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8887 9822
8888 if (BP_NOMCP(bp)) { 9823 if (BP_NOMCP(bp)) {
8889 /* only supposed to happen on emulation/FPGA */ 9824 /* only supposed to happen on emulation/FPGA */
8890 BNX2X_ERR("warning random MAC workaround active\n"); 9825 BNX2X_ERROR("warning: random MAC workaround active\n");
8891 random_ether_addr(bp->dev->dev_addr); 9826 random_ether_addr(bp->dev->dev_addr);
8892 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 9827 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893 } 9828 }
@@ -8895,6 +9830,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8895 return rc; 9830 return rc;
8896} 9831}
8897 9832
9833static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9834{
9835 int cnt, i, block_end, rodi;
9836 char vpd_data[BNX2X_VPD_LEN+1];
9837 char str_id_reg[VENDOR_ID_LEN+1];
9838 char str_id_cap[VENDOR_ID_LEN+1];
9839 u8 len;
9840
9841 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9842 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9843
9844 if (cnt < BNX2X_VPD_LEN)
9845 goto out_not_found;
9846
9847 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9848 PCI_VPD_LRDT_RO_DATA);
9849 if (i < 0)
9850 goto out_not_found;
9851
9852
9853 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9854 pci_vpd_lrdt_size(&vpd_data[i]);
9855
9856 i += PCI_VPD_LRDT_TAG_SIZE;
9857
9858 if (block_end > BNX2X_VPD_LEN)
9859 goto out_not_found;
9860
9861 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9862 PCI_VPD_RO_KEYWORD_MFR_ID);
9863 if (rodi < 0)
9864 goto out_not_found;
9865
9866 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9867
9868 if (len != VENDOR_ID_LEN)
9869 goto out_not_found;
9870
9871 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9872
9873 /* vendor specific info */
9874 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9875 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9876 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9877 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9878
9879 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9880 PCI_VPD_RO_KEYWORD_VENDOR0);
9881 if (rodi >= 0) {
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9883
9884 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9885
9886 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9887 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9888 bp->fw_ver[len] = ' ';
9889 }
9890 }
9891 return;
9892 }
9893out_not_found:
9894 return;
9895}
9896
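
For orientation, bnx2x_read_fwinfo() walks the standard PCI VPD read-only section; the layout it assumes (byte counts per the PCI spec, shown here as a reminder):

	/*
	 *   [0x90 = RO LRDT tag][len lo][len hi]      <- pci_vpd_find_tag()
	 *     ["MN"][len][manufacturer id]            <- MFR_ID keyword
	 *     ["V0"][len][vendor-specific fw string]  <- VENDOR0 keyword
	 *
	 * Each keyword field is a 3-byte header (two-character keyword
	 * plus a one-byte length, PCI_VPD_INFO_FLD_HDR_SIZE) followed
	 * by the data itself.
	 */
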
8898static int __devinit bnx2x_init_bp(struct bnx2x *bp) 9897static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899{ 9898{
8900 int func = BP_FUNC(bp); 9899 int func = BP_FUNC(bp);
@@ -8912,29 +9911,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8912#endif 9911#endif
8913 9912
8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 9913 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9914 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8916 9915
8917 rc = bnx2x_get_hwinfo(bp); 9916 rc = bnx2x_get_hwinfo(bp);
8918 9917
9918 bnx2x_read_fwinfo(bp);
8919 /* need to reset chip if undi was active */ 9919 /* need to reset chip if undi was active */
8920 if (!BP_NOMCP(bp)) 9920 if (!BP_NOMCP(bp))
8921 bnx2x_undi_unload(bp); 9921 bnx2x_undi_unload(bp);
8922 9922
8923 if (CHIP_REV_IS_FPGA(bp)) 9923 if (CHIP_REV_IS_FPGA(bp))
8924 pr_err("FPGA detected\n"); 9924 dev_err(&bp->pdev->dev, "FPGA detected\n");
8925 9925
8926 if (BP_NOMCP(bp) && (func == 0)) 9926 if (BP_NOMCP(bp) && (func == 0))
8927 pr_err("MCP disabled, must load devices in order!\n"); 9927 dev_err(&bp->pdev->dev, "MCP disabled, "
9928 "must load devices in order!\n");
8928 9929
8929 /* Set multi queue mode */ 9930 /* Set multi queue mode */
8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 9931 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 9932 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n"); 9933 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9934 "requested is not MSI-X\n");
8933 multi_mode = ETH_RSS_MODE_DISABLED; 9935 multi_mode = ETH_RSS_MODE_DISABLED;
8934 } 9936 }
8935 bp->multi_mode = multi_mode; 9937 bp->multi_mode = multi_mode;
8936 9938
8937 9939
9940 bp->dev->features |= NETIF_F_GRO;
9941
8938 /* Set TPA flags */ 9942 /* Set TPA flags */
8939 if (disable_tpa) { 9943 if (disable_tpa) {
8940 bp->flags &= ~TPA_ENABLE_FLAG; 9944 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10308,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
9304 bnx2x_release_phy_lock(bp); 10308 bnx2x_release_phy_lock(bp);
9305 } 10309 }
9306 10310
9307 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", 10311 strncpy(info->fw_version, bp->fw_ver, 32);
10312 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10313 "bc %d.%d.%d%s%s",
9308 (bp->common.bc_ver & 0xff0000) >> 16, 10314 (bp->common.bc_ver & 0xff0000) >> 16,
9309 (bp->common.bc_ver & 0xff00) >> 8, 10315 (bp->common.bc_ver & 0xff00) >> 8,
9310 (bp->common.bc_ver & 0xff), 10316 (bp->common.bc_ver & 0xff),
9311 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); 10317 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
9312 strcpy(info->bus_info, pci_name(bp->pdev)); 10318 strcpy(info->bus_info, pci_name(bp->pdev));
9313 info->n_stats = BNX2X_NUM_STATS; 10319 info->n_stats = BNX2X_NUM_STATS;
9314 info->testinfo_len = BNX2X_NUM_TESTS; 10320 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10848,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
9842 return 0; 10848 return 0;
9843} 10849}
9844 10850
9845#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9846static int bnx2x_set_coalesce(struct net_device *dev, 10851static int bnx2x_set_coalesce(struct net_device *dev,
9847 struct ethtool_coalesce *coal) 10852 struct ethtool_coalesce *coal)
9848{ 10853{
9849 struct bnx2x *bp = netdev_priv(dev); 10854 struct bnx2x *bp = netdev_priv(dev);
9850 10855
9851 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 10856 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9852 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT) 10857 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9853 bp->rx_ticks = BNX2X_MAX_COALES_TOUT; 10858 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9854 10859
9855 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 10860 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9856 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT) 10861 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9857 bp->tx_ticks = BNX2X_MAX_COALES_TOUT; 10862 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9858 10863
9859 if (netif_running(dev)) 10864 if (netif_running(dev))
9860 bnx2x_update_coalesce(bp); 10865 bnx2x_update_coalesce(bp);
@@ -9885,6 +10890,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
9885 struct bnx2x *bp = netdev_priv(dev); 10890 struct bnx2x *bp = netdev_priv(dev);
9886 int rc = 0; 10891 int rc = 0;
9887 10892
10893 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10894 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10895 return -EAGAIN;
10896 }
10897
9888 if ((ering->rx_pending > MAX_RX_AVAIL) || 10898 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9889 (ering->tx_pending > MAX_TX_AVAIL) || 10899 (ering->tx_pending > MAX_TX_AVAIL) ||
9890 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 10900 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10980,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9970 int changed = 0; 10980 int changed = 0;
9971 int rc = 0; 10981 int rc = 0;
9972 10982
10983 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10984 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10985 return -EAGAIN;
10986 }
10987
9973 /* TPA requires Rx CSUM offloading */ 10988 /* TPA requires Rx CSUM offloading */
9974 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 10989 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9975 if (!disable_tpa) { 10990 if (!disable_tpa) {
@@ -10006,6 +11021,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006 struct bnx2x *bp = netdev_priv(dev); 11021 struct bnx2x *bp = netdev_priv(dev);
10007 int rc = 0; 11022 int rc = 0;
10008 11023
11024 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11025 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11026 return -EAGAIN;
11027 }
11028
10009 bp->rx_csum = data; 11029 bp->rx_csum = data;
10010 11030
10011 /* Disable TPA, when Rx CSUM is disabled. Otherwise all 11031 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11070,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10050 u32 wr_val = 0; 11070 u32 wr_val = 0;
10051 int port = BP_PORT(bp); 11071 int port = BP_PORT(bp);
10052 static const struct { 11072 static const struct {
10053 u32 offset0; 11073 u32 offset0;
10054 u32 offset1; 11074 u32 offset1;
10055 u32 mask; 11075 u32 mask;
10056 } reg_tbl[] = { 11076 } reg_tbl[] = {
10057/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, 11077/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10058 { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, 11078 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11139,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10119 11139
10120 save_val = REG_RD(bp, offset); 11140 save_val = REG_RD(bp, offset);
10121 11141
10122 REG_WR(bp, offset, wr_val); 11142 REG_WR(bp, offset, (wr_val & mask));
10123 val = REG_RD(bp, offset); 11143 val = REG_RD(bp, offset);
10124 11144
10125 /* Restore the original register's value */ 11145 /* Restore the original register's value */
10126 REG_WR(bp, offset, save_val); 11146 REG_WR(bp, offset, save_val);
10127 11147
10128 /* verify that value is as expected value */ 11148 /* verify value is as expected */
10129 if ((val & mask) != (wr_val & mask)) 11149 if ((val & mask) != (wr_val & mask)) {
11150 DP(NETIF_MSG_PROBE,
11151 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11152 offset, val, wr_val, mask);
10130 goto test_reg_exit; 11153 goto test_reg_exit;
11154 }
10131 } 11155 }
10132 } 11156 }
10133 11157
@@ -10267,8 +11291,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10267 11291
10268 bd_prod = TX_BD(fp_tx->tx_bd_prod); 11292 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10269 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; 11293 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10270 mapping = pci_map_single(bp->pdev, skb->data, 11294 mapping = dma_map_single(&bp->pdev->dev, skb->data,
10271 skb_headlen(skb), PCI_DMA_TODEVICE); 11295 skb_headlen(skb), DMA_TO_DEVICE);
10272 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 11296 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10273 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 11297 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10274 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 11298 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11368,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10344{ 11368{
10345 int rc = 0, res; 11369 int rc = 0, res;
10346 11370
11371 if (BP_NOMCP(bp))
11372 return rc;
11373
10347 if (!netif_running(bp->dev)) 11374 if (!netif_running(bp->dev))
10348 return BNX2X_LOOPBACK_FAILED; 11375 return BNX2X_LOOPBACK_FAILED;
10349 11376
@@ -10391,6 +11418,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
10391 int i, rc; 11418 int i, rc;
10392 u32 magic, crc; 11419 u32 magic, crc;
10393 11420
11421 if (BP_NOMCP(bp))
11422 return 0;
11423
10394 rc = bnx2x_nvram_read(bp, 0, data, 4); 11424 rc = bnx2x_nvram_read(bp, 0, data, 4);
10395 if (rc) { 11425 if (rc) {
10396 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); 11426 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11498,12 @@ static void bnx2x_self_test(struct net_device *dev,
10468{ 11498{
10469 struct bnx2x *bp = netdev_priv(dev); 11499 struct bnx2x *bp = netdev_priv(dev);
10470 11500
11501 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11502 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11503 etest->flags |= ETH_TEST_FL_FAILED;
11504 return;
11505 }
11506
10471 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 11507 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10472 11508
10473 if (!netif_running(dev)) 11509 if (!netif_running(dev))
@@ -10556,7 +11592,11 @@ static const struct {
10556 11592
10557/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, 11593/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10558 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11594 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10559 8, "[%d]: tx_packets" } 11595 8, "[%d]: tx_ucast_packets" },
11596 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11597 8, "[%d]: tx_mcast_packets" },
11598 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11599 8, "[%d]: tx_bcast_packets" }
10560}; 11600};
10561 11601
10562static const struct { 11602static const struct {
@@ -10618,16 +11658,20 @@ static const struct {
10618 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 11658 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10619 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 11659 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10620 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11660 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10621 8, STATS_FLAGS_BOTH, "tx_packets" }, 11661 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11662 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11663 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11664 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11665 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
10622 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 11666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10623 8, STATS_FLAGS_PORT, "tx_mac_errors" }, 11667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10624 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 11668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10625 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, 11669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10626 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 11670/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10627 8, STATS_FLAGS_PORT, "tx_single_collisions" }, 11671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10628 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 11672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10629 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, 11673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10630/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 11674 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10631 8, STATS_FLAGS_PORT, "tx_deferred" }, 11675 8, STATS_FLAGS_PORT, "tx_deferred" },
10632 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 11676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10633 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, 11677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11687,11 @@ static const struct {
10643 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, 11687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10644 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 11688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10645 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, 11689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10646 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 11690/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10647 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, 11691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10648 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 11692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10649 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, 11693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10650/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 11694 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
10651 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, 11695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10652 { STATS_OFFSET32(pause_frames_sent_hi), 11696 { STATS_OFFSET32(pause_frames_sent_hi),
10653 8, STATS_FLAGS_PORT, "tx_pause_frames" } 11697 8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11708,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664 struct bnx2x *bp = netdev_priv(dev); 11708 struct bnx2x *bp = netdev_priv(dev);
10665 int i, num_stats; 11709 int i, num_stats;
10666 11710
10667 switch(stringset) { 11711 switch (stringset) {
10668 case ETH_SS_STATS: 11712 case ETH_SS_STATS:
10669 if (is_multi(bp)) { 11713 if (is_multi(bp)) {
10670 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 11714 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11937,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10893 break; 11937 break;
10894 11938
10895 case PCI_D3hot: 11939 case PCI_D3hot:
11940 /* If there are other clients above don't
11941 shut down the power */
11942 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11943 return 0;
11944 /* Don't shut down the power for emulation and FPGA */
11945 if (CHIP_REV_IS_SLOW(bp))
11946 return 0;
11947
10896 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 11948 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10897 pmcsr |= 3; 11949 pmcsr |= 3;
10898 11950
@@ -11182,6 +12234,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11182 int i; 12234 int i;
11183 u8 hlen = 0; 12235 u8 hlen = 0;
11184 __le16 pkt_size = 0; 12236 __le16 pkt_size = 0;
12237 struct ethhdr *eth;
12238 u8 mac_type = UNICAST_ADDRESS;
11185 12239
11186#ifdef BNX2X_STOP_ON_ERROR 12240#ifdef BNX2X_STOP_ON_ERROR
11187 if (unlikely(bp->panic)) 12241 if (unlikely(bp->panic))
@@ -11205,6 +12259,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11205 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 12259 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11206 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 12260 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11207 12261
12262 eth = (struct ethhdr *)skb->data;
12263
12264 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12265 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12266 if (is_broadcast_ether_addr(eth->h_dest))
12267 mac_type = BROADCAST_ADDRESS;
12268 else
12269 mac_type = MULTICAST_ADDRESS;
12270 }
12271
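
The broadcast test can sit inside the multicast branch because the all-ones address has the group bit set, so is_multicast_ether_addr() matches it as well. A sketch that makes the containment explicit:

	static const u8 bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	/* both predicates are true for the broadcast address */
	WARN_ON(!is_multicast_ether_addr(bcast));
	WARN_ON(!is_broadcast_ether_addr(bcast));
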
11208#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 12272#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11209 /* First, check if we need to linearize the skb (due to FW 12273 /* First, check if we need to linearize the skb (due to FW
11210 restrictions). No need to check fragmentation if page size > 8K 12274 restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12302,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11238 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 12302 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11239 12303
11240 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 12304 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11241 tx_start_bd->general_data = (UNICAST_ADDRESS << 12305 tx_start_bd->general_data = (mac_type <<
11242 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 12306 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11243 /* header nbd */ 12307 /* header nbd */
11244 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 12308 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11245 12309
@@ -11314,8 +12378,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11314 } 12378 }
11315 } 12379 }
11316 12380
11317 mapping = pci_map_single(bp->pdev, skb->data, 12381 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11318 skb_headlen(skb), PCI_DMA_TODEVICE); 12382 skb_headlen(skb), DMA_TO_DEVICE);
11319 12383
11320 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12384 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12385 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12436,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11372 if (total_pkt_bd == NULL) 12436 if (total_pkt_bd == NULL)
11373 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 12437 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11374 12438
11375 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 12439 mapping = dma_map_page(&bp->pdev->dev, frag->page,
11376 frag->size, PCI_DMA_TODEVICE); 12440 frag->page_offset,
12441 frag->size, DMA_TO_DEVICE);
11377 12442
11378 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12443 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11379 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12444 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12517,40 @@ static int bnx2x_open(struct net_device *dev)
11452 12517
11453 bnx2x_set_power_state(bp, PCI_D0); 12518 bnx2x_set_power_state(bp, PCI_D0);
11454 12519
12520 if (!bnx2x_reset_is_done(bp)) {
12521 do {
12522 /* Reset MCP mail box sequence if there is on going
12523 * recovery
12524 */
12525 bp->fw_seq = 0;
12526
12527 /* If it's the first function to load and "reset done"
12528 * is still not cleared, a previous recovery may not have
12529 * completed. We don't check the attention state here
12530 * because it may have already been cleared by a "common"
12531 * reset, but we shall proceed with "process kill" anyway.
12532 */
12533 if ((bnx2x_get_load_cnt(bp) == 0) &&
12534 bnx2x_trylock_hw_lock(bp,
12535 HW_LOCK_RESOURCE_RESERVED_08) &&
12536 (!bnx2x_leader_reset(bp))) {
12537 DP(NETIF_MSG_HW, "Recovered in open\n");
12538 break;
12539 }
12540
12541 bnx2x_set_power_state(bp, PCI_D3hot);
12542
 12543 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
 12544 " completed yet. Try again later. If you still see this"
 12545 " message after a few retries then a power cycle is"
 12546 " required.\n", bp->dev->name);
12547
12548 return -EAGAIN;
12549 } while (0);
12550 }
12551
12552 bp->recovery_state = BNX2X_RECOVERY_DONE;
12553
11455 return bnx2x_nic_load(bp, LOAD_OPEN); 12554 return bnx2x_nic_load(bp, LOAD_OPEN);
11456} 12555}
11457 12556
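The new block in bnx2x_open() gates the NIC load on the global recovery state; the do { } while (0) is just a structured way to break out once recovery succeeds. Condensed, the control flow is the following (all helpers are the ones the hunk itself calls; only the shape is rearranged):

        if (!bnx2x_reset_is_done(bp)) {
                bp->fw_seq = 0;        /* restart the MCP mailbox sequence */

                /* only the first function to load may lead the reset */
                if (bnx2x_get_load_cnt(bp) == 0 &&
                    bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08) &&
                    !bnx2x_leader_reset(bp)) {
                        /* recovered: fall through to the normal load */
                } else {
                        bnx2x_set_power_state(bp, PCI_D3hot);
                        return -EAGAIN;        /* caller should retry later */
                }
        }
        bp->recovery_state = BNX2X_RECOVERY_DONE;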
@@ -11462,9 +12561,7 @@ static int bnx2x_close(struct net_device *dev)
11462 12561
11463 /* Unload the driver, release IRQs */ 12562 /* Unload the driver, release IRQs */
11464 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 12563 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11465 if (atomic_read(&bp->pdev->enable_cnt) == 1) 12564 bnx2x_set_power_state(bp, PCI_D3hot);
11466 if (!CHIP_REV_IS_SLOW(bp))
11467 bnx2x_set_power_state(bp, PCI_D3hot);
11468 12565
11469 return 0; 12566 return 0;
11470} 12567}
@@ -11494,21 +12591,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11494 else { /* some multicasts */ 12591 else { /* some multicasts */
11495 if (CHIP_IS_E1(bp)) { 12592 if (CHIP_IS_E1(bp)) {
11496 int i, old, offset; 12593 int i, old, offset;
11497 struct dev_mc_list *mclist; 12594 struct netdev_hw_addr *ha;
11498 struct mac_configuration_cmd *config = 12595 struct mac_configuration_cmd *config =
11499 bnx2x_sp(bp, mcast_config); 12596 bnx2x_sp(bp, mcast_config);
11500 12597
11501 i = 0; 12598 i = 0;
11502 netdev_for_each_mc_addr(mclist, dev) { 12599 netdev_for_each_mc_addr(ha, dev) {
11503 config->config_table[i]. 12600 config->config_table[i].
11504 cam_entry.msb_mac_addr = 12601 cam_entry.msb_mac_addr =
11505 swab16(*(u16 *)&mclist->dmi_addr[0]); 12602 swab16(*(u16 *)&ha->addr[0]);
11506 config->config_table[i]. 12603 config->config_table[i].
11507 cam_entry.middle_mac_addr = 12604 cam_entry.middle_mac_addr =
11508 swab16(*(u16 *)&mclist->dmi_addr[2]); 12605 swab16(*(u16 *)&ha->addr[2]);
11509 config->config_table[i]. 12606 config->config_table[i].
11510 cam_entry.lsb_mac_addr = 12607 cam_entry.lsb_mac_addr =
11511 swab16(*(u16 *)&mclist->dmi_addr[4]); 12608 swab16(*(u16 *)&ha->addr[4]);
11512 config->config_table[i].cam_entry.flags = 12609 config->config_table[i].cam_entry.flags =
11513 cpu_to_le16(port); 12610 cpu_to_le16(port);
11514 config->config_table[i]. 12611 config->config_table[i].
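This hunk tracks the net-core change that replaced struct dev_mc_list and its dmi_addr field with the unified struct netdev_hw_addr list. The new iteration idiom in isolation (the dump function is illustrative):

        #include <linux/netdevice.h>

        /* Sketch: walk a device's multicast list; ha->addr replaces the
         * old mclist->dmi_addr. Caller holds the address-list lock. */
        static void dump_mc_addrs(struct net_device *dev)
        {
                struct netdev_hw_addr *ha;

                netdev_for_each_mc_addr(ha, dev)
                        pr_info("mcast MAC: %pM\n", ha->addr);
        }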
@@ -11562,18 +12659,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11562 0); 12659 0);
11563 } else { /* E1H */ 12660 } else { /* E1H */
11564 /* Accept one or more multicasts */ 12661 /* Accept one or more multicasts */
11565 struct dev_mc_list *mclist; 12662 struct netdev_hw_addr *ha;
11566 u32 mc_filter[MC_HASH_SIZE]; 12663 u32 mc_filter[MC_HASH_SIZE];
11567 u32 crc, bit, regidx; 12664 u32 crc, bit, regidx;
11568 int i; 12665 int i;
11569 12666
11570 memset(mc_filter, 0, 4 * MC_HASH_SIZE); 12667 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11571 12668
11572 netdev_for_each_mc_addr(mclist, dev) { 12669 netdev_for_each_mc_addr(ha, dev) {
11573 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 12670 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11574 mclist->dmi_addr); 12671 ha->addr);
11575 12672
11576 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); 12673 crc = crc32c_le(0, ha->addr, ETH_ALEN);
11577 bit = (crc >> 24) & 0xff; 12674 bit = (crc >> 24) & 0xff;
11578 regidx = bit >> 5; 12675 regidx = bit >> 5;
11579 bit &= 0x1f; 12676 bit &= 0x1f;
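On E1H the multicast filter is a 256-bit hash table spread over MC_HASH_SIZE 32-bit words: bits 31..24 of the little-endian CRC32c of the MAC select one of 256 bits, the top three of those pick the word and the low five the bit within it. The same computation in isolation (the helper wrapper is illustrative):

        #include <linux/crc32c.h>
        #include <linux/if_ether.h>

        /* Sketch: set the filter bit for one multicast MAC, exactly as
         * the loop above does; mc_filter[] holds MC_HASH_SIZE u32 words. */
        static void mc_filter_set(u32 *mc_filter, const u8 *mac)
        {
                u32 crc = crc32c_le(0, mac, ETH_ALEN);
                u32 bit = (crc >> 24) & 0xff;        /* 0..255 */

                mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
        }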
@@ -11690,6 +12787,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11690 struct bnx2x *bp = netdev_priv(dev); 12787 struct bnx2x *bp = netdev_priv(dev);
11691 int rc = 0; 12788 int rc = 0;
11692 12789
12790 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12791 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12792 return -EAGAIN;
12793 }
12794
11693 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 12795 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11694 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) 12796 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11695 return -EINVAL; 12797 return -EINVAL;
@@ -11717,7 +12819,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
11717 bnx2x_panic(); 12819 bnx2x_panic();
11718#endif 12820#endif
11719 /* This allows the netif to be shutdown gracefully before resetting */ 12821 /* This allows the netif to be shutdown gracefully before resetting */
11720 schedule_work(&bp->reset_task); 12822 schedule_delayed_work(&bp->reset_task, 0);
11721} 12823}
11722 12824
11723#ifdef BCM_VLAN 12825#ifdef BCM_VLAN
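reset_task becomes a delayed_work in this patch so the remove path further below can use cancel_delayed_work_sync(); scheduling with a zero delay preserves the old schedule_work() timing. The conversion pattern in miniature (all names here are illustrative):

        #include <linux/workqueue.h>

        static struct delayed_work reset_task;

        static void reset_task_fn(struct work_struct *work)
        {
                /* the recovery work would run here */
        }

        static void arm_reset_task(void)
        {
                INIT_DELAYED_WORK(&reset_task, reset_task_fn);
                schedule_delayed_work(&reset_task, 0);  /* zero delay */
        }

        static void teardown_reset_task(void)
        {
                /* waits for a running instance, then deactivates it */
                cancel_delayed_work_sync(&reset_task);
        }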
@@ -11789,18 +12891,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11789 12891
11790 rc = pci_enable_device(pdev); 12892 rc = pci_enable_device(pdev);
11791 if (rc) { 12893 if (rc) {
11792 pr_err("Cannot enable PCI device, aborting\n"); 12894 dev_err(&bp->pdev->dev,
12895 "Cannot enable PCI device, aborting\n");
11793 goto err_out; 12896 goto err_out;
11794 } 12897 }
11795 12898
11796 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 12899 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11797 pr_err("Cannot find PCI device base address, aborting\n"); 12900 dev_err(&bp->pdev->dev,
12901 "Cannot find PCI device base address, aborting\n");
11798 rc = -ENODEV; 12902 rc = -ENODEV;
11799 goto err_out_disable; 12903 goto err_out_disable;
11800 } 12904 }
11801 12905
11802 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 12906 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11803 pr_err("Cannot find second PCI device base address, aborting\n"); 12907 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12908 " base address, aborting\n");
11804 rc = -ENODEV; 12909 rc = -ENODEV;
11805 goto err_out_disable; 12910 goto err_out_disable;
11806 } 12911 }
@@ -11808,7 +12913,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11808 if (atomic_read(&pdev->enable_cnt) == 1) { 12913 if (atomic_read(&pdev->enable_cnt) == 1) {
11809 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 12914 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11810 if (rc) { 12915 if (rc) {
11811 pr_err("Cannot obtain PCI resources, aborting\n"); 12916 dev_err(&bp->pdev->dev,
12917 "Cannot obtain PCI resources, aborting\n");
11812 goto err_out_disable; 12918 goto err_out_disable;
11813 } 12919 }
11814 12920
@@ -11818,28 +12924,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11818 12924
11819 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 12925 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11820 if (bp->pm_cap == 0) { 12926 if (bp->pm_cap == 0) {
11821 pr_err("Cannot find power management capability, aborting\n"); 12927 dev_err(&bp->pdev->dev,
12928 "Cannot find power management capability, aborting\n");
11822 rc = -EIO; 12929 rc = -EIO;
11823 goto err_out_release; 12930 goto err_out_release;
11824 } 12931 }
11825 12932
11826 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 12933 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11827 if (bp->pcie_cap == 0) { 12934 if (bp->pcie_cap == 0) {
11828 pr_err("Cannot find PCI Express capability, aborting\n"); 12935 dev_err(&bp->pdev->dev,
12936 "Cannot find PCI Express capability, aborting\n");
11829 rc = -EIO; 12937 rc = -EIO;
11830 goto err_out_release; 12938 goto err_out_release;
11831 } 12939 }
11832 12940
11833 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 12941 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
11834 bp->flags |= USING_DAC_FLAG; 12942 bp->flags |= USING_DAC_FLAG;
11835 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 12943 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
11836 pr_err("pci_set_consistent_dma_mask failed, aborting\n"); 12944 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12945 " failed, aborting\n");
11837 rc = -EIO; 12946 rc = -EIO;
11838 goto err_out_release; 12947 goto err_out_release;
11839 } 12948 }
11840 12949
11841 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 12950 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11842 pr_err("System does not support DMA, aborting\n"); 12951 dev_err(&bp->pdev->dev,
12952 "System does not support DMA, aborting\n");
11843 rc = -EIO; 12953 rc = -EIO;
11844 goto err_out_release; 12954 goto err_out_release;
11845 } 12955 }
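Probe now negotiates DMA addressing through the generic API: try a 64-bit streaming mask first, require a matching coherent mask if that succeeds, otherwise fall back to 32 bits. As a standalone sketch (the wrapper is illustrative; the real code additionally sets USING_DAC_FLAG on the 64-bit path):

        #include <linux/dma-mapping.h>

        static int negotiate_dma_masks(struct device *dev)
        {
                if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                        /* streaming is 64-bit capable; coherent must match */
                        if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
                                return -EIO;
                        return 0;
                }
                /* fall back to 32-bit addressing for everything */
                if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                        return -EIO;
                return 0;
        }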
@@ -11852,7 +12962,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11852 12962
11853 bp->regview = pci_ioremap_bar(pdev, 0); 12963 bp->regview = pci_ioremap_bar(pdev, 0);
11854 if (!bp->regview) { 12964 if (!bp->regview) {
11855 pr_err("Cannot map register space, aborting\n"); 12965 dev_err(&bp->pdev->dev,
12966 "Cannot map register space, aborting\n");
11856 rc = -ENOMEM; 12967 rc = -ENOMEM;
11857 goto err_out_release; 12968 goto err_out_release;
11858 } 12969 }
@@ -11861,7 +12972,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11861 min_t(u64, BNX2X_DB_SIZE, 12972 min_t(u64, BNX2X_DB_SIZE,
11862 pci_resource_len(pdev, 2))); 12973 pci_resource_len(pdev, 2)));
11863 if (!bp->doorbells) { 12974 if (!bp->doorbells) {
11864 pr_err("Cannot map doorbell space, aborting\n"); 12975 dev_err(&bp->pdev->dev,
12976 "Cannot map doorbell space, aborting\n");
11865 rc = -ENOMEM; 12977 rc = -ENOMEM;
11866 goto err_out_unmap; 12978 goto err_out_unmap;
11867 } 12979 }
@@ -11876,6 +12988,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11876 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 12988 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11877 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 12989 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11878 12990
12991 /* Reset the load counter */
12992 bnx2x_clear_load_cnt(bp);
12993
11879 dev->watchdog_timeo = TX_TIMEOUT; 12994 dev->watchdog_timeo = TX_TIMEOUT;
11880 12995
11881 dev->netdev_ops = &bnx2x_netdev_ops; 12996 dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13078,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11963 offset = be32_to_cpu(sections[i].offset); 13078 offset = be32_to_cpu(sections[i].offset);
11964 len = be32_to_cpu(sections[i].len); 13079 len = be32_to_cpu(sections[i].len);
11965 if (offset + len > firmware->size) { 13080 if (offset + len > firmware->size) {
11966 pr_err("Section %d length is out of bounds\n", i); 13081 dev_err(&bp->pdev->dev,
13082 "Section %d length is out of bounds\n", i);
11967 return -EINVAL; 13083 return -EINVAL;
11968 } 13084 }
11969 } 13085 }
@@ -11975,7 +13091,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11975 13091
11976 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 13092 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11977 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 13093 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11978 pr_err("Section offset %d is out of bounds\n", i); 13094 dev_err(&bp->pdev->dev,
13095 "Section offset %d is out of bounds\n", i);
11979 return -EINVAL; 13096 return -EINVAL;
11980 } 13097 }
11981 } 13098 }
@@ -11987,7 +13104,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11987 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 13104 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 13105 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 13106 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", 13107 dev_err(&bp->pdev->dev,
13108 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11991 fw_ver[0], fw_ver[1], fw_ver[2], 13109 fw_ver[0], fw_ver[1], fw_ver[2],
11992 fw_ver[3], BCM_5710_FW_MAJOR_VERSION, 13110 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11993 BCM_5710_FW_MINOR_VERSION, 13111 BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13140,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12022 for (i = 0, j = 0; i < n/8; i++, j += 2) { 13140 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12023 tmp = be32_to_cpu(source[j]); 13141 tmp = be32_to_cpu(source[j]);
12024 target[i].op = (tmp >> 24) & 0xff; 13142 target[i].op = (tmp >> 24) & 0xff;
12025 target[i].offset = tmp & 0xffffff; 13143 target[i].offset = tmp & 0xffffff;
12026 target[i].raw_data = be32_to_cpu(source[j+1]); 13144 target[i].raw_data = be32_to_cpu(source[j + 1]);
12027 } 13145 }
12028} 13146}
12029 13147
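Each record that bnx2x_prep_ops() walks is a pair of big-endian 32-bit words: the first packs an 8-bit opcode in the top byte and a 24-bit offset below it, the second is the raw payload. Decoding one record in isolation (the flat struct here is a stand-in for the driver's raw_op):

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct raw_op_example {        /* illustrative stand-in */
                u8  op;
                u32 offset;
                u32 raw_data;
        };

        static void decode_op(const __be32 *src, struct raw_op_example *dst)
        {
                u32 tmp = be32_to_cpu(src[0]);

                dst->op       = (tmp >> 24) & 0xff;   /* top byte */
                dst->offset   = tmp & 0xffffff;       /* low 24 bits */
                dst->raw_data = be32_to_cpu(src[1]);
        }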
@@ -12057,20 +13175,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12057 13175
12058 if (CHIP_IS_E1(bp)) 13176 if (CHIP_IS_E1(bp))
12059 fw_file_name = FW_FILE_NAME_E1; 13177 fw_file_name = FW_FILE_NAME_E1;
12060 else 13178 else if (CHIP_IS_E1H(bp))
12061 fw_file_name = FW_FILE_NAME_E1H; 13179 fw_file_name = FW_FILE_NAME_E1H;
13180 else {
13181 dev_err(dev, "Unsupported chip revision\n");
13182 return -EINVAL;
13183 }
12062 13184
12063 pr_info("Loading %s\n", fw_file_name); 13185 dev_info(dev, "Loading %s\n", fw_file_name);
12064 13186
12065 rc = request_firmware(&bp->firmware, fw_file_name, dev); 13187 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12066 if (rc) { 13188 if (rc) {
12067 pr_err("Can't load firmware file %s\n", fw_file_name); 13189 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
12068 goto request_firmware_exit; 13190 goto request_firmware_exit;
12069 } 13191 }
12070 13192
12071 rc = bnx2x_check_firmware(bp); 13193 rc = bnx2x_check_firmware(bp);
12072 if (rc) { 13194 if (rc) {
12073 pr_err("Corrupt firmware file %s\n", fw_file_name); 13195 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
12074 goto request_firmware_exit; 13196 goto request_firmware_exit;
12075 } 13197 }
12076 13198
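The firmware path above follows the usual request/validate/abort pattern: pick a file per chip revision, request it, and release it again if validation fails. A sketch (fw_check() is a stand-in for bnx2x_check_firmware()):

        #include <linux/firmware.h>

        static int fw_check(const struct firmware *fw);  /* stand-in */

        static int load_and_check_fw(struct device *dev, const char *name,
                                     const struct firmware **fw)
        {
                int rc = request_firmware(fw, name, dev);

                if (rc) {
                        dev_err(dev, "Can't load firmware file %s\n", name);
                        return rc;
                }
                rc = fw_check(*fw);
                if (rc)
                        release_firmware(*fw);  /* don't keep bad firmware */
                return rc;
        }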
@@ -12129,7 +13251,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12129 /* dev zeroed in init_etherdev */ 13251 /* dev zeroed in init_etherdev */
12130 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 13252 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12131 if (!dev) { 13253 if (!dev) {
12132 pr_err("Cannot allocate net device\n"); 13254 dev_err(&pdev->dev, "Cannot allocate net device\n");
12133 return -ENOMEM; 13255 return -ENOMEM;
12134 } 13256 }
12135 13257
@@ -12151,7 +13273,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12151 /* Set init arrays */ 13273 /* Set init arrays */
12152 rc = bnx2x_init_firmware(bp, &pdev->dev); 13274 rc = bnx2x_init_firmware(bp, &pdev->dev);
12153 if (rc) { 13275 if (rc) {
12154 pr_err("Error loading firmware\n"); 13276 dev_err(&pdev->dev, "Error loading firmware\n");
12155 goto init_one_exit; 13277 goto init_one_exit;
12156 } 13278 }
12157 13279
@@ -12162,11 +13284,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12162 } 13284 }
12163 13285
12164 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 13286 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12165 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 13287 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
12166 board_info[ent->driver_data].name, 13288 " IRQ %d, ", board_info[ent->driver_data].name,
12167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 13289 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12168 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 13290 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12169 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13291 dev->base_addr, bp->pdev->irq);
13292 pr_cont("node addr %pM\n", dev->dev_addr);
12170 13293
12171 return 0; 13294 return 0;
12172 13295
@@ -12194,13 +13317,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12194 struct bnx2x *bp; 13317 struct bnx2x *bp;
12195 13318
12196 if (!dev) { 13319 if (!dev) {
12197 pr_err("BAD net device from bnx2x_init_one\n"); 13320 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12198 return; 13321 return;
12199 } 13322 }
12200 bp = netdev_priv(dev); 13323 bp = netdev_priv(dev);
12201 13324
12202 unregister_netdev(dev); 13325 unregister_netdev(dev);
12203 13326
13327 /* Make sure RESET task is not scheduled before continuing */
13328 cancel_delayed_work_sync(&bp->reset_task);
13329
12204 kfree(bp->init_ops_offsets); 13330 kfree(bp->init_ops_offsets);
12205 kfree(bp->init_ops); 13331 kfree(bp->init_ops);
12206 kfree(bp->init_data); 13332 kfree(bp->init_data);
@@ -12227,7 +13353,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12227 struct bnx2x *bp; 13353 struct bnx2x *bp;
12228 13354
12229 if (!dev) { 13355 if (!dev) {
12230 pr_err("BAD net device from bnx2x_init_one\n"); 13356 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12231 return -ENODEV; 13357 return -ENODEV;
12232 } 13358 }
12233 bp = netdev_priv(dev); 13359 bp = netdev_priv(dev);
@@ -12259,11 +13385,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
12259 int rc; 13385 int rc;
12260 13386
12261 if (!dev) { 13387 if (!dev) {
12262 pr_err("BAD net device from bnx2x_init_one\n"); 13388 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12263 return -ENODEV; 13389 return -ENODEV;
12264 } 13390 }
12265 bp = netdev_priv(dev); 13391 bp = netdev_priv(dev);
12266 13392
13393 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13394 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13395 return -EAGAIN;
13396 }
13397
12267 rtnl_lock(); 13398 rtnl_lock();
12268 13399
12269 pci_restore_state(pdev); 13400 pci_restore_state(pdev);
@@ -12430,6 +13561,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12430 struct net_device *dev = pci_get_drvdata(pdev); 13561 struct net_device *dev = pci_get_drvdata(pdev);
12431 struct bnx2x *bp = netdev_priv(dev); 13562 struct bnx2x *bp = netdev_priv(dev);
12432 13563
13564 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13565 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13566 return;
13567 }
13568
12433 rtnl_lock(); 13569 rtnl_lock();
12434 13570
12435 bnx2x_eeh_recover(bp); 13571 bnx2x_eeh_recover(bp);