author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/net/bnx2x_main.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

    Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'drivers/net/bnx2x_main.c')
 drivers/net/bnx2x_main.c | 1731
 1 file changed, 1113 insertions(+), 618 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 20f0ed956df2..6c042a72d6cc 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
 /* bnx2x_main.c: Broadcom Everest network driver.
  *
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,6 +49,7 @@
 #include <linux/prefetch.h>
 #include <linux/zlib.h>
 #include <linux/io.h>
+#include <linux/stringify.h>
 
 
 #include "bnx2x.h"
@@ -56,15 +57,20 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1"
-#define DRV_MODULE_RELDATE	"2009/08/12"
+#define DRV_MODULE_VERSION	"1.52.1-7"
+#define DRV_MODULE_RELDATE	"2010/02/28"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
 /* FW files */
-#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
-#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
+#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
 
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT		(5*HZ)
@@ -77,21 +83,18 @@ MODULE_AUTHOR("Eliezer Tamir");
77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION); 85MODULE_VERSION(DRV_MODULE_VERSION);
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
80 88
81static int multi_mode = 1; 89static int multi_mode = 1;
82module_param(multi_mode, int, 0); 90module_param(multi_mode, int, 0);
83MODULE_PARM_DESC(multi_mode, " Multi queue mode " 91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))"); 92 "(0 Disable; 1 Enable (default))");
85 93
86static int num_rx_queues; 94static int num_queues;
87module_param(num_rx_queues, int, 0); 95module_param(num_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" 96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
89 " (default is half number of CPUs)"); 97 " (default is as a number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
95 98
96static int disable_tpa; 99static int disable_tpa;
97module_param(disable_tpa, int, 0); 100module_param(disable_tpa, int, 0);
@@ -137,7 +140,7 @@ static struct {
137}; 140};
138 141
139 142
140static const struct pci_device_id bnx2x_pci_tbl[] = { 143static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -511,24 +514,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
 
	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
-	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
+	pr_err("begin fw dump (mark 0x%x)\n", mark);
 
-	printk(KERN_ERR PFX);
+	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
-		printk(KERN_CONT "%s", (char *)data);
+		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
-		printk(KERN_CONT "%s", (char *)data);
+		pr_cont("%s", (char *)data);
	}
-	printk(KERN_ERR PFX "end of fw dump\n");
+	pr_err("end of fw dump\n");
 }
 
 static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -550,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
@@ -567,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
	}
 
	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
@@ -582,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
	/* Rings */
	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -616,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
	}
 
	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -742,6 +745,9 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
+#ifdef BCM_CNIC
+		offset++;
+#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
@@ -781,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
	barrier();
 }
 
-static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
	struct host_status_block *fpsb = fp->status_blk;
-	u16 rc = 0;
 
	barrier(); /* status block is written to by the chip */
-	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
-		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-		rc |= 1;
-	}
-	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
-		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-		rc |= 2;
-	}
-	return rc;
+	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
 }
 
 static u16 bnx2x_ack_int(struct bnx2x *bp)
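Note: bnx2x_update_fpsb_idx() can drop its return code because, after this patch, the interrupt paths (reworked further down) always schedule NAPI instead of branching on which status-block index moved; the per-direction checks become standalone helpers. bnx2x_has_tx_work() is added later in this patch; a sketch of the matching Rx-side check, shown only to illustrate the pattern (not the verbatim driver code):

	static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
	{
		/* Tell compiler that status block fields can change */
		barrier();
		return fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb);
	}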
@@ -835,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
 
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);
 
@@ -879,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
	/* release skb */
	WARN_ON(!skb);
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;
 
@@ -892,7 +893,6 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
	u16 prod;
	u16 cons;
 
-	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;
 
@@ -909,19 +909,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
	return (s16)(fp->bp->tx_ring_size) - used;
 }
 
-static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	return hw_cons != fp->tx_pkt_cons;
+}
+
+static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
 {
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-	int done = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
-		return;
+		return -1;
 #endif
 
-	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;
 
@@ -942,30 +951,46 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 */
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
-		done++;
	}
 
	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;
 
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that
+	 * start_xmit() will miss it and cause the queue to be stopped
+	 * forever.
+	 */
+	smp_mb();
+
	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
-
-		/* Need to make the tx_bd_cons update visible to start_xmit()
-		 * before checking for netif_tx_queue_stopped().  Without the
-		 * memory barrier, there is a small possibility that
-		 * start_xmit() will miss it and cause the queue to be stopped
-		 * forever.
+		/* Taking tx_lock() is needed to prevent reenabling the queue
+		 * while it's empty. This could have happen if rx_action() gets
+		 * suspended in bnx2x_tx_int() after the condition before
+		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+		 *
+		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
+		 * sends some packets consuming the whole queue again->
+		 * stops the queue
		 */
-		smp_mb();
+
+		__netif_tx_lock(txq, smp_processor_id());
 
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
+
+		__netif_tx_unlock(txq);
	}
+	return 0;
 }
 
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+#endif
 
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
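Note: the relocated smp_mb() and the new __netif_tx_lock() both serve the stop/wake protocol whose other half lives in bnx2x_start_xmit(). A condensed sketch of the producer side it pairs with (illustrative, not the verbatim source):

	/* in bnx2x_start_xmit(), after posting the packet's BDs */
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* pairs with the smp_mb() in bnx2x_tx_int(): make the
		 * stopped state visible before re-reading tx_bd_cons,
		 * otherwise the final wake-up can be lost */
		smp_mb();
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}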
@@ -1022,16 +1047,24 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;
 
+#ifdef BCM_CNIC
+	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
+		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
+		bnx2x_cnic_cfc_comp(bp, cid);
+		break;
+#endif
 
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-		bp->set_mac_pending = 0;
+		bp->set_mac_pending--;
+		smp_wmb();
		break;
 
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
+		bp->set_mac_pending--;
+		smp_wmb();
		break;
 
	default:
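Note: set_mac_pending changes from a flag (= 0) to a reference count (--) with an smp_wmb() so that a waiter polling the counter sees the decrement in order. A sketch of the polling side this pairs with (the driver's actual wait helper may differ in detail):

	/* waiter sketch: spin until all outstanding set-MAC ramrods
	 * complete; pairs with the smp_wmb() after set_mac_pending-- */
	while (bp->set_mac_pending && cnt--) {
		msleep(1);
		smp_rmb();
	}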
@@ -1539,6 +1572,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
+			prefetch(skb);
+			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;
 
@@ -1720,27 +1755,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
 #endif
-	/* Handle Rx or Tx according to MSI-X vector */
-	if (fp->is_rx_queue) {
-		prefetch(fp->rx_cons_sb);
-		prefetch(&fp->status_blk->u_status_block.status_block_index);
-
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-	} else {
-		prefetch(fp->tx_cons_sb);
-		prefetch(&fp->status_blk->c_status_block.status_block_index);
 
-		bnx2x_update_fpsb_idx(fp);
-		rmb();
-		bnx2x_tx_int(fp);
-
-		/* Re-enable interrupts */
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
-	}
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
	return IRQ_HANDLED;
 }
@@ -1775,35 +1796,32 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
-			/* Handle Rx or Tx according to SB id */
-			if (fp->is_rx_queue) {
-				prefetch(fp->rx_cons_sb);
-				prefetch(&fp->status_blk->u_status_block.
							status_block_index);
-
-				napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-			} else {
-				prefetch(fp->tx_cons_sb);
-				prefetch(&fp->status_blk->c_status_block.
							status_block_index);
-
-				bnx2x_update_fpsb_idx(fp);
-				rmb();
-				bnx2x_tx_int(fp);
-
-				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
-			}
+			/* Handle Rx and Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
+			prefetch(fp->tx_cons_sb);
+			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}
 
+#ifdef BCM_CNIC
+	mask = 0x2 << CNIC_SB_ID(bp);
+	if (status & (mask | 0x1)) {
+		struct cnic_ops *c_ops = NULL;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+
+		status &= ~mask;
+	}
+#endif
 
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
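Note: the new BCM_CNIC block dispatches CNIC (iSCSI/FCoE offload) events through an RCU-protected ops pointer, so the hot IRQ path never takes a lock. The registration side has to publish the pointer with the matching primitive; a condensed sketch (illustrative only, not the full registration function):

	/* publisher sketch: readers under rcu_read_lock() then see either
	 * NULL or a fully-initialized ops structure, never a torn one */
	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	/* and on unregister: */
	rcu_assign_pointer(bp->cnic_ops, NULL);
	synchronize_rcu();	/* wait out in-flight cnic_handler() calls */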
@@ -2128,40 +2146,52 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 
 static void bnx2x_link_report(struct bnx2x *bp)
 {
-	if (bp->state == BNX2X_STATE_DISABLED) {
+	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
-		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}
 
	if (bp->link_vars.link_up) {
+		u16 line_speed;
+
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
-		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+		netdev_info(bp->dev, "NIC Link is Up, ");
+
+		line_speed = bp->link_vars.line_speed;
+		if (IS_E1HMF(bp)) {
+			u16 vn_max_rate;
 
-		printk("%d Mbps ", bp->link_vars.line_speed);
+			vn_max_rate =
+				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		}
+		pr_cont("%d Mbps ", line_speed);
 
		if (bp->link_vars.duplex == DUPLEX_FULL)
-			printk("full duplex");
+			pr_cont("full duplex");
		else
-			printk("half duplex");
+			pr_cont("half duplex");
 
		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
-				printk(", receive ");
+				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
-					printk("& transmit ");
+					pr_cont("& transmit ");
			} else {
-				printk(", transmit ");
+				pr_cont(", transmit ");
			}
-			printk("flow control ON");
+			pr_cont("flow control ON");
		}
-		printk("\n");
+		pr_cont("\n");
 
	} else { /* link_down */
		netif_carrier_off(bp->dev);
-		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+		netdev_err(bp->dev, "NIC Link is Down\n");
	}
 }
 
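Worked example for the new vn_max_rate clamp in bnx2x_link_report(): the MAX_BW field of mf_config is in units of 100 Mbps, so a multi-function configuration value of, say, 25 yields 25 * 100 = 2500 Mbps; on a 10G link (line_speed == 10000) the reported speed is then 2500 Mbps, since vn_max_rate < line_speed.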
@@ -2304,8 +2334,14 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
	}
 
	/* ... only if all min rates are zeros - disable fairness */
-	if (all_zero)
-		bp->vn_weight_sum = 0;
+	if (all_zero) {
+		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
+	} else
+		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
@@ -2324,17 +2360,14 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
-		if (bp->vn_weight_sum && (vn_min_rate == 0))
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
-
	DP(NETIF_MSG_IFUP,
-	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
+	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 
	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
@@ -2405,8 +2438,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
-		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
+		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
 
@@ -2449,9 +2481,7 @@
 
 static void bnx2x__link_status_update(struct bnx2x *bp)
 {
-	int func = BP_FUNC(bp);
-
-	if (bp->state != BNX2X_STATE_OPEN)
+	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;
 
	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2461,7 +2491,6 @@ static void bnx2x__link_status_update(struct bnx2x *bp)
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	bnx2x_calc_vn_weight_sum(bp);
 
	/* indicate link status */
@@ -2501,6 +2530,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
+	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
@@ -2510,8 +2540,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 
		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
 
-		/* Give the FW up to 2 second (200*10ms) */
-	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+		/* Give the FW up to 5 second (500*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
 
	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);
@@ -2525,32 +2555,23 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
		bnx2x_fw_dump(bp);
		rc = 0;
	}
+	mutex_unlock(&bp->fw_mb_mutex);
 
	return rc;
 }
 
 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
+static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
 static void bnx2x_set_rx_mode(struct net_device *dev);
 
 static void bnx2x_e1h_disable(struct bnx2x *bp)
 {
	int port = BP_PORT(bp);
-	int i;
-
-	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
 
	netif_tx_disable(bp->dev);
-	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
-	bnx2x_set_mac_addr_e1h(bp, 0);
-
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-
	netif_carrier_off(bp->dev);
 }
 
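Note: the new fw_mb_mutex closes a race in bnx2x_fw_command(): the driver<->MCP mailbox is a single shared-memory slot plus a rolling sequence number, so two concurrent callers could overwrite each other's command or consume each other's acknowledgement. Schematically, the whole exchange is now one critical section:

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	/* ... poll fw_mb_header until the FW echoes back seq ... */
	mutex_unlock(&bp->fw_mb_mutex);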
@@ -2560,13 +2581,13 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
 
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 
-	bnx2x_set_mac_addr_e1h(bp, 1);
-
	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);
 
-	/* Initialize the receive filter. */
-	bnx2x_set_rx_mode(bp->dev);
+	/*
+	 * Should not call netif_carrier_on since it will be called if the link
+	 * is up when checking for link state
+	 */
 }
 
 static void bnx2x_update_min_max(struct bnx2x *bp)
@@ -2605,21 +2626,23 @@ static void bnx2x_update_min_max(struct bnx2x *bp)
 
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
-	int func = BP_FUNC(bp);
-
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
-	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 
	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
 
+		/*
+		 * This is the only place besides the function initialization
+		 * where the bp->flags can change so it is done without any
+		 * locks
+		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
-			bp->state = BNX2X_STATE_DISABLED;
+			bp->flags |= MF_FUNC_DIS;
 
			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
-			bp->state = BNX2X_STATE_OPEN;
+			bp->flags &= ~MF_FUNC_DIS;
 
			bnx2x_e1h_enable(bp);
		}
@@ -2638,11 +2661,40 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 }
 
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+	struct eth_spe *next_spe = bp->spq_prod_bd;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(NETIF_MSG_TIMER, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+	return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	/* Make sure that BD data is updated before writing the producer */
+	wmb();
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
+	mmiowb();
+}
+
 /* the slow path queue is odd since completions arrive on the fastpath ring */
 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
 {
-	int func = BP_FUNC(bp);
+	struct eth_spe *spe;
 
	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
@@ -2664,38 +2716,23 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		return -EBUSY;
	}
 
+	spe = bnx2x_sp_get_next(bp);
+
	/* CID needs port number to be encoded int it */
-	bp->spq_prod_bd->hdr.conn_and_cmd_data =
+	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
-	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
-		bp->spq_prod_bd->hdr.type |=
+		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
 
-	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
-	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
+	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
 
	bp->spq_left--;
 
-	if (bp->spq_prod_bd == bp->spq_last_bd) {
-		bp->spq_prod_bd = bp->spq;
-		bp->spq_prod_idx = 0;
-		DP(NETIF_MSG_TIMER, "end of spq\n");
-
-	} else {
-		bp->spq_prod_bd++;
-		bp->spq_prod_idx++;
-	}
-
-	/* Make sure that BD data is updated before writing the producer */
-	wmb();
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
-
-	mmiowb();
-
+	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
 }
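Note: with the wrap/advance and doorbell factored into bnx2x_sp_get_next() and bnx2x_sp_prod_update(), bnx2x_sp_post() takes the usual claim-fill-publish shape for a single-producer ring; schematically:

	spin_lock_bh(&bp->spq_lock);
	spe = bnx2x_sp_get_next(bp);	/* claim slot, advance prod, wrap */
	/* ... fill spe->hdr and spe->data ... */
	bp->spq_left--;
	bnx2x_sp_prod_update(bp);	/* wmb(), doorbell write, mmiowb() */
	spin_unlock_bh(&bp->spq_lock);

The wmb() inside bnx2x_sp_prod_update() keeps the BD contents globally visible before the producer index reaches the chip.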
@@ -2873,10 +2910,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
		     bp->link_params.ext_phy_config);
 
	/* log the failure */
-	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
		   "Please contact Dell Support for assistance.\n");
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3024,6 +3059,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
			int func = BP_FUNC(bp);
 
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bp->mf_config = SHMEM_RD(bp,
					mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
@@ -3227,6 +3264,17 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
		return IRQ_HANDLED;
 #endif
 
+#ifdef BCM_CNIC
+	{
+		struct cnic_ops *c_ops;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+	}
+#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
	return IRQ_HANDLED;
@@ -3958,7 +4006,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;
 
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
@@ -4175,7 +4223,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
	nstats->rx_dropped = estats->mac_discard;
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
 
@@ -4229,7 +4277,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
 
		estats->driver_xoff += qstats->driver_xoff;
@@ -4258,9 +4306,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);
 
-	if (bp->msglevel & NETIF_MSG_TIMER) {
+	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
+		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4268,7 +4316,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;
 
-		printk(KERN_DEBUG "%s:\n", bp->dev->name);
+		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
@@ -4426,7 +4474,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
	/* Make sure the state has been "changed" */
	smp_wmb();
 
-	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
+	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
 }
@@ -4640,8 +4688,7 @@ static void bnx2x_timer(unsigned long data)
		}
	}
 
-	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
+	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 
 timer_restart:
@@ -4860,21 +4907,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
-			bp->rx_ticks/12);
+			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
-			 (bp->rx_ticks/12) ? 0 : 1);
+			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
 
		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
-			bp->tx_ticks/12);
+			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
-			 (bp->tx_ticks/12) ? 0 : 1);
+			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
 }
 
@@ -4916,7 +4963,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
	if (bp->flags & TPA_ENABLE_FLAG) {
 
-		for_each_rx_queue(bp, j) {
+		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];
 
			for (i = 0; i < max_agg_queues; i++) {
@@ -4939,16 +4986,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
		}
	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];
 
		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
 
-		/* Mark queue as Rx */
-		fp->is_rx_queue = 1;
-
		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5054,7 +5098,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 {
	int i, j;
 
-	for_each_tx_queue(bp, j) {
+	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];
 
		for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5080,10 +5124,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
-
-	/* clean tx statistics */
-	for_each_rx_queue(bp, i)
-		bnx2x_fp(bp, i, tx_pkt) = 0;
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5112,7 +5152,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 {
	int i;
 
-	for_each_rx_queue(bp, i) {
+	/* Rx */
+	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
@@ -5164,10 +5205,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
					       ETH_CONNECTION_TYPE);
	}
 
-	for_each_tx_queue(bp, i) {
+	/* Tx */
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
-			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
+			bnx2x_sp(bp, context[i].eth);
 
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
@@ -5195,7 +5237,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % bp->num_rx_queues));
+			bp->fp->cl_id + (i % bp->num_queues));
 }
 
 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5235,7 +5277,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
-	int mask = (1 << BP_L_ID(bp));
+	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
@@ -5348,6 +5390,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
	       (*(u32 *)&tstorm_config));
 
	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
+	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);
 
	for_each_queue(bp, i) {
@@ -5438,7 +5481,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
 
		REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5473,7 +5516,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;
 
-		for_each_rx_queue(bp, i) {
+		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
 
			if (!fp->disable_tpa) {
@@ -5504,20 +5547,18 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);
 
+		if (!BP_NOMCP(bp))
+			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);
 
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);
 
		/* Enable rate shaping and fairness */
-		bp->cmng.flags.cmng_enables =
+		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-		if (bp->vn_weight_sum)
-			bp->cmng.flags.cmng_enables |=
-					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-		else
-			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
+
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
@@ -5565,10 +5606,11 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
+#ifdef BCM_CNIC
+		fp->sb_id = fp->cl_id + 1;
+#else
		fp->sb_id = fp->cl_id;
-		/* Suitable Rx and Tx SBs are served by the same client */
-		if (i >= bp->num_rx_queues)
-			fp->cl_id -= bp->num_rx_queues;
+#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -5642,8 +5684,7 @@ gunzip_nomem2:
	bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
-	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
+	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
 }
 
@@ -5689,14 +5730,13 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 
	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
-		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);
+		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);
 
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
-		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
+		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;
 
	zlib_inflateEnd(bp->strm);
@@ -5867,7 +5907,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-#ifndef BCM_ISCSI
+#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 #endif
@@ -6006,6 +6046,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 static int bnx2x_init_common(struct bnx2x *bp)
 {
	u32 val, i;
+#ifdef BCM_CNIC
+	u32 wb_write[2];
+#endif
 
	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
 
@@ -6048,7 +6091,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 #endif
 
	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
@@ -6091,11 +6134,26 @@ static int bnx2x_init_common(struct bnx2x *bp)
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 
	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+
+#ifdef BCM_CNIC
+	wb_write[0] = 0;
+	wb_write[1] = 0;
+	for (i = 0; i < 64; i++) {
+		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
+		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
+
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
+			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
+		}
+	}
+#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
 #endif
 
@@ -6109,8 +6167,10 @@ static int bnx2x_init_common(struct bnx2x *bp)
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
 
@@ -6145,12 +6205,24 @@ static int bnx2x_init_common(struct bnx2x *bp)
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+#ifdef BCM_CNIC
+	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
-		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
+		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));
 
	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
@@ -6261,38 +6333,14 @@ static int bnx2x_init_port(struct bnx2x *bp)
	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
-#ifdef BCM_ISCSI
-	/* Port0  1
	 * Port1  385 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
-
-	/* Port0  2
	 * Port1  386 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
-
-	/* Port0  3
	 * Port1  387 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
-#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
 
-#ifdef BCM_ISCSI
-	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
-	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
+#ifdef BCM_CNIC
+	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
 
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 #endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
 
@@ -6350,18 +6398,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 
-#ifdef BCM_ISCSI
-	/* tell the searcher where the T2 table is */
-	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
-
-	wb_write[0] = U64_LO(bp->t2_mapping);
-	wb_write[1] = U64_HI(bp->t2_mapping);
-	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
-	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
-	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
-	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
-
-	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
 #endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
@@ -6470,7 +6508,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
6470#define PXP_ONE_ILT(x) (((x) << 10) | x) 6508#define PXP_ONE_ILT(x) (((x) << 10) | x)
6471#define PXP_ILT_RANGE(f, l) (((l) << 10) | f) 6509#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6472 6510
6511#ifdef BCM_CNIC
6512#define CNIC_ILT_LINES 127
6513#define CNIC_CTX_PER_ILT 16
6514#else
6473#define CNIC_ILT_LINES 0 6515#define CNIC_ILT_LINES 0
6516#endif
6474 6517
6475static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6518static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6476{ 6519{
@@ -6509,6 +6552,46 @@ static int bnx2x_init_func(struct bnx2x *bp)
6509 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, 6552 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6510 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); 6553 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6511 6554
6555#ifdef BCM_CNIC
6556 i += 1 + CNIC_ILT_LINES;
6557 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6558 if (CHIP_IS_E1(bp))
6559 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6560 else {
6561 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6562 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6563 }
6564
6565 i++;
6566 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6567 if (CHIP_IS_E1(bp))
6568 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6569 else {
6570 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6571 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6572 }
6573
6574 i++;
6575 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6576 if (CHIP_IS_E1(bp))
6577 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6578 else {
6579 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6580 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6581 }
6582
6583 /* tell the searcher where the T2 table is */
6584 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6585
6586 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6587 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6588
6589 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6590 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6591 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6592
6593 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6594#endif
6512 6595
6513 if (CHIP_IS_E1H(bp)) { 6596 if (CHIP_IS_E1H(bp)) {
6514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 6597 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
@@ -6593,6 +6676,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6593 bnx2x_zero_def_sb(bp); 6676 bnx2x_zero_def_sb(bp);
6594 for_each_queue(bp, i) 6677 for_each_queue(bp, i)
6595 bnx2x_zero_sb(bp, BP_L_ID(bp) + i); 6678 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6679#ifdef BCM_CNIC
6680 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6681#endif
6596 6682
6597init_hw_err: 6683init_hw_err:
6598 bnx2x_gunzip_end(bp); 6684 bnx2x_gunzip_end(bp);
@@ -6632,7 +6718,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6632 sizeof(struct host_status_block)); 6718 sizeof(struct host_status_block));
6633 } 6719 }
6634 /* Rx */ 6720 /* Rx */
6635 for_each_rx_queue(bp, i) { 6721 for_each_queue(bp, i) {
6636 6722
6637 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6723 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6638 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); 6724 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6652,7 +6738,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6652 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6738 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6653 } 6739 }
6654 /* Tx */ 6740 /* Tx */
6655 for_each_tx_queue(bp, i) { 6741 for_each_queue(bp, i) {
6656 6742
6657 /* fastpath tx rings: tx_buf tx_desc */ 6743 /* fastpath tx rings: tx_buf tx_desc */
6658 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 6744 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6668,11 +6754,13 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6668 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 6754 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6669 sizeof(struct bnx2x_slowpath)); 6755 sizeof(struct bnx2x_slowpath));
6670 6756
6671#ifdef BCM_ISCSI 6757#ifdef BCM_CNIC
6672 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 6758 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6673 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 6759 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6674 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 6760 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6675 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 6761 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6762 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6763 sizeof(struct host_status_block));
6676#endif 6764#endif
6677 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 6765 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6678 6766
@@ -6712,7 +6800,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6712 sizeof(struct host_status_block)); 6800 sizeof(struct host_status_block));
6713 } 6801 }
6714 /* Rx */ 6802 /* Rx */
6715 for_each_rx_queue(bp, i) { 6803 for_each_queue(bp, i) {
6716 6804
6717 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6805 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6718 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), 6806 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6734,7 +6822,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6734 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6822 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6735 } 6823 }
6736 /* Tx */ 6824 /* Tx */
6737 for_each_tx_queue(bp, i) { 6825 for_each_queue(bp, i) {
6738 6826
6739 /* fastpath tx rings: tx_buf tx_desc */ 6827 /* fastpath tx rings: tx_buf tx_desc */
6740 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), 6828 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6751,32 +6839,26 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6751 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 6839 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6752 sizeof(struct bnx2x_slowpath)); 6840 sizeof(struct bnx2x_slowpath));
6753 6841
6754#ifdef BCM_ISCSI 6842#ifdef BCM_CNIC
6755 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); 6843 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6756 6844
6757 /* Initialize T1 */
6758 for (i = 0; i < 64*1024; i += 64) {
6759 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6760 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6761 }
6762
6763 /* allocate searcher T2 table 6845 /* allocate searcher T2 table
6764 we allocate 1/4 of alloc num for T2 6846 we allocate 1/4 of alloc num for T2
6765 (which is not entered into the ILT) */ 6847 (which is not entered into the ILT) */
6766 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); 6848 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6767 6849
6768 /* Initialize T2 */ 6850 /* Initialize T2 (for 1024 connections) */
6769 for (i = 0; i < 16*1024; i += 64) 6851 for (i = 0; i < 16*1024; i += 64)
6770 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; 6852 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6771 6853
6772 /* now fixup the last line in the block to point to the next block */ 6854 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6773 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6774
6775 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6776 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 6855 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6777 6856
6778 /* QM queues (128*MAX_CONN) */ 6857 /* QM queues (128*MAX_CONN) */
6779 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); 6858 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6859
6860 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6861 sizeof(struct host_status_block));
6780#endif 6862#endif
6781 6863
6782 /* Slow path ring */ 6864 /* Slow path ring */
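
The T2 initialization loop above threads the searcher table into a free list: each 64-byte element stores, at byte offset 56, the physical address of the element that follows it. A rough userspace model of that layout, with t2_phys a made-up stand-in for the bp->t2_mapping DMA address:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define T2_SIZE (16 * 1024) /* 16K table of 64-byte elements -> 256 entries */

int main(void)
{
	char *t2 = calloc(1, T2_SIZE);
	uint64_t t2_phys = 0x10000000ULL; /* stand-in for the DMA mapping */
	uint64_t next;

	/* byte 56 of each element holds the physical address of the next
	   one, mirroring the loop in bnx2x_alloc_mem() */
	for (int i = 0; i < T2_SIZE; i += 64) {
		next = t2_phys + i + 64;
		memcpy(t2 + i + 56, &next, sizeof(next));
	}

	memcpy(&next, t2 + 56, sizeof(next));
	printf("element 0 -> 0x%llx\n", (unsigned long long)next);
	free(t2);
	return 0;
}
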
@@ -6796,7 +6878,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6796{ 6878{
6797 int i; 6879 int i;
6798 6880
6799 for_each_tx_queue(bp, i) { 6881 for_each_queue(bp, i) {
6800 struct bnx2x_fastpath *fp = &bp->fp[i]; 6882 struct bnx2x_fastpath *fp = &bp->fp[i];
6801 6883
6802 u16 bd_cons = fp->tx_bd_cons; 6884 u16 bd_cons = fp->tx_bd_cons;
@@ -6814,7 +6896,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6814{ 6896{
6815 int i, j; 6897 int i, j;
6816 6898
6817 for_each_rx_queue(bp, j) { 6899 for_each_queue(bp, j) {
6818 struct bnx2x_fastpath *fp = &bp->fp[j]; 6900 struct bnx2x_fastpath *fp = &bp->fp[j];
6819 6901
6820 for (i = 0; i < NUM_RX_BD; i++) { 6902 for (i = 0; i < NUM_RX_BD; i++) {
@@ -6852,6 +6934,9 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6852 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 6934 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6853 bp->msix_table[0].vector); 6935 bp->msix_table[0].vector);
6854 6936
6937#ifdef BCM_CNIC
6938 offset++;
6939#endif
6855 for_each_queue(bp, i) { 6940 for_each_queue(bp, i) {
6856 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " 6941 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6857 "state %x\n", i, bp->msix_table[i + offset].vector, 6942 "state %x\n", i, bp->msix_table[i + offset].vector,
@@ -6861,19 +6946,21 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6861 } 6946 }
6862} 6947}
6863 6948
6864static void bnx2x_free_irq(struct bnx2x *bp) 6949static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6865{ 6950{
6866 if (bp->flags & USING_MSIX_FLAG) { 6951 if (bp->flags & USING_MSIX_FLAG) {
6867 bnx2x_free_msix_irqs(bp); 6952 if (!disable_only)
6953 bnx2x_free_msix_irqs(bp);
6868 pci_disable_msix(bp->pdev); 6954 pci_disable_msix(bp->pdev);
6869 bp->flags &= ~USING_MSIX_FLAG; 6955 bp->flags &= ~USING_MSIX_FLAG;
6870 6956
6871 } else if (bp->flags & USING_MSI_FLAG) { 6957 } else if (bp->flags & USING_MSI_FLAG) {
6872 free_irq(bp->pdev->irq, bp->dev); 6958 if (!disable_only)
6959 free_irq(bp->pdev->irq, bp->dev);
6873 pci_disable_msi(bp->pdev); 6960 pci_disable_msi(bp->pdev);
6874 bp->flags &= ~USING_MSI_FLAG; 6961 bp->flags &= ~USING_MSI_FLAG;
6875 6962
6876 } else 6963 } else if (!disable_only)
6877 free_irq(bp->pdev->irq, bp->dev); 6964 free_irq(bp->pdev->irq, bp->dev);
6878} 6965}
6879 6966
@@ -6885,6 +6972,12 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6885 bp->msix_table[0].entry = igu_vec; 6972 bp->msix_table[0].entry = igu_vec;
6886 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); 6973 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6887 6974
6975#ifdef BCM_CNIC
6976 igu_vec = BP_L_ID(bp) + offset;
6977 bp->msix_table[1].entry = igu_vec;
6978 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6979 offset++;
6980#endif
6888 for_each_queue(bp, i) { 6981 for_each_queue(bp, i) {
6889 igu_vec = BP_L_ID(bp) + offset + i; 6982 igu_vec = BP_L_ID(bp) + offset + i;
6890 bp->msix_table[i + offset].entry = igu_vec; 6983 bp->msix_table[i + offset].entry = igu_vec;
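
With BCM_CNIC enabled, the vector table gains a fixed slot between the slowpath entry and the fastpath queues, which is why `offset` is bumped before the per-queue loop. A small sketch of the resulting layout (the queue count and the starting offset of 1 are assumptions for illustration):

#include <stdio.h>

#define BCM_CNIC /* assume a CNIC-enabled build for this sketch */

int main(void)
{
	int num_queues = 4; /* illustrative value */
	int offset = 1;     /* assumed: entry 0 is the slowpath interrupt */

	printf("msix_table[0] -> slowpath\n");
#ifdef BCM_CNIC
	printf("msix_table[1] -> CNIC\n");
	offset++;
#endif
	for (int i = 0; i < num_queues; i++)
		printf("msix_table[%d] -> fastpath queue %d\n", i + offset, i);
	return 0;
}
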
@@ -6915,14 +7008,13 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6915 return -EBUSY; 7008 return -EBUSY;
6916 } 7009 }
6917 7010
7011#ifdef BCM_CNIC
7012 offset++;
7013#endif
6918 for_each_queue(bp, i) { 7014 for_each_queue(bp, i) {
6919 struct bnx2x_fastpath *fp = &bp->fp[i]; 7015 struct bnx2x_fastpath *fp = &bp->fp[i];
6920 7016 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
6921 if (i < bp->num_rx_queues) 7017 bp->dev->name, i);
6922 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6923 else
6924 sprintf(fp->name, "%s-tx-%d",
6925 bp->dev->name, i - bp->num_rx_queues);
6926 7018
6927 rc = request_irq(bp->msix_table[i + offset].vector, 7019 rc = request_irq(bp->msix_table[i + offset].vector,
6928 bnx2x_msix_fp_int, 0, fp->name, fp); 7020 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -6936,11 +7028,10 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6936 } 7028 }
6937 7029
6938 i = BNX2X_NUM_QUEUES(bp); 7030 i = BNX2X_NUM_QUEUES(bp);
6939 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d" 7031 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
6940 " ... fp[%d] %d\n", 7032 bp->msix_table[0].vector,
6941 bp->dev->name, bp->msix_table[0].vector, 7033 0, bp->msix_table[offset].vector,
6942 0, bp->msix_table[offset].vector, 7034 i - 1, bp->msix_table[offset + i - 1].vector);
6943 i - 1, bp->msix_table[offset + i - 1].vector);
6944 7035
6945 return 0; 7036 return 0;
6946} 7037}
@@ -6981,7 +7072,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
6981{ 7072{
6982 int i; 7073 int i;
6983 7074
6984 for_each_rx_queue(bp, i) 7075 for_each_queue(bp, i)
6985 napi_enable(&bnx2x_fp(bp, i, napi)); 7076 napi_enable(&bnx2x_fp(bp, i, napi));
6986} 7077}
6987 7078
@@ -6989,7 +7080,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
6989{ 7080{
6990 int i; 7081 int i;
6991 7082
6992 for_each_rx_queue(bp, i) 7083 for_each_queue(bp, i)
6993 napi_disable(&bnx2x_fp(bp, i, napi)); 7084 napi_disable(&bnx2x_fp(bp, i, napi));
6994} 7085}
6995 7086
@@ -7015,14 +7106,25 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7015 bnx2x_int_disable_sync(bp, disable_hw); 7106 bnx2x_int_disable_sync(bp, disable_hw);
7016 bnx2x_napi_disable(bp); 7107 bnx2x_napi_disable(bp);
7017 netif_tx_disable(bp->dev); 7108 netif_tx_disable(bp->dev);
7018 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7019} 7109}
7020 7110
7021/* 7111/*
7022 * Init service functions 7112 * Init service functions
7023 */ 7113 */
7024 7114
7025static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) 7115/**
7116 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7117 *
7118 * @param bp driver descriptor
7119 * @param set set or clear an entry (1 or 0)
7120 * @param mac pointer to a buffer containing a MAC
7121 * @param cl_bit_vec bit vector of clients to register a MAC for
7122 * @param cam_offset offset in a CAM to use
7123 * @param with_bcast set broadcast MAC as well
7124 */
7125static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7126 u32 cl_bit_vec, u8 cam_offset,
7127 u8 with_bcast)
7026{ 7128{
7027 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 7129 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7028 int port = BP_PORT(bp); 7130 int port = BP_PORT(bp);
@@ -7031,25 +7133,25 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7031 * unicasts 0-31:port0 32-63:port1 7133 * unicasts 0-31:port0 32-63:port1
7032 * multicast 64-127:port0 128-191:port1 7134 * multicast 64-127:port0 128-191:port1
7033 */ 7135 */
7034 config->hdr.length = 2; 7136 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7035 config->hdr.offset = port ? 32 : 0; 7137 config->hdr.offset = cam_offset;
7036 config->hdr.client_id = bp->fp->cl_id; 7138 config->hdr.client_id = 0xff;
7037 config->hdr.reserved1 = 0; 7139 config->hdr.reserved1 = 0;
7038 7140
7039 /* primary MAC */ 7141 /* primary MAC */
7040 config->config_table[0].cam_entry.msb_mac_addr = 7142 config->config_table[0].cam_entry.msb_mac_addr =
7041 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7143 swab16(*(u16 *)&mac[0]);
7042 config->config_table[0].cam_entry.middle_mac_addr = 7144 config->config_table[0].cam_entry.middle_mac_addr =
7043 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7145 swab16(*(u16 *)&mac[2]);
7044 config->config_table[0].cam_entry.lsb_mac_addr = 7146 config->config_table[0].cam_entry.lsb_mac_addr =
7045 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7147 swab16(*(u16 *)&mac[4]);
7046 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 7148 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7047 if (set) 7149 if (set)
7048 config->config_table[0].target_table_entry.flags = 0; 7150 config->config_table[0].target_table_entry.flags = 0;
7049 else 7151 else
7050 CAM_INVALIDATE(config->config_table[0]); 7152 CAM_INVALIDATE(config->config_table[0]);
7051 config->config_table[0].target_table_entry.clients_bit_vector = 7153 config->config_table[0].target_table_entry.clients_bit_vector =
7052 cpu_to_le32(1 << BP_L_ID(bp)); 7154 cpu_to_le32(cl_bit_vec);
7053 config->config_table[0].target_table_entry.vlan_id = 0; 7155 config->config_table[0].target_table_entry.vlan_id = 0;
7054 7156
7055 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 7157 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
@@ -7059,47 +7161,58 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7059 config->config_table[0].cam_entry.lsb_mac_addr); 7161 config->config_table[0].cam_entry.lsb_mac_addr);
7060 7162
7061 /* broadcast */ 7163 /* broadcast */
7062 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff); 7164 if (with_bcast) {
7063 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff); 7165 config->config_table[1].cam_entry.msb_mac_addr =
7064 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff); 7166 cpu_to_le16(0xffff);
7065 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 7167 config->config_table[1].cam_entry.middle_mac_addr =
7066 if (set) 7168 cpu_to_le16(0xffff);
7067 config->config_table[1].target_table_entry.flags = 7169 config->config_table[1].cam_entry.lsb_mac_addr =
7068 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 7170 cpu_to_le16(0xffff);
7069 else 7171 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7070 CAM_INVALIDATE(config->config_table[1]); 7172 if (set)
7071 config->config_table[1].target_table_entry.clients_bit_vector = 7173 config->config_table[1].target_table_entry.flags =
7072 cpu_to_le32(1 << BP_L_ID(bp)); 7174 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7073 config->config_table[1].target_table_entry.vlan_id = 0; 7175 else
7176 CAM_INVALIDATE(config->config_table[1]);
7177 config->config_table[1].target_table_entry.clients_bit_vector =
7178 cpu_to_le32(cl_bit_vec);
7179 config->config_table[1].target_table_entry.vlan_id = 0;
7180 }
7074 7181
7075 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7182 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7076 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7183 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7077 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 7184 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7078} 7185}
7079 7186
7080static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) 7187/**
7188 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7189 *
7190 * @param bp driver descriptor
7191 * @param set set or clear an entry (1 or 0)
7192 * @param mac pointer to a buffer containing a MAC
7193 * @param cl_bit_vec bit vector of clients to register a MAC for
7194 * @param cam_offset offset in a CAM to use
7195 */
7196static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7197 u32 cl_bit_vec, u8 cam_offset)
7081{ 7198{
7082 struct mac_configuration_cmd_e1h *config = 7199 struct mac_configuration_cmd_e1h *config =
7083 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 7200 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7084 7201
7085 /* CAM allocation for E1H
7086 * unicasts: by func number
7087 * multicast: 20+FUNC*20, 20 each
7088 */
7089 config->hdr.length = 1; 7202 config->hdr.length = 1;
7090 config->hdr.offset = BP_FUNC(bp); 7203 config->hdr.offset = cam_offset;
7091 config->hdr.client_id = bp->fp->cl_id; 7204 config->hdr.client_id = 0xff;
7092 config->hdr.reserved1 = 0; 7205 config->hdr.reserved1 = 0;
7093 7206
7094 /* primary MAC */ 7207 /* primary MAC */
7095 config->config_table[0].msb_mac_addr = 7208 config->config_table[0].msb_mac_addr =
7096 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7209 swab16(*(u16 *)&mac[0]);
7097 config->config_table[0].middle_mac_addr = 7210 config->config_table[0].middle_mac_addr =
7098 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7211 swab16(*(u16 *)&mac[2]);
7099 config->config_table[0].lsb_mac_addr = 7212 config->config_table[0].lsb_mac_addr =
7100 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7213 swab16(*(u16 *)&mac[4]);
7101 config->config_table[0].clients_bit_vector = 7214 config->config_table[0].clients_bit_vector =
7102 cpu_to_le32(1 << BP_L_ID(bp)); 7215 cpu_to_le32(cl_bit_vec);
7103 config->config_table[0].vlan_id = 0; 7216 config->config_table[0].vlan_id = 0;
7104 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 7217 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7105 if (set) 7218 if (set)
@@ -7108,11 +7221,11 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7108 config->config_table[0].flags = 7221 config->config_table[0].flags =
7109 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 7222 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7110 7223
7111 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 7224 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7112 (set ? "setting" : "clearing"), 7225 (set ? "setting" : "clearing"),
7113 config->config_table[0].msb_mac_addr, 7226 config->config_table[0].msb_mac_addr,
7114 config->config_table[0].middle_mac_addr, 7227 config->config_table[0].middle_mac_addr,
7115 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 7228 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7116 7229
7117 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7230 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7118 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7231 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -7164,6 +7277,69 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7164 return -EBUSY; 7277 return -EBUSY;
7165} 7278}
7166 7279
7280static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7281{
7282 bp->set_mac_pending++;
7283 smp_wmb();
7284
7285 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7286 (1 << bp->fp->cl_id), BP_FUNC(bp));
7287
7288 /* Wait for a completion */
7289 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7290}
7291
7292static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7293{
7294 bp->set_mac_pending++;
7295 smp_wmb();
7296
7297 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7298 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7299 1);
7300
7301 /* Wait for a completion */
7302 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7303}
7304
7305#ifdef BCM_CNIC
7306/**
7307 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7308 * MAC(s). This function will wait until the ramrod completion
7309 * returns.
7310 *
7311 * @param bp driver handle
7312 * @param set set or clear the CAM entry
7313 *
7314 * @return 0 if success, -ENODEV if ramrod doesn't return.
7315 */
7316static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7317{
7318 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7319
7320 bp->set_mac_pending++;
7321 smp_wmb();
7322
7323 /* Send a SET_MAC ramrod */
7324 if (CHIP_IS_E1(bp))
7325 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7326 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7327 1);
7328 else
7329 /* CAM allocation for E1H
7330 * unicasts: by func number
7331 * multicast: 20+FUNC*20, 20 each
7332 */
7333 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7334 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7335
7336 /* Wait for a completion when setting */
7337 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7338
7339 return 0;
7340}
7341#endif
7342
7167static int bnx2x_setup_leading(struct bnx2x *bp) 7343static int bnx2x_setup_leading(struct bnx2x *bp)
7168{ 7344{
7169 int rc; 7345 int rc;
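
Taken together, the generalized helpers encode a simple CAM layout: on E1 the ETH unicast entry sits at offset 0 or 32 depending on the port with the iSCSI entry two slots later, while on E1H entries are indexed by function, with the iSCSI ones placed after all E1H_FUNC_MAX ETH entries. A standalone sketch of those offset rules (E1H_FUNC_MAX = 8 is an assumption for illustration):

#include <stdio.h>

#define E1H_FUNC_MAX 8 /* assumed value, for illustration only */

static int eth_cam_offset(int is_e1, int port, int func)
{
	return is_e1 ? (port ? 32 : 0) : func;
}

static int iscsi_cam_offset(int is_e1, int port, int func)
{
	return is_e1 ? (port ? 32 : 0) + 2 : E1H_FUNC_MAX + func;
}

int main(void)
{
	printf("E1  port1: eth=%d iscsi=%d\n",
	       eth_cam_offset(1, 1, 0), iscsi_cam_offset(1, 1, 0));
	printf("E1H func3: eth=%d iscsi=%d\n",
	       eth_cam_offset(0, 0, 3), iscsi_cam_offset(0, 0, 3));
	return 0;
}
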
@@ -7199,96 +7375,67 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7199 7375
7200static int bnx2x_poll(struct napi_struct *napi, int budget); 7376static int bnx2x_poll(struct napi_struct *napi, int budget);
7201 7377
7202static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, 7378static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7203 int *num_tx_queues_out)
7204{ 7379{
7205 int _num_rx_queues = 0, _num_tx_queues = 0;
7206 7380
7207 switch (bp->multi_mode) { 7381 switch (bp->multi_mode) {
7208 case ETH_RSS_MODE_DISABLED: 7382 case ETH_RSS_MODE_DISABLED:
7209 _num_rx_queues = 1; 7383 bp->num_queues = 1;
7210 _num_tx_queues = 1;
7211 break; 7384 break;
7212 7385
7213 case ETH_RSS_MODE_REGULAR: 7386 case ETH_RSS_MODE_REGULAR:
7214 if (num_rx_queues) 7387 if (num_queues)
7215 _num_rx_queues = min_t(u32, num_rx_queues, 7388 bp->num_queues = min_t(u32, num_queues,
7216 BNX2X_MAX_QUEUES(bp)); 7389 BNX2X_MAX_QUEUES(bp));
7217 else
7218 _num_rx_queues = min_t(u32, num_online_cpus(),
7219 BNX2X_MAX_QUEUES(bp));
7220
7221 if (num_tx_queues)
7222 _num_tx_queues = min_t(u32, num_tx_queues,
7223 BNX2X_MAX_QUEUES(bp));
7224 else 7390 else
7225 _num_tx_queues = min_t(u32, num_online_cpus(), 7391 bp->num_queues = min_t(u32, num_online_cpus(),
7226 BNX2X_MAX_QUEUES(bp)); 7392 BNX2X_MAX_QUEUES(bp));
7227
7228 /* There must be not more Tx queues than Rx queues */
7229 if (_num_tx_queues > _num_rx_queues) {
7230 BNX2X_ERR("number of tx queues (%d) > "
7231 "number of rx queues (%d)"
7232 " defaulting to %d\n",
7233 _num_tx_queues, _num_rx_queues,
7234 _num_rx_queues);
7235 _num_tx_queues = _num_rx_queues;
7236 }
7237 break; 7393 break;
7238 7394
7239 7395
7240 default: 7396 default:
7241 _num_rx_queues = 1; 7397 bp->num_queues = 1;
7242 _num_tx_queues = 1;
7243 break; 7398 break;
7244 } 7399 }
7245
7246 *num_rx_queues_out = _num_rx_queues;
7247 *num_tx_queues_out = _num_tx_queues;
7248} 7400}
7249 7401
7250static int bnx2x_set_int_mode(struct bnx2x *bp) 7402static int bnx2x_set_num_queues(struct bnx2x *bp)
7251{ 7403{
7252 int rc = 0; 7404 int rc = 0;
7253 7405
7254 switch (int_mode) { 7406 switch (int_mode) {
7255 case INT_MODE_INTx: 7407 case INT_MODE_INTx:
7256 case INT_MODE_MSI: 7408 case INT_MODE_MSI:
7257 bp->num_rx_queues = 1; 7409 bp->num_queues = 1;
7258 bp->num_tx_queues = 1;
7259 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7410 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7260 break; 7411 break;
7261 7412
7262 case INT_MODE_MSIX: 7413 case INT_MODE_MSIX:
7263 default: 7414 default:
7264 /* Set interrupt mode according to bp->multi_mode value */ 7415 /* Set number of queues according to bp->multi_mode value */
7265 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, 7416 bnx2x_set_num_queues_msix(bp);
7266 &bp->num_tx_queues);
7267 7417
7268 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", 7418 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7269 bp->num_rx_queues, bp->num_tx_queues); 7419 bp->num_queues);
7270 7420
7271 /* if we can't use MSI-X we only need one fp, 7421 /* if we can't use MSI-X we only need one fp,
7272 * so try to enable MSI-X with the requested number of fp's 7422 * so try to enable MSI-X with the requested number of fp's
7273 * and fallback to MSI or legacy INTx with one fp 7423 * and fallback to MSI or legacy INTx with one fp
7274 */ 7424 */
7275 rc = bnx2x_enable_msix(bp); 7425 rc = bnx2x_enable_msix(bp);
7276 if (rc) { 7426 if (rc)
7277 /* failed to enable MSI-X */ 7427 /* failed to enable MSI-X */
7278 if (bp->multi_mode) 7428 bp->num_queues = 1;
7279 BNX2X_ERR("Multi requested but failed to "
7280 "enable MSI-X (rx %d tx %d), "
7281 "set number of queues to 1\n",
7282 bp->num_rx_queues, bp->num_tx_queues);
7283 bp->num_rx_queues = 1;
7284 bp->num_tx_queues = 1;
7285 }
7286 break; 7429 break;
7287 } 7430 }
7288 bp->dev->real_num_tx_queues = bp->num_tx_queues; 7431 bp->dev->real_num_tx_queues = bp->num_queues;
7289 return rc; 7432 return rc;
7290} 7433}
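
The queue-count policy above is small enough to restate in isolation: an explicit num_queues module parameter wins, otherwise one queue per online CPU, and either way the result is clamped to the hardware maximum. A hedged userspace sketch (names and limits are illustrative):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* mirrors bnx2x_set_num_queues_msix() for ETH_RSS_MODE_REGULAR:
   a non-zero module parameter wins, else one queue per online CPU,
   both clamped to the adapter's queue limit */
static unsigned int pick_num_queues(unsigned int param,
				    unsigned int online_cpus,
				    unsigned int hw_max)
{
	return min_u(param ? param : online_cpus, hw_max);
}

int main(void)
{
	printf("%u\n", pick_num_queues(0, 16, 8)); /* no param: 8 */
	printf("%u\n", pick_num_queues(2, 16, 8)); /* param=2:  2 */
	return 0;
}
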
7291 7434
7435#ifdef BCM_CNIC
7436static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7437static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7438#endif
7292 7439
7293/* must be called with rtnl_lock */ 7440/* must be called with rtnl_lock */
7294static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 7441static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
@@ -7303,16 +7450,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7303 7450
7304 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7451 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7305 7452
7306 rc = bnx2x_set_int_mode(bp); 7453 rc = bnx2x_set_num_queues(bp);
7307 7454
7308 if (bnx2x_alloc_mem(bp)) 7455 if (bnx2x_alloc_mem(bp)) {
7456 bnx2x_free_irq(bp, true);
7309 return -ENOMEM; 7457 return -ENOMEM;
7458 }
7310 7459
7311 for_each_rx_queue(bp, i) 7460 for_each_queue(bp, i)
7312 bnx2x_fp(bp, i, disable_tpa) = 7461 bnx2x_fp(bp, i, disable_tpa) =
7313 ((bp->flags & TPA_ENABLE_FLAG) == 0); 7462 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7314 7463
7315 for_each_rx_queue(bp, i) 7464 for_each_queue(bp, i)
7316 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 7465 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7317 bnx2x_poll, 128); 7466 bnx2x_poll, 128);
7318 7467
@@ -7321,26 +7470,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7321 if (bp->flags & USING_MSIX_FLAG) { 7470 if (bp->flags & USING_MSIX_FLAG) {
7322 rc = bnx2x_req_msix_irqs(bp); 7471 rc = bnx2x_req_msix_irqs(bp);
7323 if (rc) { 7472 if (rc) {
7324 pci_disable_msix(bp->pdev); 7473 bnx2x_free_irq(bp, true);
7325 goto load_error1; 7474 goto load_error1;
7326 } 7475 }
7327 } else { 7476 } else {
7328 /* Fall to INTx if failed to enable MSI-X due to lack of 7477 /* Fall to INTx if failed to enable MSI-X due to lack of
7329 memory (in bnx2x_set_int_mode()) */ 7478 memory (in bnx2x_set_num_queues()) */
7330 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) 7479 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7331 bnx2x_enable_msi(bp); 7480 bnx2x_enable_msi(bp);
7332 bnx2x_ack_int(bp); 7481 bnx2x_ack_int(bp);
7333 rc = bnx2x_req_irq(bp); 7482 rc = bnx2x_req_irq(bp);
7334 if (rc) { 7483 if (rc) {
7335 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); 7484 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7336 if (bp->flags & USING_MSI_FLAG) 7485 bnx2x_free_irq(bp, true);
7337 pci_disable_msi(bp->pdev);
7338 goto load_error1; 7486 goto load_error1;
7339 } 7487 }
7340 if (bp->flags & USING_MSI_FLAG) { 7488 if (bp->flags & USING_MSI_FLAG) {
7341 bp->dev->irq = bp->pdev->irq; 7489 bp->dev->irq = bp->pdev->irq;
7342 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n", 7490 netdev_info(bp->dev, "using MSI IRQ %d\n",
7343 bp->dev->name, bp->pdev->irq); 7491 bp->pdev->irq);
7344 } 7492 }
7345 } 7493 }
7346 7494
@@ -7389,6 +7537,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7389 rc = bnx2x_init_hw(bp, load_code); 7537 rc = bnx2x_init_hw(bp, load_code);
7390 if (rc) { 7538 if (rc) {
7391 BNX2X_ERR("HW init failed, aborting\n"); 7539 BNX2X_ERR("HW init failed, aborting\n");
7540 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7541 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7542 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7392 goto load_error2; 7543 goto load_error2;
7393 } 7544 }
7394 7545
@@ -7427,20 +7578,39 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7427 if (CHIP_IS_E1H(bp)) 7578 if (CHIP_IS_E1H(bp))
7428 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 7579 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7429 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 7580 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7430 bp->state = BNX2X_STATE_DISABLED; 7581 bp->flags |= MF_FUNC_DIS;
7431 } 7582 }
7432 7583
7433 if (bp->state == BNX2X_STATE_OPEN) { 7584 if (bp->state == BNX2X_STATE_OPEN) {
7585#ifdef BCM_CNIC
7586 /* Enable Timer scan */
7587 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7588#endif
7434 for_each_nondefault_queue(bp, i) { 7589 for_each_nondefault_queue(bp, i) {
7435 rc = bnx2x_setup_multi(bp, i); 7590 rc = bnx2x_setup_multi(bp, i);
7436 if (rc) 7591 if (rc)
7592#ifdef BCM_CNIC
7593 goto load_error4;
7594#else
7437 goto load_error3; 7595 goto load_error3;
7596#endif
7438 } 7597 }
7439 7598
7440 if (CHIP_IS_E1(bp)) 7599 if (CHIP_IS_E1(bp))
7441 bnx2x_set_mac_addr_e1(bp, 1); 7600 bnx2x_set_eth_mac_addr_e1(bp, 1);
7442 else 7601 else
7443 bnx2x_set_mac_addr_e1h(bp, 1); 7602 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7603#ifdef BCM_CNIC
7604 /* Set iSCSI L2 MAC */
7605 mutex_lock(&bp->cnic_mutex);
7606 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7607 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7608 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7609 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7610 CNIC_SB_ID(bp));
7611 }
7612 mutex_unlock(&bp->cnic_mutex);
7613#endif
7444 } 7614 }
7445 7615
7446 if (bp->port.pmf) 7616 if (bp->port.pmf)
@@ -7481,9 +7651,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7481 /* start the timer */ 7651 /* start the timer */
7482 mod_timer(&bp->timer, jiffies + bp->current_interval); 7652 mod_timer(&bp->timer, jiffies + bp->current_interval);
7483 7653
7654#ifdef BCM_CNIC
7655 bnx2x_setup_cnic_irq_info(bp);
7656 if (bp->state == BNX2X_STATE_OPEN)
7657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658#endif
7484 7659
7485 return 0; 7660 return 0;
7486 7661
7662#ifdef BCM_CNIC
7663load_error4:
7664 /* Disable Timer scan */
7665 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7666#endif
7487load_error3: 7667load_error3:
7488 bnx2x_int_disable_sync(bp, 1); 7668 bnx2x_int_disable_sync(bp, 1);
7489 if (!BP_NOMCP(bp)) { 7669 if (!BP_NOMCP(bp)) {
@@ -7493,14 +7673,14 @@ load_error3:
7493 bp->port.pmf = 0; 7673 bp->port.pmf = 0;
7494 /* Free SKBs, SGEs, TPA pool and driver internals */ 7674 /* Free SKBs, SGEs, TPA pool and driver internals */
7495 bnx2x_free_skbs(bp); 7675 bnx2x_free_skbs(bp);
7496 for_each_rx_queue(bp, i) 7676 for_each_queue(bp, i)
7497 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 7677 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7498load_error2: 7678load_error2:
7499 /* Release IRQs */ 7679 /* Release IRQs */
7500 bnx2x_free_irq(bp); 7680 bnx2x_free_irq(bp, false);
7501load_error1: 7681load_error1:
7502 bnx2x_napi_disable(bp); 7682 bnx2x_napi_disable(bp);
7503 for_each_rx_queue(bp, i) 7683 for_each_queue(bp, i)
7504 netif_napi_del(&bnx2x_fp(bp, i, napi)); 7684 netif_napi_del(&bnx2x_fp(bp, i, napi));
7505 bnx2x_free_mem(bp); 7685 bnx2x_free_mem(bp);
7506 7686
@@ -7591,6 +7771,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
7591 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7771 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7592 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7772 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7593 7773
7774#ifdef BCM_CNIC
7775 /* Disable Timer scan */
7776 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7777 /*
7778 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7779 * complete
7780 */
7781 for (i = 0; i < 200; i++) {
7782 msleep(10);
7783 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7784 break;
7785 }
7786#endif
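
The shutdown path added here polls the scan-on register in 10ms steps, giving the hardware between 10ms and roughly 2 seconds to drain before the ILT is cleared. A self-contained model of that bounded-poll pattern, with the register read and the sleep faked out:

#include <stdbool.h>
#include <stdio.h>

static int fake_scan_polls = 3; /* pretend the scan clears after 3 reads */

static bool scan_still_running(void)
{
	return fake_scan_polls-- > 0;
}

static void sleep_ms(int ms)
{
	(void)ms; /* stand-in for msleep() */
}

/* up to 200 polls of 10ms each (~2s total), returning early once the
   hardware reports the linear timer scan has stopped */
static bool wait_timer_scan_done(void)
{
	for (int i = 0; i < 200; i++) {
		sleep_ms(10);
		if (!scan_still_running())
			return true;
	}
	return false;
}

int main(void)
{
	printf("scan done: %s\n", wait_timer_scan_done() ? "yes" : "no");
	return 0;
}
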
7594 /* Clear ILT */ 7787 /* Clear ILT */
7595 base = FUNC_ILT_BASE(func); 7788 base = FUNC_ILT_BASE(func);
7596 for (i = base; i < base + ILT_PER_FUNC; i++) 7789 for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7657,6 +7850,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7657 u32 reset_code = 0; 7850 u32 reset_code = 0;
7658 int i, cnt, rc; 7851 int i, cnt, rc;
7659 7852
7853#ifdef BCM_CNIC
7854 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855#endif
7660 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 7856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7661 7857
7662 /* Set "drop all" */ 7858 /* Set "drop all" */
@@ -7672,10 +7868,10 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7672 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 7868 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7673 7869
7674 /* Release IRQs */ 7870 /* Release IRQs */
7675 bnx2x_free_irq(bp); 7871 bnx2x_free_irq(bp, false);
7676 7872
7677 /* Wait until tx fastpath tasks complete */ 7873 /* Wait until tx fastpath tasks complete */
7678 for_each_tx_queue(bp, i) { 7874 for_each_queue(bp, i) {
7679 struct bnx2x_fastpath *fp = &bp->fp[i]; 7875 struct bnx2x_fastpath *fp = &bp->fp[i];
7680 7876
7681 cnt = 1000; 7877 cnt = 1000;
@@ -7703,7 +7899,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7703 struct mac_configuration_cmd *config = 7899 struct mac_configuration_cmd *config =
7704 bnx2x_sp(bp, mcast_config); 7900 bnx2x_sp(bp, mcast_config);
7705 7901
7706 bnx2x_set_mac_addr_e1(bp, 0); 7902 bnx2x_set_eth_mac_addr_e1(bp, 0);
7707 7903
7708 for (i = 0; i < config->hdr.length; i++) 7904 for (i = 0; i < config->hdr.length; i++)
7709 CAM_INVALIDATE(config->config_table[i]); 7905 CAM_INVALIDATE(config->config_table[i]);
@@ -7716,6 +7912,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7716 config->hdr.client_id = bp->fp->cl_id; 7912 config->hdr.client_id = bp->fp->cl_id;
7717 config->hdr.reserved1 = 0; 7913 config->hdr.reserved1 = 0;
7718 7914
7915 bp->set_mac_pending++;
7916 smp_wmb();
7917
7719 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7918 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7720 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 7919 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7721 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); 7920 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
@@ -7723,13 +7922,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7723 } else { /* E1H */ 7922 } else { /* E1H */
7724 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7923 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7725 7924
7726 bnx2x_set_mac_addr_e1h(bp, 0); 7925 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7727 7926
7728 for (i = 0; i < MC_HASH_SIZE; i++) 7927 for (i = 0; i < MC_HASH_SIZE; i++)
7729 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7928 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7730 7929
7731 REG_WR(bp, MISC_REG_E1HMF_MODE, 0); 7930 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7732 } 7931 }
7932#ifdef BCM_CNIC
7933 /* Clear iSCSI L2 MAC */
7934 mutex_lock(&bp->cnic_mutex);
7935 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7936 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7937 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7938 }
7939 mutex_unlock(&bp->cnic_mutex);
7940#endif
7733 7941
7734 if (unload_mode == UNLOAD_NORMAL) 7942 if (unload_mode == UNLOAD_NORMAL)
7735 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7943 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -7806,9 +8014,9 @@ unload_error:
7806 8014
7807 /* Free SKBs, SGEs, TPA pool and driver internals */ 8015 /* Free SKBs, SGEs, TPA pool and driver internals */
7808 bnx2x_free_skbs(bp); 8016 bnx2x_free_skbs(bp);
7809 for_each_rx_queue(bp, i) 8017 for_each_queue(bp, i)
7810 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 8018 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7811 for_each_rx_queue(bp, i) 8019 for_each_queue(bp, i)
7812 netif_napi_del(&bnx2x_fp(bp, i, napi)); 8020 netif_napi_del(&bnx2x_fp(bp, i, napi));
7813 bnx2x_free_mem(bp); 8021 bnx2x_free_mem(bp);
7814 8022
@@ -8102,8 +8310,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8102 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 8310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8103 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 8311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8104 8312
8105 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", 8313 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8106 val, val2, val3, val4);
8107} 8314}
8108 8315
8109static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 8316static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8506,6 +8713,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8506 bp->link_params.req_flow_ctrl, bp->port.advertising); 8713 bp->link_params.req_flow_ctrl, bp->port.advertising);
8507} 8714}
8508 8715
8716static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8717{
8718 mac_hi = cpu_to_be16(mac_hi);
8719 mac_lo = cpu_to_be32(mac_lo);
8720 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8721 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8722}
8723
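
bnx2x_set_mac_buf() replaces six open-coded byte extractions: the 16-bit upper word supplies MAC bytes 0-1 and the 32-bit lower word bytes 2-5, with both converted to big-endian first so the buffer comes out in wire order. A userspace equivalent, using htons/htonl as stand-ins for cpu_to_be16/cpu_to_be32:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* mac_hi fills bytes 0-1, mac_lo fills bytes 2-5, both big-endian */
static void set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	mac_hi = htons(mac_hi);
	mac_lo = htonl(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x33445566, 0x1122);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 11:22:33:44:55:66 */
	return 0;
}
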
8509static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8724static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8510{ 8725{
8511 int port = BP_PORT(bp); 8726 int port = BP_PORT(bp);
@@ -8587,14 +8802,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8587 8802
8588 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 8803 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8589 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 8804 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8590 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); 8805 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8591 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8592 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8593 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8594 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8595 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8596 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8806 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8597 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8807 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8808
8809#ifdef BCM_CNIC
8810 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8811 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8812 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8813#endif
8598} 8814}
8599 8815
8600static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8816static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8690,6 +8906,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8690 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ 8906 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8691 8907
8692 mutex_init(&bp->port.phy_mutex); 8908 mutex_init(&bp->port.phy_mutex);
8909 mutex_init(&bp->fw_mb_mutex);
8910#ifdef BCM_CNIC
8911 mutex_init(&bp->cnic_mutex);
8912#endif
8693 8913
8694 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8695 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
@@ -8701,17 +8921,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8701 bnx2x_undi_unload(bp); 8921 bnx2x_undi_unload(bp);
8702 8922
8703 if (CHIP_REV_IS_FPGA(bp)) 8923 if (CHIP_REV_IS_FPGA(bp))
8704 printk(KERN_ERR PFX "FPGA detected\n"); 8924 pr_err("FPGA detected\n");
8705 8925
8706 if (BP_NOMCP(bp) && (func == 0)) 8926 if (BP_NOMCP(bp) && (func == 0))
8707 printk(KERN_ERR PFX 8927 pr_err("MCP disabled, must load devices in order!\n");
8708 "MCP disabled, must load devices in order!\n");
8709 8928
8710 /* Set multi queue mode */ 8929 /* Set multi queue mode */
8711 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8712 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8713 printk(KERN_ERR PFX 8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
8714 "Multi disabled since int_mode requested is not MSI-X\n");
8715 multi_mode = ETH_RSS_MODE_DISABLED; 8933 multi_mode = ETH_RSS_MODE_DISABLED;
8716 } 8934 }
8717 bp->multi_mode = multi_mode; 8935 bp->multi_mode = multi_mode;
@@ -8738,8 +8956,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8738 8956
8739 bp->rx_csum = 1; 8957 bp->rx_csum = 1;
8740 8958
8741 bp->tx_ticks = 50; 8959 /* make sure that the numbers are in the right granularity */
8742 bp->rx_ticks = 25; 8960 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8961 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8743 8962
8744 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 8963 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8745 bp->current_interval = (poll ? poll : timer_interval); 8964 bp->current_interval = (poll ? poll : timer_interval);
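
The new tick assignments round the requested coalescing values down to a multiple of 4*BNX2X_BTR so they land on the timer granularity the chip actually supports. A quick sketch of the arithmetic (BNX2X_BTR = 4 is assumed here for illustration; the real constant lives in bnx2x.h):

#include <stdio.h>

#define BNX2X_BTR 4 /* assumed timer resolution factor */

/* round a tick value down to a multiple of 4*BNX2X_BTR,
   as in bnx2x_init_bp() */
static int round_ticks(int ticks)
{
	return (ticks / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
}

int main(void)
{
	printf("tx: %d -> %d\n", 50, round_ticks(50)); /* 48 */
	printf("rx: %d -> %d\n", 25, round_ticks(25)); /* 16 */
	return 0;
}
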
@@ -8765,20 +8984,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8765 cmd->supported = bp->port.supported; 8984 cmd->supported = bp->port.supported;
8766 cmd->advertising = bp->port.advertising; 8985 cmd->advertising = bp->port.advertising;
8767 8986
8768 if (netif_carrier_ok(dev)) { 8987 if ((bp->state == BNX2X_STATE_OPEN) &&
8988 !(bp->flags & MF_FUNC_DIS) &&
8989 (bp->link_vars.link_up)) {
8769 cmd->speed = bp->link_vars.line_speed; 8990 cmd->speed = bp->link_vars.line_speed;
8770 cmd->duplex = bp->link_vars.duplex; 8991 cmd->duplex = bp->link_vars.duplex;
8771 } else { 8992 if (IS_E1HMF(bp)) {
8772 cmd->speed = bp->link_params.req_line_speed; 8993 u16 vn_max_rate;
8773 cmd->duplex = bp->link_params.req_duplex;
8774 }
8775 if (IS_E1HMF(bp)) {
8776 u16 vn_max_rate;
8777 8994
8778 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> 8995 vn_max_rate =
8996 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8779 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 8997 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8780 if (vn_max_rate < cmd->speed) 8998 if (vn_max_rate < cmd->speed)
8781 cmd->speed = vn_max_rate; 8999 cmd->speed = vn_max_rate;
9000 }
9001 } else {
9002 cmd->speed = -1;
9003 cmd->duplex = -1;
8782 } 9004 }
8783 9005
8784 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 9006 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
@@ -9133,7 +9355,7 @@ static u32 bnx2x_get_msglevel(struct net_device *dev)
9133{ 9355{
9134 struct bnx2x *bp = netdev_priv(dev); 9356 struct bnx2x *bp = netdev_priv(dev);
9135 9357
9136 return bp->msglevel; 9358 return bp->msg_enable;
9137} 9359}
9138 9360
9139static void bnx2x_set_msglevel(struct net_device *dev, u32 level) 9361static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
@@ -9141,7 +9363,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9141 struct bnx2x *bp = netdev_priv(dev); 9363 struct bnx2x *bp = netdev_priv(dev);
9142 9364
9143 if (capable(CAP_NET_ADMIN)) 9365 if (capable(CAP_NET_ADMIN))
9144 bp->msglevel = level; 9366 bp->msg_enable = level;
9145} 9367}
9146 9368
9147static int bnx2x_nway_reset(struct net_device *dev) 9369static int bnx2x_nway_reset(struct net_device *dev)
@@ -9163,6 +9385,9 @@ static u32 bnx2x_get_link(struct net_device *dev)
9163{ 9385{
9164 struct bnx2x *bp = netdev_priv(dev); 9386 struct bnx2x *bp = netdev_priv(dev);
9165 9387
9388 if (bp->flags & MF_FUNC_DIS)
9389 return 0;
9390
9166 return bp->link_vars.link_up; 9391 return bp->link_vars.link_up;
9167} 9392}
9168 9393
@@ -9567,8 +9792,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
9567 9792
9568 } else if (eeprom->magic == 0x50485952) { 9793 } else if (eeprom->magic == 0x50485952) {
9569 /* 'PHYR' (0x50485952): re-init link after FW upgrade */ 9794 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9570 if ((bp->state == BNX2X_STATE_OPEN) || 9795 if (bp->state == BNX2X_STATE_OPEN) {
9571 (bp->state == BNX2X_STATE_DISABLED)) {
9572 bnx2x_acquire_phy_lock(bp); 9796 bnx2x_acquire_phy_lock(bp);
9573 rc |= bnx2x_link_reset(&bp->link_params, 9797 rc |= bnx2x_link_reset(&bp->link_params,
9574 &bp->link_vars, 1); 9798 &bp->link_vars, 1);
@@ -9748,12 +9972,14 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9748 9972
9749 /* TPA requires Rx CSUM offloading */ 9973 /* TPA requires Rx CSUM offloading */
9750 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 9974 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9751 if (!(dev->features & NETIF_F_LRO)) { 9975 if (!disable_tpa) {
9752 dev->features |= NETIF_F_LRO; 9976 if (!(dev->features & NETIF_F_LRO)) {
9753 bp->flags |= TPA_ENABLE_FLAG; 9977 dev->features |= NETIF_F_LRO;
9754 changed = 1; 9978 bp->flags |= TPA_ENABLE_FLAG;
9755 } 9979 changed = 1;
9756 9980 }
9981 } else
9982 rc = -EINVAL;
9757 } else if (dev->features & NETIF_F_LRO) { 9983 } else if (dev->features & NETIF_F_LRO) {
9758 dev->features &= ~NETIF_F_LRO; 9984 dev->features &= ~NETIF_F_LRO;
9759 bp->flags &= ~TPA_ENABLE_FLAG; 9985 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9818,11 +10044,6 @@ static const struct {
9818 { "idle check (online)" } 10044 { "idle check (online)" }
9819}; 10045};
9820 10046
9821static int bnx2x_self_test_count(struct net_device *dev)
9822{
9823 return BNX2X_NUM_TESTS;
9824}
9825
9826static int bnx2x_test_registers(struct bnx2x *bp) 10047static int bnx2x_test_registers(struct bnx2x *bp)
9827{ 10048{
9828 int idx, i, rc = -ENODEV; 10049 int idx, i, rc = -ENODEV;
@@ -9990,7 +10211,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9990 struct sk_buff *skb; 10211 struct sk_buff *skb;
9991 unsigned char *packet; 10212 unsigned char *packet;
9992 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 10213 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9993 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; 10214 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
9994 u16 tx_start_idx, tx_idx; 10215 u16 tx_start_idx, tx_idx;
9995 u16 rx_start_idx, rx_idx; 10216 u16 rx_start_idx, rx_idx;
9996 u16 pkt_prod, bd_prod; 10217 u16 pkt_prod, bd_prod;
@@ -10067,13 +10288,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10067 10288
10068 fp_tx->tx_db.data.prod += 2; 10289 fp_tx->tx_db.data.prod += 2;
10069 barrier(); 10290 barrier();
10070 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); 10291 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10071 10292
10072 mmiowb(); 10293 mmiowb();
10073 10294
10074 num_pkts++; 10295 num_pkts++;
10075 fp_tx->tx_bd_prod += 2; /* start + pbd */ 10296 fp_tx->tx_bd_prod += 2; /* start + pbd */
10076 bp->dev->trans_start = jiffies;
10077 10297
10078 udelay(100); 10298 udelay(100);
10079 10299
@@ -10217,20 +10437,23 @@ static int bnx2x_test_intr(struct bnx2x *bp)
10217 10437
10218 config->hdr.length = 0; 10438 config->hdr.length = 0;
10219 if (CHIP_IS_E1(bp)) 10439 if (CHIP_IS_E1(bp))
10220 config->hdr.offset = (BP_PORT(bp) ? 32 : 0); 10440 /* use last unicast entries */
10441 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10221 else 10442 else
10222 config->hdr.offset = BP_FUNC(bp); 10443 config->hdr.offset = BP_FUNC(bp);
10223 config->hdr.client_id = bp->fp->cl_id; 10444 config->hdr.client_id = bp->fp->cl_id;
10224 config->hdr.reserved1 = 0; 10445 config->hdr.reserved1 = 0;
10225 10446
10447 bp->set_mac_pending++;
10448 smp_wmb();
10226 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 10449 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10227 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 10450 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10228 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 10451 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10229 if (rc == 0) { 10452 if (rc == 0) {
10230 bp->set_mac_pending++;
10231 for (i = 0; i < 10; i++) { 10453 for (i = 0; i < 10; i++) {
10232 if (!bp->set_mac_pending) 10454 if (!bp->set_mac_pending)
10233 break; 10455 break;
10456 smp_rmb();
10234 msleep_interruptible(10); 10457 msleep_interruptible(10);
10235 } 10458 }
10236 if (i == 10) 10459 if (i == 10)
@@ -10264,7 +10487,7 @@ static void bnx2x_self_test(struct net_device *dev,
10264 /* disable input for TX port IF */ 10487 /* disable input for TX port IF */
10265 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 10488 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10266 10489
10267 link_up = bp->link_vars.link_up; 10490 link_up = (bnx2x_link_test(bp) == 0);
10268 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10491 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10269 bnx2x_nic_load(bp, LOAD_DIAG); 10492 bnx2x_nic_load(bp, LOAD_DIAG);
10270 /* wait until link state is restored */ 10493 /* wait until link state is restored */
@@ -10434,7 +10657,37 @@ static const struct {
10434 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 10657 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10435#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 10658#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10436#define IS_E1HMF_MODE_STAT(bp) \ 10659#define IS_E1HMF_MODE_STAT(bp) \
10437 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) 10660 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
10661
10662static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10663{
10664 struct bnx2x *bp = netdev_priv(dev);
10665 int i, num_stats;
10666
10667 switch (stringset) {
10668 case ETH_SS_STATS:
10669 if (is_multi(bp)) {
10670 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10671 if (!IS_E1HMF_MODE_STAT(bp))
10672 num_stats += BNX2X_NUM_STATS;
10673 } else {
10674 if (IS_E1HMF_MODE_STAT(bp)) {
10675 num_stats = 0;
10676 for (i = 0; i < BNX2X_NUM_STATS; i++)
10677 if (IS_FUNC_STAT(i))
10678 num_stats++;
10679 } else
10680 num_stats = BNX2X_NUM_STATS;
10681 }
10682 return num_stats;
10683
10684 case ETH_SS_TEST:
10685 return BNX2X_NUM_TESTS;
10686
10687 default:
10688 return -EINVAL;
10689 }
10690}
10438 10691
10439static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 10692static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10440{ 10693{
@@ -10445,7 +10698,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10445 case ETH_SS_STATS: 10698 case ETH_SS_STATS:
10446 if (is_multi(bp)) { 10699 if (is_multi(bp)) {
10447 k = 0; 10700 k = 0;
10448 for_each_rx_queue(bp, i) { 10701 for_each_queue(bp, i) {
10449 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 10702 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10450 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 10703 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10451 bnx2x_q_stats_arr[j].string, i); 10704 bnx2x_q_stats_arr[j].string, i);
@@ -10473,28 +10726,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10473 } 10726 }
10474} 10727}
10475 10728
10476static int bnx2x_get_stats_count(struct net_device *dev)
10477{
10478 struct bnx2x *bp = netdev_priv(dev);
10479 int i, num_stats;
10480
10481 if (is_multi(bp)) {
10482 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10483 if (!IS_E1HMF_MODE_STAT(bp))
10484 num_stats += BNX2X_NUM_STATS;
10485 } else {
10486 if (IS_E1HMF_MODE_STAT(bp)) {
10487 num_stats = 0;
10488 for (i = 0; i < BNX2X_NUM_STATS; i++)
10489 if (IS_FUNC_STAT(i))
10490 num_stats++;
10491 } else
10492 num_stats = BNX2X_NUM_STATS;
10493 }
10494
10495 return num_stats;
10496}
10497
10498static void bnx2x_get_ethtool_stats(struct net_device *dev, 10729static void bnx2x_get_ethtool_stats(struct net_device *dev,
10499 struct ethtool_stats *stats, u64 *buf) 10730 struct ethtool_stats *stats, u64 *buf)
10500{ 10731{
@@ -10504,7 +10735,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10504 10735
10505 if (is_multi(bp)) { 10736 if (is_multi(bp)) {
10506 k = 0; 10737 k = 0;
10507 for_each_rx_queue(bp, i) { 10738 for_each_queue(bp, i) {
10508 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 10739 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10509 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 10740 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10510 if (bnx2x_q_stats_arr[j].size == 0) { 10741 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10570,7 +10801,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10570static int bnx2x_phys_id(struct net_device *dev, u32 data) 10801static int bnx2x_phys_id(struct net_device *dev, u32 data)
10571{ 10802{
10572 struct bnx2x *bp = netdev_priv(dev); 10803 struct bnx2x *bp = netdev_priv(dev);
10573 int port = BP_PORT(bp);
10574 int i; 10804 int i;
10575 10805
10576 if (!netif_running(dev)) 10806 if (!netif_running(dev))
@@ -10584,13 +10814,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10584 10814
10585 for (i = 0; i < (data * 2); i++) { 10815 for (i = 0; i < (data * 2); i++) {
10586 if ((i % 2) == 0) 10816 if ((i % 2) == 0)
10587 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, 10817 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10588 bp->link_params.hw_led_mode, 10818 SPEED_1000);
10589 bp->link_params.chip_id);
10590 else 10819 else
10591 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, 10820 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10592 bp->link_params.hw_led_mode,
10593 bp->link_params.chip_id);
10594 10821
10595 msleep_interruptible(500); 10822 msleep_interruptible(500);
10596 if (signal_pending(current)) 10823 if (signal_pending(current))
@@ -10598,10 +10825,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10598 } 10825 }
10599 10826
10600 if (bp->link_vars.link_up) 10827 if (bp->link_vars.link_up)
10601 bnx2x_set_led(bp, port, LED_MODE_OPER, 10828 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10602 bp->link_vars.line_speed, 10829 bp->link_vars.line_speed);
10603 bp->link_params.hw_led_mode,
10604 bp->link_params.chip_id);
10605 10830
10606 return 0; 10831 return 0;
10607} 10832}
@@ -10637,11 +10862,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= bnx2x_set_tso,
-	.self_test_count	= bnx2x_self_test_count,
 	.self_test		= bnx2x_self_test,
+	.get_sset_count		= bnx2x_get_sset_count,
 	.get_strings		= bnx2x_get_strings,
 	.phys_id		= bnx2x_phys_id,
-	.get_stats_count	= bnx2x_get_stats_count,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
 };
 
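[Editor's sketch] The removed get_stats_count()/self_test_count() pair is replaced by ethtool's consolidated get_sset_count() operation, queried once per string set. A minimal sketch of the replacement shape (illustrative only; the in-tree bnx2x_get_sset_count() keeps the multi-queue and E1HMF filtering that the removed function performed):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* Sketch: one callback answers "how many strings/stats?" per set. */
    static int example_get_sset_count(struct net_device *dev, int sset)
    {
    	switch (sset) {
    	case ETH_SS_STATS:
    		return BNX2X_NUM_STATS;		/* what get_stats_count() returned */
    	case ETH_SS_TEST:
    		return BNX2X_NUM_TESTS;		/* what self_test_count() returned */
    	default:
    		return -EOPNOTSUPP;
    	}
    }
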
@@ -10707,54 +10931,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 
 static int bnx2x_poll(struct napi_struct *napi, int budget)
 {
+	int work_done = 0;
 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
 						 napi);
 	struct bnx2x *bp = fp->bp;
-	int work_done = 0;
 
+	while (1) {
 #ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		goto poll_panic;
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
 #endif
 
-	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
-	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
-
-	bnx2x_update_fpsb_idx(fp);
-
-	if (bnx2x_has_rx_work(fp)) {
-		work_done = bnx2x_rx_int(fp, budget);
+		if (bnx2x_has_tx_work(fp))
+			bnx2x_tx_int(fp);
 
-	/* must not complete if we consumed full budget */
-	if (work_done >= budget)
-		goto poll_again;
-	}
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
 
-	/* bnx2x_has_rx_work() reads the status block, thus we need to
-	 * ensure that status block indices have been actually read
-	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
-	 * so that we won't write the "newer" value of the status block to IGU
-	 * (if there was a DMA right after bnx2x_has_rx_work and
-	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
-	 * may be postponed to right before bnx2x_ack_sb). In this case
-	 * there will never be another interrupt until there is another update
-	 * of the status block, while there is still unhandled work.
-	 */
-	rmb();
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
 
-	if (!bnx2x_has_rx_work(fp)) {
-#ifdef BNX2X_STOP_ON_ERROR
-poll_panic:
-#endif
-		napi_complete(napi);
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+			bnx2x_update_fpsb_idx(fp);
+			/* bnx2x_has_rx_work() reads the status block, thus we need
+			 * to ensure that status block indices have been actually read
+			 * (bnx2x_update_fpsb_idx) prior to this check
+			 * (bnx2x_has_rx_work) so that we won't write the "newer"
+			 * value of the status block to IGU (if there was a DMA right
+			 * after bnx2x_has_rx_work and if there is no rmb, the memory
+			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
+			 * before bnx2x_ack_sb). In this case there will never be
+			 * another interrupt until there is another update of the
+			 * status block, while there is still unhandled work.
+			 */
+			rmb();
 
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+					     le16_to_cpu(fp->fp_c_idx),
+					     IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_u_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
 	}
 
-poll_again:
 	return work_done;
 }
 
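[Editor's sketch] The old poll routine handled only Rx and exited through goto labels; the new one services Tx completions and Rx inside one NAPI pass and re-arms the status-block interrupt only after a barrier-protected idleness re-check. For reference, a generic sketch of the NAPI budget contract the loop observes (hypothetical example_* driver, not bnx2x code):

    /* Tx completion work is not charged against the budget; only Rx is.
     * Calling napi_complete() and returning less than 'budget' tells the
     * core the ring is idle and interrupts may be re-enabled.
     */
    static int example_poll(struct napi_struct *napi, int budget)
    {
    	struct example_ring *r = container_of(napi, struct example_ring, napi);
    	int work_done;

    	example_clean_tx(r);			/* unbudgeted, like bnx2x_tx_int() */
    	work_done = example_clean_rx(r, budget);
    	if (work_done < budget) {
    		napi_complete(napi);
    		example_irq_enable(r);		/* mirrors bnx2x_ack_sb() */
    	}
    	return work_done;
    }
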
@@ -10843,10 +11073,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 	}
 
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		rc |= XMIT_GSO_V4;
+		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
 
 	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		rc |= XMIT_GSO_V6;
+		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
 
 	return rc;
 }
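[Editor's sketch] Folding XMIT_CSUM_* into the GSO cases records the invariant that a TSO frame always needs the hardware to refresh the inner TCP (and, for IPv4, IP) checksum, so later code can key off one accumulated mask. An invented helper to illustrate the resulting test (not part of the patch):

    /* Hypothetical: with the combined flags, one mask test answers
     * "must the NIC compute a TCP checksum for this skb?".
     */
    static inline int example_needs_tcp_csum(u32 xmit_type)
    {
    	return xmit_type & XMIT_CSUM_TCP;
    }
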
@@ -10939,7 +11169,7 @@ exit_lbl:
 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	struct bnx2x_fastpath *fp, *fp_stat;
+	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
@@ -10961,11 +11191,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, fp_index);
 
-	fp = &bp->fp[fp_index + bp->num_rx_queues];
-	fp_stat = &bp->fp[fp_index];
+	fp = &bp->fp[fp_index];
 
 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-		fp_stat->eth_q_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
@@ -11191,7 +11420,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	fp->tx_db.data.prod += nbd;
 	barrier();
-	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
+	DOORBELL(bp, fp->index, fp->tx_db.raw);
 
 	mmiowb();
 
@@ -11199,14 +11428,17 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
 		netif_tx_stop_queue(txq);
-		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
-		   if we put Tx into XOFF state. */
+
+		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
+		 * ordering of set_bit() in netif_tx_stop_queue() and read of
+		 * fp->tx_bd_cons */
 		smp_mb();
-		fp_stat->eth_q_stats.driver_xoff++;
+
+		fp->eth_q_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 			netif_tx_wake_queue(txq);
 	}
-	fp_stat->tx_pkt++;
+	fp->tx_pkt++;
 
 	return NETDEV_TX_OK;
 }
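[Editor's sketch] The rewritten comment names the hazard precisely: without the barrier, the stop-bit store could be reordered after the re-read of the consumer index, and a completion landing in that window would never wake the queue. A generic sketch of the producer/consumer pairing, assuming hypothetical ring_space()/WAKE_THRESH helpers:

    /* Producer (xmit path): publish XOFF before re-checking space. */
    netif_tx_stop_queue(txq);
    smp_mb();				/* pairs with barrier in completion path */
    if (ring_space(fp) >= WAKE_THRESH)
    	netif_tx_wake_queue(txq);	/* consumer freed space meanwhile */

    /* Consumer (completion path): publish the new consumer index first. */
    fp->tx_bd_cons = new_cons;
    smp_mb();				/* the paired barrier */
    if (netif_tx_queue_stopped(txq) && ring_space(fp) >= WAKE_THRESH)
    	netif_tx_wake_queue(txq);
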
@@ -11255,7 +11487,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 		rx_mode = BNX2X_RX_MODE_PROMISC;
 
 	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+		  CHIP_IS_E1(bp)))
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
 	else { /* some multicasts */
@@ -11265,10 +11498,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 			struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
 
-			for (i = 0, mclist = dev->mc_list;
-			     mclist && (i < dev->mc_count);
-			     i++, mclist = mclist->next) {
-
+			i = 0;
+			netdev_for_each_mc_addr(mclist, dev) {
 				config->config_table[i].
 					cam_entry.msb_mac_addr =
 					swab16(*(u16 *)&mclist->dmi_addr[0]);
@@ -11296,6 +11527,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 					cam_entry.middle_mac_addr,
 					config->config_table[i].
 					cam_entry.lsb_mac_addr);
+				i++;
 			}
 			old = config->hdr.length;
 			if (old > i) {
@@ -11321,6 +11553,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 			config->hdr.client_id = bp->fp->cl_id;
 			config->hdr.reserved1 = 0;
 
+			bp->set_mac_pending++;
+			smp_wmb();
+
 			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
 				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
@@ -11334,10 +11569,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 
 		memset(mc_filter, 0, 4 * MC_HASH_SIZE);
 
-		for (i = 0, mclist = dev->mc_list;
-		     mclist && (i < dev->mc_count);
-		     i++, mclist = mclist->next) {
-
+		netdev_for_each_mc_addr(mclist, dev) {
 			DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 			   mclist->dmi_addr);
 
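[Editor's sketch] netdev_mc_count() and netdev_for_each_mc_addr() replace the open-coded dev->mc_list walk; in this kernel generation the iterator still yields a struct dev_mc_list whose address lives in dmi_addr. A minimal usage sketch (hypothetical helper, not from the patch):

    #include <linux/netdevice.h>

    /* Count multicast entries the way the removed loops did by hand;
     * netdev_mc_count(dev) returns the same value directly.
     */
    static int example_count_mc(struct net_device *dev)
    {
    	struct dev_mc_list *mclist;
    	int n = 0;

    	netdev_for_each_mc_addr(mclist, dev)
    		n++;
    	return n;
    }
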
@@ -11370,9 +11602,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev)) {
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp, 1);
+			bnx2x_set_eth_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp, 1);
+			bnx2x_set_eth_mac_addr_e1h(bp, 1);
 	}
 
 	return 0;
@@ -11512,7 +11744,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
 
 #endif
 
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_bnx2x(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -11536,7 +11768,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #ifdef BCM_VLAN
 	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
 #endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= poll_bnx2x,
 #endif
 };
@@ -11557,20 +11789,18 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
+		pr_err("Cannot enable PCI device, aborting\n");
 		goto err_out;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find PCI device base address,"
-		       " aborting\n");
+		pr_err("Cannot find PCI device base address, aborting\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
 
 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find second PCI device"
-		       " base address, aborting\n");
+		pr_err("Cannot find second PCI device base address, aborting\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
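[Editor's sketch] pr_err()/pr_info() expand to printk(KERN_ERR pr_fmt(fmt), ...), so the hand-rolled PFX prefix and the string splits that broke grep both disappear; the prefix is presumably supplied by a pr_fmt() define earlier in the file, outside this excerpt. The convention, sketched:

    /* pr_fmt must be defined before the first include that pulls in
     * printk.h, typically at the very top of the .c file.
     */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/kernel.h>

    static void example_report(void)
    {
    	pr_err("Cannot enable PCI device, aborting\n");
    	/* logs "bnx2x: Cannot enable PCI device, aborting" at KERN_ERR */
    }
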
@@ -11578,8 +11808,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	if (atomic_read(&pdev->enable_cnt) == 1) {
 		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 		if (rc) {
-			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
-			       " aborting\n");
+			pr_err("Cannot obtain PCI resources, aborting\n");
 			goto err_out_disable;
 		}
 
@@ -11589,16 +11818,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 	if (bp->pm_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find power management"
-		       " capability, aborting\n");
+		pr_err("Cannot find power management capability, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
 
 	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (bp->pcie_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
-		       " aborting\n");
+		pr_err("Cannot find PCI Express capability, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -11606,15 +11833,13 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
 		bp->flags |= USING_DAC_FLAG;
 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
-			       " failed, aborting\n");
+			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 
 	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-		printk(KERN_ERR PFX "System does not support DMA,"
-		       " aborting\n");
+		pr_err("System does not support DMA, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -11627,7 +11852,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 
 	bp->regview = pci_ioremap_bar(pdev, 0);
 	if (!bp->regview) {
-		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
+		pr_err("Cannot map register space, aborting\n");
 		rc = -ENOMEM;
 		goto err_out_release;
 	}
@@ -11636,7 +11861,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 					min_t(u64, BNX2X_DB_SIZE,
 					      pci_resource_len(pdev, 2)));
 	if (!bp->doorbells) {
-		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
+		pr_err("Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
 		goto err_out_unmap;
 	}
@@ -11738,8 +11963,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 		offset = be32_to_cpu(sections[i].offset);
 		len = be32_to_cpu(sections[i].len);
 		if (offset + len > firmware->size) {
-			printk(KERN_ERR PFX "Section %d length is out of "
-			       "bounds\n", i);
+			pr_err("Section %d length is out of bounds\n", i);
 			return -EINVAL;
 		}
 	}
@@ -11751,8 +11975,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 
 	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
 		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
-			printk(KERN_ERR PFX "Section offset %d is out of "
-			       "bounds\n", i);
+			pr_err("Section offset %d is out of bounds\n", i);
 			return -EINVAL;
 		}
 	}
@@ -11764,8 +11987,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
 	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
 	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
 	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
-		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
-		       " Should be %d.%d.%d.%d\n",
+		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
 		       fw_ver[0], fw_ver[1], fw_ver[2],
 		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
 		       BCM_5710_FW_MINOR_VERSION,
@@ -11815,49 +12037,40 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 		target[i] = be16_to_cpu(source[i]);
 }
 
 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
-	do { \
-		u32 len = be32_to_cpu(fw_hdr->arr.len); \
-		bp->arr = kmalloc(len, GFP_KERNEL); \
-		if (!bp->arr) { \
-			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
-			       "for "#arr"\n", len); \
-			goto lbl; \
-		} \
-		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
-		     (u8 *)bp->arr, len); \
-	} while (0)
+do { \
+	u32 len = be32_to_cpu(fw_hdr->arr.len); \
+	bp->arr = kmalloc(len, GFP_KERNEL); \
+	if (!bp->arr) { \
+		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+		goto lbl; \
+	} \
+	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+	     (u8 *)bp->arr, len); \
+} while (0)
 
 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
 {
-	char fw_file_name[40] = {0};
+	const char *fw_file_name;
 	struct bnx2x_fw_file_hdr *fw_hdr;
-	int rc, offset;
+	int rc;
 
-	/* Create a FW file name */
 	if (CHIP_IS_E1(bp))
-		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
+		fw_file_name = FW_FILE_NAME_E1;
 	else
-		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
+		fw_file_name = FW_FILE_NAME_E1H;
 
-	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
-		BCM_5710_FW_MAJOR_VERSION,
-		BCM_5710_FW_MINOR_VERSION,
-		BCM_5710_FW_REVISION_VERSION,
-		BCM_5710_FW_ENGINEERING_VERSION);
-
-	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
+	pr_info("Loading %s\n", fw_file_name);
 
 	rc = request_firmware(&bp->firmware, fw_file_name, dev);
 	if (rc) {
-		printk(KERN_ERR PFX "Can't load firmware file %s\n",
-		       fw_file_name);
+		pr_err("Can't load firmware file %s\n", fw_file_name);
 		goto request_firmware_exit;
 	}
 
 	rc = bnx2x_check_firmware(bp);
 	if (rc) {
-		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
+		pr_err("Corrupt firmware file %s\n", fw_file_name);
 		goto request_firmware_exit;
 	}
 
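[Editor's sketch] Building the firmware name from __stringify()d version macros means the same constants feed request_firmware() and the MODULE_FIRMWARE() tags added at the top of this patch, so modinfo can list the exact blobs the module needs. A reduced sketch of the pattern with invented EX_* names:

    #include <linux/stringify.h>
    #include <linux/firmware.h>
    #include <linux/module.h>

    #define EX_FW_MAJOR	5
    #define EX_FW_MINOR	2
    #define EX_FW_VERSION	__stringify(EX_FW_MAJOR) "." __stringify(EX_FW_MINOR)
    #define EX_FW_NAME	"bnx2x-e1-" EX_FW_VERSION ".fw"	/* "bnx2x-e1-5.2.fw" */

    MODULE_FIRMWARE(EX_FW_NAME);	/* visible to modinfo and initramfs tools */

    static int example_load_fw(struct device *dev, const struct firmware **fw)
    {
    	return request_firmware(fw, EX_FW_NAME, dev);	/* /lib/firmware lookup */
    }
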
@@ -11916,12 +12129,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
 	if (!dev) {
-		printk(KERN_ERR PFX "Cannot allocate net device\n");
+		pr_err("Cannot allocate net device\n");
 		return -ENOMEM;
 	}
 
 	bp = netdev_priv(dev);
-	bp->msglevel = debug;
+	bp->msg_enable = debug;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -11938,7 +12151,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* Set init arrays */
 	rc = bnx2x_init_firmware(bp, &pdev->dev);
 	if (rc) {
-		printk(KERN_ERR PFX "Error loading firmware\n");
+		pr_err("Error loading firmware\n");
 		goto init_one_exit;
 	}
 
@@ -11949,12 +12162,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	}
 
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
-	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
+	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+		    board_info[ent->driver_data].name,
 	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
-	       dev->base_addr, bp->pdev->irq);
-	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
+		    dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
 	return 0;
 
@@ -11982,7 +12194,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 	struct bnx2x *bp;
 
 	if (!dev) {
-		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+		pr_err("BAD net device from bnx2x_init_one\n");
 		return;
 	}
 	bp = netdev_priv(dev);
@@ -12015,7 +12227,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct bnx2x *bp;
 
 	if (!dev) {
-		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+		pr_err("BAD net device from bnx2x_init_one\n");
 		return -ENODEV;
 	}
 	bp = netdev_priv(dev);
@@ -12047,7 +12259,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	int rc;
 
 	if (!dev) {
-		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+		pr_err("BAD net device from bnx2x_init_one\n");
 		return -ENODEV;
 	}
 	bp = netdev_priv(dev);
@@ -12086,7 +12298,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 
 	/* Release IRQs */
-	bnx2x_free_irq(bp);
+	bnx2x_free_irq(bp, false);
 
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
@@ -12098,9 +12310,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -12250,17 +12462,17 @@ static int __init bnx2x_init(void)
 {
 	int ret;
 
-	printk(KERN_INFO "%s", version);
+	pr_info("%s", version);
 
 	bnx2x_wq = create_singlethread_workqueue("bnx2x");
 	if (bnx2x_wq == NULL) {
-		printk(KERN_ERR PFX "Cannot create workqueue\n");
+		pr_err("Cannot create workqueue\n");
 		return -ENOMEM;
 	}
 
 	ret = pci_register_driver(&bnx2x_pci_driver);
 	if (ret) {
-		printk(KERN_ERR PFX "Cannot register driver\n");
+		pr_err("Cannot register driver\n");
 		destroy_workqueue(bnx2x_wq);
 	}
 	return ret;
@@ -12276,4 +12488,287 @@ static void __exit bnx2x_cleanup(void)
 module_init(bnx2x_init);
 module_exit(bnx2x_cleanup);
 
+#ifdef BCM_CNIC
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+	struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+	bp->cnic_spq_pending -= count;
+
+	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
+	     bp->cnic_spq_pending++) {
+
+		if (!bp->cnic_kwq_pending)
+			break;
+
+		spe = bnx2x_sp_get_next(bp);
+		*spe = *bp->cnic_kwq_cons;
+
+		bp->cnic_kwq_pending--;
+
+		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+			bp->cnic_kwq_cons = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_cons++;
+	}
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+			       struct kwqe_16 *kwqes[], u32 count)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	for (i = 0; i < count; i++) {
+		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+			break;
+
+		*bp->cnic_kwq_prod = *spe;
+
+		bp->cnic_kwq_pending++;
+
+		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
+		   spe->data.mac_config_addr.hi,
+		   spe->data.mac_config_addr.lo,
+		   bp->cnic_kwq_pending);
+
+		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+			bp->cnic_kwq_prod = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_prod++;
+	}
+
+	spin_unlock_bh(&bp->spq_lock);
+
+	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+		bnx2x_cnic_sp_post(bp, 0);
+
+	return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	mutex_lock(&bp->cnic_mutex);
+	c_ops = bp->cnic_ops;
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	mutex_unlock(&bp->cnic_mutex);
+
+	return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	ctl.cmd = cmd;
+
+	return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+{
+	struct cnic_ctl_info ctl;
+
+	/* first we tell CNIC and only then we count this as a completion */
+	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+	ctl.data.comp.cid = cid;
+
+	bnx2x_cnic_ctl_send_bh(bp, &ctl);
+	bnx2x_cnic_sp_post(bp, 1);
+}
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (ctl->cmd) {
+	case DRV_CTL_CTXTBL_WR_CMD: {
+		u32 index = ctl->data.io.offset;
+		dma_addr_t addr = ctl->data.io.dma_addr;
+
+		bnx2x_ilt_wr(bp, index, addr);
+		break;
+	}
+
+	case DRV_CTL_COMPLETION_CMD: {
+		int count = ctl->data.comp.comp_count;
+
+		bnx2x_cnic_sp_post(bp, count);
+		break;
+	}
+
+	/* rtnl_lock is held. */
+	case DRV_CTL_START_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask |= (1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	/* rtnl_lock is held. */
+	case DRV_CTL_STOP_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask &= ~(1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	default:
+		BNX2X_ERR("unknown command %x\n", ctl->cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+		cp->irq_arr[0].vector = bp->msix_table[1].vector;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+	cp->irq_arr[0].status_blk = bp->cnic_sb;
+	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
+	cp->irq_arr[1].status_blk = bp->def_status_blk;
+	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+
+	cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			       void *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		return -EBUSY;
+
+	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bp->cnic_kwq)
+		return -ENOMEM;
+
+	bp->cnic_kwq_cons = bp->cnic_kwq;
+	bp->cnic_kwq_prod = bp->cnic_kwq;
+	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+	bp->cnic_spq_pending = 0;
+	bp->cnic_kwq_pending = 0;
+
+	bp->cnic_data = data;
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
+
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_mutex);
+	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
+		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+	}
+	cp->drv_state = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_mutex);
+	synchronize_rcu();
+	kfree(bp->cnic_kwq);
+	bp->cnic_kwq = NULL;
+
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = CHIP_ID(bp);
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->io_base2 = bp->doorbells;
+	cp->max_kwqe_pending = 8;
+	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
+	cp->ctx_tbl_len = CNIC_ILT_LINES;
+	cp->starting_cid = BCM_CNIC_CID_START;
+	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_register_cnic = bnx2x_register_cnic;
+	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
 
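[Editor's sketch] The new BCM_CNIC block is the L2 driver's half of the handshake with the separate cnic offload module: bnx2x_cnic_probe() hands back a cnic_eth_dev describing the slow-path queue, doorbells and ILT slots, and kwqes queued through drv_submit_kwqes_16 are trickled onto the SPQ as completions free slots. A hedged sketch of the consumer side (the real caller lives in drivers/net/cnic.c; my_ops/my_ctx are placeholders):

    /* Illustrative only: how an offload driver would attach. */
    static int example_attach_cnic(struct net_device *netdev,
    			       struct cnic_ops *my_ops, void *my_ctx)
    {
    	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);

    	if (!cp)
    		return -ENODEV;
    	/* register callbacks; teardown uses cp->drv_unregister_cnic() */
    	return cp->drv_register_cnic(netdev, my_ops, my_ctx);
    }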