path: root/drivers/net/bnx2x/bnx2x_cmn.c
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c  488
1 file changed, 270 insertions(+), 218 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 8d42067a6989..bcc4a8f4677b 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -27,6 +27,8 @@
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#endif 28#endif
29 29
30#include "bnx2x_init.h"
31
30static int bnx2x_poll(struct napi_struct *napi, int budget); 32static int bnx2x_poll(struct napi_struct *napi, int budget);
31 33
32/* free skb in the packet ring at pos idx 34/* free skb in the packet ring at pos idx
@@ -190,14 +192,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
190 192
191 /* First mark all used pages */ 193 /* First mark all used pages */
192 for (i = 0; i < sge_len; i++) 194 for (i = 0; i < sge_len; i++)
193 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); 195 SGE_MASK_CLEAR_BIT(fp,
196 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
194 197
195 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", 198 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
196 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 199 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
197 200
198 /* Here we assume that the last SGE index is the biggest */ 201 /* Here we assume that the last SGE index is the biggest */
199 prefetch((void *)(fp->sge_mask)); 202 prefetch((void *)(fp->sge_mask));
200 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 203 bnx2x_update_last_max_sge(fp,
204 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
201 205
202 last_max = RX_SGE(fp->last_max_sge); 206 last_max = RX_SGE(fp->last_max_sge);
203 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; 207 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@@ -298,7 +302,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
298 302
299 /* Run through the SGL and compose the fragmented skb */ 303 /* Run through the SGL and compose the fragmented skb */
300 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 304 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
301 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); 305 u16 sge_idx =
306 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
302 307
303 /* FW gives the indices of the SGE as if the ring is an array 308 /* FW gives the indices of the SGE as if the ring is an array
304 (meaning that "next" element will consume 2 indices) */ 309 (meaning that "next" element will consume 2 indices) */
@@ -394,8 +399,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
394 if (!bnx2x_fill_frag_skb(bp, fp, skb, 399 if (!bnx2x_fill_frag_skb(bp, fp, skb,
395 &cqe->fast_path_cqe, cqe_idx)) { 400 &cqe->fast_path_cqe, cqe_idx)) {
396#ifdef BCM_VLAN 401#ifdef BCM_VLAN
397 if ((bp->vlgrp != NULL) && is_vlan_cqe && 402 if ((bp->vlgrp != NULL) &&
398 (!is_not_hwaccel_vlan_cqe)) 403 (le16_to_cpu(cqe->fast_path_cqe.
404 pars_flags.flags) & PARSING_FLAGS_VLAN))
399 vlan_gro_receive(&fp->napi, bp->vlgrp, 405 vlan_gro_receive(&fp->napi, bp->vlgrp,
400 le16_to_cpu(cqe->fast_path_cqe. 406 le16_to_cpu(cqe->fast_path_cqe.
401 vlan_tag), skb); 407 vlan_tag), skb);
@@ -686,9 +692,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
686 return IRQ_HANDLED; 692 return IRQ_HANDLED;
687 } 693 }
688 694
689 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 695 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
690 fp->index, fp->sb_id); 696 "[fp %d fw_sd %d igusb %d]\n",
691 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 697 fp->index, fp->fw_sb_id, fp->igu_sb_id);
698 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692 699
693#ifdef BNX2X_STOP_ON_ERROR 700#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic)) 701 if (unlikely(bp->panic))
@@ -698,8 +705,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
698 /* Handle Rx and Tx according to MSI-X vector */ 705 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb); 706 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb); 707 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->status_blk->u_status_block.status_block_index); 708 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 prefetch(&fp->status_blk->c_status_block.status_block_index);
703 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 709 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
704 710
705 return IRQ_HANDLED; 711 return IRQ_HANDLED;
@@ -774,27 +780,73 @@ void bnx2x_link_report(struct bnx2x *bp)
774 } 780 }
775} 781}
776 782
783/* Returns the number of actually allocated BDs */
784static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
785 int rx_ring_size)
786{
787 struct bnx2x *bp = fp->bp;
788 u16 ring_prod, cqe_ring_prod;
789 int i;
790
791 fp->rx_comp_cons = 0;
792 cqe_ring_prod = ring_prod = 0;
793 for (i = 0; i < rx_ring_size; i++) {
794 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
795 BNX2X_ERR("was only able to allocate "
796 "%d rx skbs on queue[%d]\n", i, fp->index);
797 fp->eth_q_stats.rx_skb_alloc_failed++;
798 break;
799 }
800 ring_prod = NEXT_RX_IDX(ring_prod);
801 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
802 WARN_ON(ring_prod <= i);
803 }
804
805 fp->rx_bd_prod = ring_prod;
806 /* Limit the CQE producer by the CQE ring size */
807 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
808 cqe_ring_prod);
809 fp->rx_pkt = fp->rx_calls = 0;
810
811 return i;
812}
813
814static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
815{
816 struct bnx2x *bp = fp->bp;
817 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
818 MAX_RX_AVAIL/bp->num_queues;
819
820 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
821
822 bnx2x_alloc_rx_bds(fp, rx_ring_size);
823
824 /* Warning!
825 * this will generate an interrupt (to the TSTORM)
826 * must only be done after chip is initialized
827 */
828 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
829 fp->rx_sge_prod);
830}
831
777void bnx2x_init_rx_rings(struct bnx2x *bp) 832void bnx2x_init_rx_rings(struct bnx2x *bp)
778{ 833{
779 int func = BP_FUNC(bp); 834 int func = BP_FUNC(bp);
780 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 835 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
781 ETH_MAX_AGGREGATION_QUEUES_E1H; 836 ETH_MAX_AGGREGATION_QUEUES_E1H;
782 u16 ring_prod, cqe_ring_prod; 837 u16 ring_prod;
783 int i, j; 838 int i, j;
784 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
785 MAX_RX_AVAIL/bp->num_queues;
786 839
787 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size); 840 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
841 BNX2X_FW_IP_HDR_ALIGN_PAD;
788 842
789 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
790 DP(NETIF_MSG_IFUP, 843 DP(NETIF_MSG_IFUP,
791 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); 844 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
792 845
793 if (bp->flags & TPA_ENABLE_FLAG) { 846 for_each_queue(bp, j) {
794 847 struct bnx2x_fastpath *fp = &bp->fp[j];
795 for_each_queue(bp, j) {
796 struct bnx2x_fastpath *fp = &bp->fp[j];
797 848
849 if (!fp->disable_tpa) {
798 for (i = 0; i < max_agg_queues; i++) { 850 for (i = 0; i < max_agg_queues; i++) {
799 fp->tpa_pool[i].skb = 851 fp->tpa_pool[i].skb =
800 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 852 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -812,6 +864,35 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
812 mapping, 0); 864 mapping, 0);
813 fp->tpa_state[i] = BNX2X_TPA_STOP; 865 fp->tpa_state[i] = BNX2X_TPA_STOP;
814 } 866 }
867
868 /* "next page" elements initialization */
869 bnx2x_set_next_page_sgl(fp);
870
871 /* set SGEs bit mask */
872 bnx2x_init_sge_ring_bit_mask(fp);
873
874 /* Allocate SGEs and initialize the ring elements */
875 for (i = 0, ring_prod = 0;
876 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
877
878 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
879 BNX2X_ERR("was only able to allocate "
880 "%d rx sges\n", i);
881 BNX2X_ERR("disabling TPA for"
882 " queue[%d]\n", j);
883 /* Cleanup already allocated elements */
884 bnx2x_free_rx_sge_range(bp,
885 fp, ring_prod);
886 bnx2x_free_tpa_pool(bp,
887 fp, max_agg_queues);
888 fp->disable_tpa = 1;
889 ring_prod = 0;
890 break;
891 }
892 ring_prod = NEXT_SGE_IDX(ring_prod);
893 }
894
895 fp->rx_sge_prod = ring_prod;
815 } 896 }
816 } 897 }
817 898
@@ -819,98 +900,15 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
819 struct bnx2x_fastpath *fp = &bp->fp[j]; 900 struct bnx2x_fastpath *fp = &bp->fp[j];
820 901
821 fp->rx_bd_cons = 0; 902 fp->rx_bd_cons = 0;
822 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
823 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
824
825 /* "next page" elements initialization */
826 /* SGE ring */
827 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
828 struct eth_rx_sge *sge;
829
830 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
831 sge->addr_hi =
832 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
833 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
834 sge->addr_lo =
835 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
836 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
837 }
838 903
839 bnx2x_init_sge_ring_bit_mask(fp); 904 bnx2x_set_next_page_rx_bd(fp);
840
841 /* RX BD ring */
842 for (i = 1; i <= NUM_RX_RINGS; i++) {
843 struct eth_rx_bd *rx_bd;
844
845 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
846 rx_bd->addr_hi =
847 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
848 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
849 rx_bd->addr_lo =
850 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
851 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
852 }
853 905
854 /* CQ ring */ 906 /* CQ ring */
855 for (i = 1; i <= NUM_RCQ_RINGS; i++) { 907 bnx2x_set_next_page_rx_cq(fp);
856 struct eth_rx_cqe_next_page *nextpg;
857
858 nextpg = (struct eth_rx_cqe_next_page *)
859 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
860 nextpg->addr_hi =
861 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
862 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
863 nextpg->addr_lo =
864 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
865 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
866 }
867
868 /* Allocate SGEs and initialize the ring elements */
869 for (i = 0, ring_prod = 0;
870 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
871
872 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
873 BNX2X_ERR("was only able to allocate "
874 "%d rx sges\n", i);
875 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
878 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
879 fp->disable_tpa = 1;
880 ring_prod = 0;
881 break;
882 }
883 ring_prod = NEXT_SGE_IDX(ring_prod);
884 }
885 fp->rx_sge_prod = ring_prod;
886 908
887 /* Allocate BDs and initialize BD ring */ 909 /* Allocate BDs and initialize BD ring */
888 fp->rx_comp_cons = 0; 910 bnx2x_alloc_rx_bd_ring(fp);
889 cqe_ring_prod = ring_prod = 0;
890 for (i = 0; i < rx_ring_size; i++) {
891 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
892 BNX2X_ERR("was only able to allocate "
893 "%d rx skbs on queue[%d]\n", i, j);
894 fp->eth_q_stats.rx_skb_alloc_failed++;
895 break;
896 }
897 ring_prod = NEXT_RX_IDX(ring_prod);
898 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
899 WARN_ON(ring_prod <= i);
900 }
901 911
902 fp->rx_bd_prod = ring_prod;
903 /* must not have more available CQEs than BDs */
904 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
905 cqe_ring_prod);
906 fp->rx_pkt = fp->rx_calls = 0;
907
908 /* Warning!
909 * this will generate an interrupt (to the TSTORM)
910 * must only be done after chip is initialized
911 */
912 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
913 fp->rx_sge_prod);
914 if (j != 0) 912 if (j != 0)
915 continue; 913 continue;
916 914
@@ -921,6 +919,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
921 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, 919 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
922 U64_HI(fp->rx_comp_mapping)); 920 U64_HI(fp->rx_comp_mapping));
923 } 921 }
922
924} 923}
925static void bnx2x_free_tx_skbs(struct bnx2x *bp) 924static void bnx2x_free_tx_skbs(struct bnx2x *bp)
926{ 925{
@@ -1252,6 +1251,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1252 if (rc) 1251 if (rc)
1253 return rc; 1252 return rc;
1254 1253
1254 /* must be called before memory allocation and HW init */
1255 bnx2x_ilt_set_info(bp);
1256
1255 if (bnx2x_alloc_mem(bp)) { 1257 if (bnx2x_alloc_mem(bp)) {
1256 bnx2x_free_irq(bp, true); 1258 bnx2x_free_irq(bp, true);
1257 return -ENOMEM; 1259 return -ENOMEM;
@@ -1339,6 +1341,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1339 goto load_error2; 1341 goto load_error2;
1340 } 1342 }
1341 1343
1344 if (rc) {
1345 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1346 goto load_error2;
1347 }
1348
1342 /* Setup NIC internals and enable interrupts */ 1349 /* Setup NIC internals and enable interrupts */
1343 bnx2x_nic_init(bp, load_code); 1350 bnx2x_nic_init(bp, load_code);
1344 1351
@@ -1360,7 +1367,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1360 1367
1361 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1368 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1362 1369
1363 rc = bnx2x_setup_leading(bp); 1370 rc = bnx2x_func_start(bp);
1371 if (rc) {
1372 BNX2X_ERR("Function start failed!\n");
1373#ifndef BNX2X_STOP_ON_ERROR
1374 goto load_error3;
1375#else
1376 bp->panic = 1;
1377 return -EBUSY;
1378#endif
1379 }
1380
1381 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1364 if (rc) { 1382 if (rc) {
1365 BNX2X_ERR("Setup leading failed!\n"); 1383 BNX2X_ERR("Setup leading failed!\n");
1366#ifndef BNX2X_STOP_ON_ERROR 1384#ifndef BNX2X_STOP_ON_ERROR
@@ -1377,37 +1395,37 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1377 bp->flags |= MF_FUNC_DIS; 1395 bp->flags |= MF_FUNC_DIS;
1378 } 1396 }
1379 1397
1380 if (bp->state == BNX2X_STATE_OPEN) {
1381#ifdef BCM_CNIC 1398#ifdef BCM_CNIC
1382 /* Enable Timer scan */ 1399 /* Enable Timer scan */
1383 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); 1400 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1384#endif 1401#endif
1385 for_each_nondefault_queue(bp, i) { 1402 for_each_nondefault_queue(bp, i) {
1386 rc = bnx2x_setup_multi(bp, i); 1403 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1387 if (rc) 1404 if (rc)
1388#ifdef BCM_CNIC 1405#ifdef BCM_CNIC
1389 goto load_error4; 1406 goto load_error4;
1390#else 1407#else
1391 goto load_error3; 1408 goto load_error3;
1392#endif 1409#endif
1393 } 1410 }
1411
1412 /* Now when Clients are configured we are ready to work */
1413 bp->state = BNX2X_STATE_OPEN;
1414
1415 bnx2x_set_eth_mac(bp, 1);
1394 1416
1395 if (CHIP_IS_E1(bp))
1396 bnx2x_set_eth_mac_addr_e1(bp, 1);
1397 else
1398 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1399#ifdef BCM_CNIC 1417#ifdef BCM_CNIC
1400 /* Set iSCSI L2 MAC */ 1418 /* Set iSCSI L2 MAC */
1401 mutex_lock(&bp->cnic_mutex); 1419 mutex_lock(&bp->cnic_mutex);
1402 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { 1420 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1403 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 1421 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1404 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; 1422 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1405 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, 1423 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
1406 CNIC_SB_ID(bp)); 1424 BNX2X_VF_ID_INVALID, false,
1407 } 1425 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
1408 mutex_unlock(&bp->cnic_mutex);
1409#endif
1410 } 1426 }
1427 mutex_unlock(&bp->cnic_mutex);
1428#endif
1411 1429
1412 if (bp->port.pmf) 1430 if (bp->port.pmf)
1413 bnx2x_initial_phy_init(bp, load_mode); 1431 bnx2x_initial_phy_init(bp, load_mode);
@@ -1415,18 +1433,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1415 /* Start fast path */ 1433 /* Start fast path */
1416 switch (load_mode) { 1434 switch (load_mode) {
1417 case LOAD_NORMAL: 1435 case LOAD_NORMAL:
1418 if (bp->state == BNX2X_STATE_OPEN) { 1436 /* Tx queue should be only reenabled */
1419 /* Tx queue should be only reenabled */ 1437 netif_tx_wake_all_queues(bp->dev);
1420 netif_tx_wake_all_queues(bp->dev);
1421 }
1422 /* Initialize the receive filter. */ 1438 /* Initialize the receive filter. */
1423 bnx2x_set_rx_mode(bp->dev); 1439 bnx2x_set_rx_mode(bp->dev);
1424 break; 1440 break;
1425 1441
1426 case LOAD_OPEN: 1442 case LOAD_OPEN:
1427 netif_tx_start_all_queues(bp->dev); 1443 netif_tx_start_all_queues(bp->dev);
1428 if (bp->state != BNX2X_STATE_OPEN) 1444 smp_mb__after_clear_bit();
1429 netif_tx_disable(bp->dev);
1430 /* Initialize the receive filter. */ 1445 /* Initialize the receive filter. */
1431 bnx2x_set_rx_mode(bp->dev); 1446 bnx2x_set_rx_mode(bp->dev);
1432 break; 1447 break;
@@ -1512,21 +1527,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1512 bp->rx_mode = BNX2X_RX_MODE_NONE; 1527 bp->rx_mode = BNX2X_RX_MODE_NONE;
1513 bnx2x_set_storm_rx_mode(bp); 1528 bnx2x_set_storm_rx_mode(bp);
1514 1529
1515 /* Disable HW interrupts, NAPI and Tx */
1516 bnx2x_netif_stop(bp, 1);
1517 netif_carrier_off(bp->dev);
1518
1519 del_timer_sync(&bp->timer); 1530 del_timer_sync(&bp->timer);
1520 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 1531 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1521 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 1532 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1522 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1533 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1523 1534
1524 /* Release IRQs */
1525 bnx2x_free_irq(bp, false);
1526 1535
1527 /* Cleanup the chip if needed */ 1536 /* Cleanup the chip if needed */
1528 if (unload_mode != UNLOAD_RECOVERY) 1537 if (unload_mode != UNLOAD_RECOVERY)
1529 bnx2x_chip_cleanup(bp, unload_mode); 1538 bnx2x_chip_cleanup(bp, unload_mode);
1539 else {
1540 /* Disable HW interrupts, NAPI and Tx */
1541 bnx2x_netif_stop(bp, 1);
1542
1543 /* Release IRQs */
1544 bnx2x_free_irq(bp, false);
1545 }
1530 1546
1531 bp->port.pmf = 0; 1547 bp->port.pmf = 0;
1532 1548
@@ -1634,27 +1650,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
1634 /* Fall out from the NAPI loop if needed */ 1650 /* Fall out from the NAPI loop if needed */
1635 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1651 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1636 bnx2x_update_fpsb_idx(fp); 1652 bnx2x_update_fpsb_idx(fp);
1637 /* bnx2x_has_rx_work() reads the status block, thus we need 1653 /* bnx2x_has_rx_work() reads the status block,
1638 * to ensure that status block indices have been actually read 1654 * thus we need to ensure that status block indices
1639 * (bnx2x_update_fpsb_idx) prior to this check 1655 * have been actually read (bnx2x_update_fpsb_idx)
1640 * (bnx2x_has_rx_work) so that we won't write the "newer" 1656 * prior to this check (bnx2x_has_rx_work) so that
1641 * value of the status block to IGU (if there was a DMA right 1657 * we won't write the "newer" value of the status block
1642 * after bnx2x_has_rx_work and if there is no rmb, the memory 1658 * to IGU (if there was a DMA right after
1643 * reading (bnx2x_update_fpsb_idx) may be postponed to right 1659 * bnx2x_has_rx_work and if there is no rmb, the memory
1644 * before bnx2x_ack_sb). In this case there will never be 1660 * reading (bnx2x_update_fpsb_idx) may be postponed
1645 * another interrupt until there is another update of the 1661 * to right before bnx2x_ack_sb). In this case there
1646 * status block, while there is still unhandled work. 1662 * will never be another interrupt until there is
1663 * another update of the status block, while there
1664 * is still unhandled work.
1647 */ 1665 */
1648 rmb(); 1666 rmb();
1649 1667
1650 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1668 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1651 napi_complete(napi); 1669 napi_complete(napi);
1652 /* Re-enable interrupts */ 1670 /* Re-enable interrupts */
1653 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 1671 DP(NETIF_MSG_HW,
1654 le16_to_cpu(fp->fp_c_idx), 1672 "Update index to %d\n", fp->fp_hc_idx);
1655 IGU_INT_NOP, 1); 1673 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1656 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 1674 le16_to_cpu(fp->fp_hc_idx),
1657 le16_to_cpu(fp->fp_u_idx),
1658 IGU_INT_ENABLE, 1); 1675 IGU_INT_ENABLE, 1);
1659 break; 1676 break;
1660 } 1677 }
@@ -1850,7 +1867,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1850 struct sw_tx_bd *tx_buf; 1867 struct sw_tx_bd *tx_buf;
1851 struct eth_tx_start_bd *tx_start_bd; 1868 struct eth_tx_start_bd *tx_start_bd;
1852 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1869 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1853 struct eth_tx_parse_bd *pbd = NULL; 1870 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1854 u16 pkt_prod, bd_prod; 1871 u16 pkt_prod, bd_prod;
1855 int nbd, fp_index; 1872 int nbd, fp_index;
1856 dma_addr_t mapping; 1873 dma_addr_t mapping;
@@ -1926,10 +1943,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1926 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 1943 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1927 1944
1928 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 1945 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1929 tx_start_bd->general_data = (mac_type << 1946 SET_FLAG(tx_start_bd->general_data,
1930 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 1947 ETH_TX_START_BD_ETH_ADDR_TYPE,
1948 mac_type);
1931 /* header nbd */ 1949 /* header nbd */
1932 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 1950 SET_FLAG(tx_start_bd->general_data,
1951 ETH_TX_START_BD_HDR_NBDS,
1952 1);
1933 1953
1934 /* remember the first BD of the packet */ 1954 /* remember the first BD of the packet */
1935 tx_buf->first_bd = fp->tx_bd_prod; 1955 tx_buf->first_bd = fp->tx_bd_prod;
@@ -1943,62 +1963,68 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1943#ifdef BCM_VLAN 1963#ifdef BCM_VLAN
1944 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && 1964 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1945 (bp->flags & HW_VLAN_TX_FLAG)) { 1965 (bp->flags & HW_VLAN_TX_FLAG)) {
1946 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 1966 tx_start_bd->vlan_or_ethertype =
1947 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 1967 cpu_to_le16(vlan_tx_tag_get(skb));
1968 tx_start_bd->bd_flags.as_bitfield |=
1969 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
1948 } else 1970 } else
1949#endif 1971#endif
1950 tx_start_bd->vlan = cpu_to_le16(pkt_prod); 1972 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1951 1973
1952 /* turn on parsing and get a BD */ 1974 /* turn on parsing and get a BD */
1953 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1975 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1954 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1955 1976
1956 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 1977 if (xmit_type & XMIT_CSUM) {
1978 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1979
1980 if (xmit_type & XMIT_CSUM_V4)
1981 tx_start_bd->bd_flags.as_bitfield |=
1982 ETH_TX_BD_FLAGS_IP_CSUM;
1983 else
1984 tx_start_bd->bd_flags.as_bitfield |=
1985 ETH_TX_BD_FLAGS_IPV6;
1957 1986
1987 if (!(xmit_type & XMIT_CSUM_TCP))
1988 tx_start_bd->bd_flags.as_bitfield |=
1989 ETH_TX_BD_FLAGS_IS_UDP;
1990 }
1991 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
1992 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1993 /* Set PBD in checksum offload case */
1958 if (xmit_type & XMIT_CSUM) { 1994 if (xmit_type & XMIT_CSUM) {
1959 hlen = (skb_network_header(skb) - skb->data) / 2; 1995 hlen = (skb_network_header(skb) - skb->data) / 2;
1960 1996
1961 /* for now NS flag is not used in Linux */ 1997 /* for now NS flag is not used in Linux */
1962 pbd->global_data = 1998 pbd_e1x->global_data =
1963 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 1999 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1964 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); 2000 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1965 2001
1966 pbd->ip_hlen = (skb_transport_header(skb) - 2002 pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
1967 skb_network_header(skb)) / 2; 2003 skb_network_header(skb)) / 2;
1968 2004
1969 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; 2005 hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;
1970 2006
1971 pbd->total_hlen = cpu_to_le16(hlen); 2007 pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
1972 hlen = hlen*2; 2008 hlen = hlen*2;
1973 2009
1974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1975
1976 if (xmit_type & XMIT_CSUM_V4)
1977 tx_start_bd->bd_flags.as_bitfield |=
1978 ETH_TX_BD_FLAGS_IP_CSUM;
1979 else
1980 tx_start_bd->bd_flags.as_bitfield |=
1981 ETH_TX_BD_FLAGS_IPV6;
1982
1983 if (xmit_type & XMIT_CSUM_TCP) { 2010 if (xmit_type & XMIT_CSUM_TCP) {
1984 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 2011 pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1985 2012
1986 } else { 2013 } else {
1987 s8 fix = SKB_CS_OFF(skb); /* signed! */ 2014 s8 fix = SKB_CS_OFF(skb); /* signed! */
1988 2015
1989 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1990
1991 DP(NETIF_MSG_TX_QUEUED, 2016 DP(NETIF_MSG_TX_QUEUED,
1992 "hlen %d fix %d csum before fix %x\n", 2017 "hlen %d fix %d csum before fix %x\n",
1993 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); 2018 le16_to_cpu(pbd_e1x->total_hlen_w),
2019 fix, SKB_CS(skb));
1994 2020
1995 /* HW bug: fixup the CSUM */ 2021 /* HW bug: fixup the CSUM */
1996 pbd->tcp_pseudo_csum = 2022 pbd_e1x->tcp_pseudo_csum =
1997 bnx2x_csum_fix(skb_transport_header(skb), 2023 bnx2x_csum_fix(skb_transport_header(skb),
1998 SKB_CS(skb), fix); 2024 SKB_CS(skb), fix);
1999 2025
2000 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", 2026 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2001 pbd->tcp_pseudo_csum); 2027 pbd_e1x->tcp_pseudo_csum);
2002 } 2028 }
2003 } 2029 }
2004 2030
@@ -2016,7 +2042,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2016 " nbytes %d flags %x vlan %x\n", 2042 " nbytes %d flags %x vlan %x\n",
2017 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 2043 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2018 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 2044 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2019 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); 2045 tx_start_bd->bd_flags.as_bitfield,
2046 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2020 2047
2021 if (xmit_type & XMIT_GSO) { 2048 if (xmit_type & XMIT_GSO) {
2022 2049
@@ -2031,24 +2058,25 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2031 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2058 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2032 hlen, bd_prod, ++nbd); 2059 hlen, bd_prod, ++nbd);
2033 2060
2034 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2061 pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2035 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2062 pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2036 pbd->tcp_flags = pbd_tcp_flags(skb); 2063 pbd_e1x->tcp_flags = pbd_tcp_flags(skb);
2037 2064
2038 if (xmit_type & XMIT_GSO_V4) { 2065 if (xmit_type & XMIT_GSO_V4) {
2039 pbd->ip_id = swab16(ip_hdr(skb)->id); 2066 pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
2040 pbd->tcp_pseudo_csum = 2067 pbd_e1x->tcp_pseudo_csum =
2041 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, 2068 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2042 ip_hdr(skb)->daddr, 2069 ip_hdr(skb)->daddr,
2043 0, IPPROTO_TCP, 0)); 2070 0, IPPROTO_TCP, 0));
2044 2071
2045 } else 2072 } else
2046 pbd->tcp_pseudo_csum = 2073 pbd_e1x->tcp_pseudo_csum =
2047 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2074 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2048 &ipv6_hdr(skb)->daddr, 2075 &ipv6_hdr(skb)->daddr,
2049 0, IPPROTO_TCP, 0)); 2076 0, IPPROTO_TCP, 0));
2050 2077
2051 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; 2078 pbd_e1x->global_data |=
2079 ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2052 } 2080 }
2053 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2081 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2054 2082
@@ -2088,13 +2116,14 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2088 if (total_pkt_bd != NULL) 2116 if (total_pkt_bd != NULL)
2089 total_pkt_bd->total_pkt_bytes = pkt_size; 2117 total_pkt_bd->total_pkt_bytes = pkt_size;
2090 2118
2091 if (pbd) 2119 if (pbd_e1x)
2092 DP(NETIF_MSG_TX_QUEUED, 2120 DP(NETIF_MSG_TX_QUEUED,
2093 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 2121 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2094 " tcp_flags %x xsum %x seq %u hlen %u\n", 2122 " tcp_flags %x xsum %x seq %u hlen %u\n",
2095 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, 2123 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2096 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, 2124 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2097 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); 2125 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2126 le16_to_cpu(pbd_e1x->total_hlen_w));
2098 2127
2099 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2128 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2100 2129
@@ -2109,7 +2138,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2109 2138
2110 fp->tx_db.data.prod += nbd; 2139 fp->tx_db.data.prod += nbd;
2111 barrier(); 2140 barrier();
2112 DOORBELL(bp, fp->index, fp->tx_db.raw); 2141 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2113 2142
2114 mmiowb(); 2143 mmiowb();
2115 2144
@@ -2141,16 +2170,51 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2141 return -EINVAL; 2170 return -EINVAL;
2142 2171
2143 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2172 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2144 if (netif_running(dev)) { 2173 if (netif_running(dev))
2145 if (CHIP_IS_E1(bp)) 2174 bnx2x_set_eth_mac(bp, 1);
2146 bnx2x_set_eth_mac_addr_e1(bp, 1);
2147 else
2148 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2149 }
2150 2175
2151 return 0; 2176 return 0;
2152} 2177}
2153 2178
2179void bnx2x_free_mem_bp(struct bnx2x *bp)
2180{
2181 kfree(bp->fp);
2182 kfree(bp->msix_table);
2183 kfree(bp->ilt);
2184}
2185
2186int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2187{
2188 struct bnx2x_fastpath *fp;
2189 struct msix_entry *tbl;
2190 struct bnx2x_ilt *ilt;
2191
2192 /* fp array */
2193 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2194 if (!fp)
2195 goto alloc_err;
2196 bp->fp = fp;
2197
2198 /* msix table */
2199 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2200 GFP_KERNEL);
2201 if (!tbl)
2202 goto alloc_err;
2203 bp->msix_table = tbl;
2204
2205 /* ilt */
2206 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2207 if (!ilt)
2208 goto alloc_err;
2209 bp->ilt = ilt;
2210
2211 return 0;
2212alloc_err:
2213 bnx2x_free_mem_bp(bp);
2214 return -ENOMEM;
2215
2216}
2217
2154/* called with rtnl_lock */ 2218/* called with rtnl_lock */
2155int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 2219int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2156{ 2220{
@@ -2200,18 +2264,6 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
2200 struct bnx2x *bp = netdev_priv(dev); 2264 struct bnx2x *bp = netdev_priv(dev);
2201 2265
2202 bp->vlgrp = vlgrp; 2266 bp->vlgrp = vlgrp;
2203
2204 /* Set flags according to the required capabilities */
2205 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2206
2207 if (dev->features & NETIF_F_HW_VLAN_TX)
2208 bp->flags |= HW_VLAN_TX_FLAG;
2209
2210 if (dev->features & NETIF_F_HW_VLAN_RX)
2211 bp->flags |= HW_VLAN_RX_FLAG;
2212
2213 if (netif_running(dev))
2214 bnx2x_set_client_config(bp);
2215} 2267}
2216 2268
2217#endif 2269#endif