diff options
author | Yuval Mintz <yuvalmin@broadcom.com> | 2013-06-01 20:06:18 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-06-03 00:36:47 -0400 |
commit | 16a5fd9265e757121bb5f1b9ad757836f370e76c (patch) | |
tree | 96353769dbb151fdd948ccd87beb7d0e5afd688f | |
parent | d76a611187c4840a4a45fb3f493f9b63c19df4ca (diff) |
bnx2x: Revise comments and alignment
This patch corrects various typos, fixes comment conventions and
adds/removes a few comments.
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 25 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 48 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 37 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 10 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 81 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 254 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 54 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 29 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 8 |
12 files changed, 251 insertions, 307 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0c1104f30de6..8f551cf7bb30 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -362,7 +362,7 @@ union db_prod { | |||
362 | /* | 362 | /* |
363 | * Number of required SGEs is the sum of two: | 363 | * Number of required SGEs is the sum of two: |
364 | * 1. Number of possible opened aggregations (next packet for | 364 | * 1. Number of possible opened aggregations (next packet for |
365 | * these aggregations will probably consume SGE immidiatelly) | 365 | * these aggregations will probably consume SGE immediately) |
366 | * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only | 366 | * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only |
367 | * after placement on BD for new TPA aggregation) | 367 | * after placement on BD for new TPA aggregation) |
368 | * | 368 | * |
@@ -486,10 +486,10 @@ struct bnx2x_fastpath { | |||
486 | 486 | ||
487 | struct napi_struct napi; | 487 | struct napi_struct napi; |
488 | union host_hc_status_block status_blk; | 488 | union host_hc_status_block status_blk; |
489 | /* chip independed shortcuts into sb structure */ | 489 | /* chip independent shortcuts into sb structure */ |
490 | __le16 *sb_index_values; | 490 | __le16 *sb_index_values; |
491 | __le16 *sb_running_index; | 491 | __le16 *sb_running_index; |
492 | /* chip independed shortcut into rx_prods_offset memory */ | 492 | /* chip independent shortcut into rx_prods_offset memory */ |
493 | u32 ustorm_rx_prods_offset; | 493 | u32 ustorm_rx_prods_offset; |
494 | 494 | ||
495 | u32 rx_buf_size; | 495 | u32 rx_buf_size; |
@@ -603,7 +603,7 @@ struct bnx2x_fastpath { | |||
603 | * START_BD(splitted) - includes unpaged data segment for GSO | 603 | * START_BD(splitted) - includes unpaged data segment for GSO |
604 | * PARSING_BD - for TSO and CSUM data | 604 | * PARSING_BD - for TSO and CSUM data |
605 | * PARSING_BD2 - for encapsulation data | 605 | * PARSING_BD2 - for encapsulation data |
606 | * Frag BDs - decribes pages for frags | 606 | * Frag BDs - describes pages for frags |
607 | */ | 607 | */ |
608 | #define BDS_PER_TX_PKT 4 | 608 | #define BDS_PER_TX_PKT 4 |
609 | #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) | 609 | #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) |
@@ -886,14 +886,14 @@ struct bnx2x_common { | |||
886 | #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ | 886 | #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ |
887 | (CHIP_REV(bp) == CHIP_REV_Ax)) | 887 | (CHIP_REV(bp) == CHIP_REV_Ax)) |
888 | /* This define is used in two main places: | 888 | /* This define is used in two main places: |
889 | * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher | 889 | * 1. In the early stages of nic_load, to know if to configure Parser / Searcher |
890 | * to nic-only mode or to offload mode. Offload mode is configured if either the | 890 | * to nic-only mode or to offload mode. Offload mode is configured if either the |
891 | * chip is E1x (where MIC_MODE register is not applicable), or if cnic already | 891 | * chip is E1x (where MIC_MODE register is not applicable), or if cnic already |
892 | * registered for this port (which means that the user wants storage services). | 892 | * registered for this port (which means that the user wants storage services). |
893 | * 2. During cnic-related load, to know if offload mode is already configured in | 893 | * 2. During cnic-related load, to know if offload mode is already configured in |
894 | * the HW or needs to be configrued. | 894 | * the HW or needs to be configured. |
895 | * Since the transition from nic-mode to offload-mode in HW causes traffic | 895 | * Since the transition from nic-mode to offload-mode in HW causes traffic |
896 | * coruption, nic-mode is configured only in ports on which storage services | 896 | * corruption, nic-mode is configured only in ports on which storage services |
897 | * where never requested. | 897 | * where never requested. |
898 | */ | 898 | */ |
899 | #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) | 899 | #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) |
@@ -994,14 +994,14 @@ extern struct workqueue_struct *bnx2x_wq; | |||
994 | * If the maximum number of FP-SB available is X then: | 994 | * If the maximum number of FP-SB available is X then: |
995 | * a. If CNIC is supported it consumes 1 FP-SB thus the max number of | 995 | * a. If CNIC is supported it consumes 1 FP-SB thus the max number of |
996 | * regular L2 queues is Y=X-1 | 996 | * regular L2 queues is Y=X-1 |
997 | * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) | 997 | * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) |
998 | * c. If the FCoE L2 queue is supported the actual number of L2 queues | 998 | * c. If the FCoE L2 queue is supported the actual number of L2 queues |
999 | * is Y+1 | 999 | * is Y+1 |
1000 | * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for | 1000 | * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for |
1001 | * slow-path interrupts) or Y+2 if CNIC is supported (one additional | 1001 | * slow-path interrupts) or Y+2 if CNIC is supported (one additional |
1002 | * FP interrupt context for the CNIC). | 1002 | * FP interrupt context for the CNIC). |
1003 | * e. The number of HW context (CID count) is always X or X+1 if FCoE | 1003 | * e. The number of HW context (CID count) is always X or X+1 if FCoE |
1004 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. | 1004 | * L2 queue is supported. The cid for the FCoE L2 queue is always X. |
1005 | */ | 1005 | */ |
1006 | 1006 | ||
1007 | /* fast-path interrupt contexts E1x */ | 1007 | /* fast-path interrupt contexts E1x */ |
@@ -1568,7 +1568,7 @@ struct bnx2x { | |||
1568 | struct mutex cnic_mutex; | 1568 | struct mutex cnic_mutex; |
1569 | struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; | 1569 | struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; |
1570 | 1570 | ||
1571 | /* Start index of the "special" (CNIC related) L2 cleints */ | 1571 | /* Start index of the "special" (CNIC related) L2 clients */ |
1572 | u8 cnic_base_cl_id; | 1572 | u8 cnic_base_cl_id; |
1573 | 1573 | ||
1574 | int dmae_ready; | 1574 | int dmae_ready; |
@@ -1682,7 +1682,7 @@ struct bnx2x { | |||
1682 | /* operation indication for the sp_rtnl task */ | 1682 | /* operation indication for the sp_rtnl task */ |
1683 | unsigned long sp_rtnl_state; | 1683 | unsigned long sp_rtnl_state; |
1684 | 1684 | ||
1685 | /* DCBX Negotation results */ | 1685 | /* DCBX Negotiation results */ |
1686 | struct dcbx_features dcbx_local_feat; | 1686 | struct dcbx_features dcbx_local_feat; |
1687 | u32 dcbx_error; | 1687 | u32 dcbx_error; |
1688 | 1688 | ||
@@ -2042,7 +2042,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, | |||
2042 | #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) | 2042 | #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) |
2043 | 2043 | ||
2044 | #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit | 2044 | #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit |
2045 | indicates eror */ | 2045 | * indicates error |
2046 | */ | ||
2046 | 2047 | ||
2047 | #define MAX_DMAE_C_PER_PORT 8 | 2048 | #define MAX_DMAE_C_PER_PORT 8 |
2048 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 2049 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b28aaf170755..ef7fed52891a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -124,7 +124,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) | |||
124 | int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); | 124 | int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); |
125 | 125 | ||
126 | /* Queue pointer cannot be re-set on an fp-basis, as moving pointer | 126 | /* Queue pointer cannot be re-set on an fp-basis, as moving pointer |
127 | * backward along the array could cause memory to be overriden | 127 | * backward along the array could cause memory to be overridden |
128 | */ | 128 | */ |
129 | for (cos = 1; cos < bp->max_cos; cos++) { | 129 | for (cos = 1; cos < bp->max_cos; cos++) { |
130 | for (i = 0; i < old_eth_num - delta; i++) { | 130 | for (i = 0; i < old_eth_num - delta; i++) { |
@@ -258,7 +258,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) | |||
258 | smp_mb(); | 258 | smp_mb(); |
259 | 259 | ||
260 | if (unlikely(netif_tx_queue_stopped(txq))) { | 260 | if (unlikely(netif_tx_queue_stopped(txq))) { |
261 | /* Taking tx_lock() is needed to prevent reenabling the queue | 261 | /* Taking tx_lock() is needed to prevent re-enabling the queue |
262 | * while it's empty. This could have happen if rx_action() gets | 262 | * while it's empty. This could have happen if rx_action() gets |
263 | * suspended in bnx2x_tx_int() after the condition before | 263 | * suspended in bnx2x_tx_int() after the condition before |
264 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): | 264 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): |
@@ -571,7 +571,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
571 | return err; | 571 | return err; |
572 | } | 572 | } |
573 | 573 | ||
574 | /* Unmap the page as we r going to pass it to the stack */ | 574 | /* Unmap the page as we're going to pass it to the stack */ |
575 | dma_unmap_page(&bp->pdev->dev, | 575 | dma_unmap_page(&bp->pdev->dev, |
576 | dma_unmap_addr(&old_rx_pg, mapping), | 576 | dma_unmap_addr(&old_rx_pg, mapping), |
577 | SGE_PAGES, DMA_FROM_DEVICE); | 577 | SGE_PAGES, DMA_FROM_DEVICE); |
@@ -1114,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp, | |||
1114 | 1114 | ||
1115 | memset(data, 0, sizeof(*data)); | 1115 | memset(data, 0, sizeof(*data)); |
1116 | 1116 | ||
1117 | /* Fill the report data: efective line speed */ | 1117 | /* Fill the report data: effective line speed */ |
1118 | data->line_speed = line_speed; | 1118 | data->line_speed = line_speed; |
1119 | 1119 | ||
1120 | /* Link is down */ | 1120 | /* Link is down */ |
@@ -1157,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp) | |||
1157 | * | 1157 | * |
1158 | * @bp: driver handle | 1158 | * @bp: driver handle |
1159 | * | 1159 | * |
1160 | * None atomic inmlementation. | 1160 | * None atomic implementation. |
1161 | * Should be called under the phy_lock. | 1161 | * Should be called under the phy_lock. |
1162 | */ | 1162 | */ |
1163 | void __bnx2x_link_report(struct bnx2x *bp) | 1163 | void __bnx2x_link_report(struct bnx2x *bp) |
@@ -1300,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1300 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); | 1300 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); |
1301 | 1301 | ||
1302 | if (!fp->disable_tpa) { | 1302 | if (!fp->disable_tpa) { |
1303 | /* Fill the per-aggregtion pool */ | 1303 | /* Fill the per-aggregation pool */ |
1304 | for (i = 0; i < MAX_AGG_QS(bp); i++) { | 1304 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1305 | struct bnx2x_agg_info *tpa_info = | 1305 | struct bnx2x_agg_info *tpa_info = |
1306 | &fp->tpa_info[i]; | 1306 | &fp->tpa_info[i]; |
@@ -1858,7 +1858,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1858 | * | 1858 | * |
1859 | * If the actual number of Tx queues (for each CoS) is less than 16 then there | 1859 | * If the actual number of Tx queues (for each CoS) is less than 16 then there |
1860 | * will be the holes at the end of each group of 16 ETh L2 indices (0..15, | 1860 | * will be the holes at the end of each group of 16 ETh L2 indices (0..15, |
1861 | * 16..31,...) with indicies that are not coupled with any real Tx queue. | 1861 | * 16..31,...) with indices that are not coupled with any real Tx queue. |
1862 | * | 1862 | * |
1863 | * The proper configuration of skb->queue_mapping is handled by | 1863 | * The proper configuration of skb->queue_mapping is handled by |
1864 | * bnx2x_select_queue() and __skb_tx_hash(). | 1864 | * bnx2x_select_queue() and __skb_tx_hash(). |
@@ -1920,7 +1920,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) | |||
1920 | ETH_OVREHEAD + | 1920 | ETH_OVREHEAD + |
1921 | mtu + | 1921 | mtu + |
1922 | BNX2X_FW_RX_ALIGN_END; | 1922 | BNX2X_FW_RX_ALIGN_END; |
1923 | /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ | 1923 | /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ |
1924 | if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) | 1924 | if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) |
1925 | fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; | 1925 | fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; |
1926 | else | 1926 | else |
@@ -1933,7 +1933,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) | |||
1933 | int i; | 1933 | int i; |
1934 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | 1934 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); |
1935 | 1935 | ||
1936 | /* Prepare the initial contents fo the indirection table if RSS is | 1936 | /* Prepare the initial contents for the indirection table if RSS is |
1937 | * enabled | 1937 | * enabled |
1938 | */ | 1938 | */ |
1939 | for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) | 1939 | for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) |
@@ -2011,7 +2011,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | |||
2011 | 2011 | ||
2012 | /* | 2012 | /* |
2013 | * Cleans the object that have internal lists without sending | 2013 | * Cleans the object that have internal lists without sending |
2014 | * ramrods. Should be run when interrutps are disabled. | 2014 | * ramrods. Should be run when interrupts are disabled. |
2015 | */ | 2015 | */ |
2016 | void bnx2x_squeeze_objects(struct bnx2x *bp) | 2016 | void bnx2x_squeeze_objects(struct bnx2x *bp) |
2017 | { | 2017 | { |
@@ -2347,8 +2347,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) | |||
2347 | fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * | 2347 | fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * |
2348 | BNX2X_NUM_ETH_QUEUES(bp) + index]; | 2348 | BNX2X_NUM_ETH_QUEUES(bp) + index]; |
2349 | 2349 | ||
2350 | /* | 2350 | /* set the tpa flag for each queue. The tpa flag determines the queue |
2351 | * set the tpa flag for each queue. The tpa flag determines the queue | ||
2352 | * minimal size so it must be set prior to queue memory allocation | 2351 | * minimal size so it must be set prior to queue memory allocation |
2353 | */ | 2352 | */ |
2354 | fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || | 2353 | fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || |
@@ -2473,6 +2472,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2473 | 2472 | ||
2474 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 2473 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
2475 | 2474 | ||
2475 | /* zero the structure w/o any lock, before SP handler is initialized */ | ||
2476 | memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); | 2476 | memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); |
2477 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, | 2477 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
2478 | &bp->last_reported_link.link_report_flags); | 2478 | &bp->last_reported_link.link_report_flags); |
@@ -2537,8 +2537,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2537 | } | 2537 | } |
2538 | 2538 | ||
2539 | /* configure multi cos mappings in kernel. | 2539 | /* configure multi cos mappings in kernel. |
2540 | * this configuration may be overriden by a multi class queue discipline | 2540 | * this configuration may be overridden by a multi class queue |
2541 | * or by a dcbx negotiation result. | 2541 | * discipline or by a dcbx negotiation result. |
2542 | */ | 2542 | */ |
2543 | bnx2x_setup_tc(bp->dev, bp->max_cos); | 2543 | bnx2x_setup_tc(bp->dev, bp->max_cos); |
2544 | 2544 | ||
@@ -2697,7 +2697,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2697 | /* Start the Tx */ | 2697 | /* Start the Tx */ |
2698 | switch (load_mode) { | 2698 | switch (load_mode) { |
2699 | case LOAD_NORMAL: | 2699 | case LOAD_NORMAL: |
2700 | /* Tx queue should be only reenabled */ | 2700 | /* Tx queue should be only re-enabled */ |
2701 | netif_tx_wake_all_queues(bp->dev); | 2701 | netif_tx_wake_all_queues(bp->dev); |
2702 | break; | 2702 | break; |
2703 | 2703 | ||
@@ -2842,7 +2842,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2842 | } | 2842 | } |
2843 | 2843 | ||
2844 | /* Nothing to do during unload if previous bnx2x_nic_load() | 2844 | /* Nothing to do during unload if previous bnx2x_nic_load() |
2845 | * have not completed succesfully - all resourses are released. | 2845 | * have not completed successfully - all resources are released. |
2846 | * | 2846 | * |
2847 | * we can get here only after unsuccessful ndo_* callback, during which | 2847 | * we can get here only after unsuccessful ndo_* callback, during which |
2848 | * dev->IFF_UP flag is still on. | 2848 | * dev->IFF_UP flag is still on. |
@@ -2891,10 +2891,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2891 | /* Send the UNLOAD_REQUEST to the MCP */ | 2891 | /* Send the UNLOAD_REQUEST to the MCP */ |
2892 | bnx2x_send_unload_req(bp, unload_mode); | 2892 | bnx2x_send_unload_req(bp, unload_mode); |
2893 | 2893 | ||
2894 | /* | 2894 | /* Prevent transactions to host from the functions on the |
2895 | * Prevent transactions to host from the functions on the | ||
2896 | * engine that doesn't reset global blocks in case of global | 2895 | * engine that doesn't reset global blocks in case of global |
2897 | * attention once gloabl blocks are reset and gates are opened | 2896 | * attention once global blocks are reset and gates are opened |
2898 | * (the engine which leader will perform the recovery | 2897 | * (the engine which leader will perform the recovery |
2899 | * last). | 2898 | * last). |
2900 | */ | 2899 | */ |
@@ -2915,7 +2914,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2915 | } | 2914 | } |
2916 | 2915 | ||
2917 | /* | 2916 | /* |
2918 | * At this stage no more interrupts will arrive so we may safly clean | 2917 | * At this stage no more interrupts will arrive so we may safely clean |
2919 | * the queueable objects here in case they failed to get cleaned so far. | 2918 | * the queueable objects here in case they failed to get cleaned so far. |
2920 | */ | 2919 | */ |
2921 | if (IS_PF(bp)) | 2920 | if (IS_PF(bp)) |
@@ -3587,7 +3586,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3587 | DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", | 3586 | DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", |
3588 | txq_index, fp_index, txdata_index); */ | 3587 | txq_index, fp_index, txdata_index); */ |
3589 | 3588 | ||
3590 | /* enable this debug print to view the tranmission details | 3589 | /* enable this debug print to view the transmission details |
3591 | DP(NETIF_MSG_TX_QUEUED, | 3590 | DP(NETIF_MSG_TX_QUEUED, |
3592 | "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", | 3591 | "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", |
3593 | txdata->cid, fp_index, txdata_index, txdata, fp); */ | 3592 | txdata->cid, fp_index, txdata_index, txdata, fp); */ |
@@ -3970,7 +3969,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) | |||
3970 | /* setup tc must be called under rtnl lock */ | 3969 | /* setup tc must be called under rtnl lock */ |
3971 | ASSERT_RTNL(); | 3970 | ASSERT_RTNL(); |
3972 | 3971 | ||
3973 | /* no traffic classes requested. aborting */ | 3972 | /* no traffic classes requested. Aborting */ |
3974 | if (!num_tc) { | 3973 | if (!num_tc) { |
3975 | netdev_reset_tc(dev); | 3974 | netdev_reset_tc(dev); |
3976 | return 0; | 3975 | return 0; |
@@ -3997,8 +3996,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) | |||
3997 | prio, bp->prio_to_cos[prio]); | 3996 | prio, bp->prio_to_cos[prio]); |
3998 | } | 3997 | } |
3999 | 3998 | ||
4000 | 3999 | /* Use this configuration to differentiate tc0 from other COSes | |
4001 | /* Use this configuration to diffrentiate tc0 from other COSes | ||
4002 | This can be used for ets or pfc, and save the effort of setting | 4000 | This can be used for ets or pfc, and save the effort of setting |
4003 | up a multio class queue disc or negotiating DCBX with a switch | 4001 | up a multio class queue disc or negotiating DCBX with a switch |
4004 | netdev_set_prio_tc_map(dev, 0, 0); | 4002 | netdev_set_prio_tc_map(dev, 0, 0); |
@@ -4629,7 +4627,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) | |||
4629 | 4627 | ||
4630 | changes = flags ^ bp->flags; | 4628 | changes = flags ^ bp->flags; |
4631 | 4629 | ||
4632 | /* if GRO is changed while LRO is enabled, dont force a reload */ | 4630 | /* if GRO is changed while LRO is enabled, don't force a reload */ |
4633 | if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) | 4631 | if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) |
4634 | changes &= ~GRO_ENABLE_FLAG; | 4632 | changes &= ~GRO_ENABLE_FLAG; |
4635 | 4633 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index f62ffc11ac54..4a643d7b9b22 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
@@ -1331,8 +1331,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) | |||
1331 | int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); | 1331 | int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); |
1332 | 1332 | ||
1333 | /* | 1333 | /* |
1334 | * 1. number of frags should not grow above MAX_SKB_FRAGS | 1334 | * 1. Number of frags should not grow above MAX_SKB_FRAGS |
1335 | * 2. frag must fit the page | 1335 | * 2. Frag must fit the page |
1336 | */ | 1336 | */ |
1337 | return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; | 1337 | return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; |
1338 | } | 1338 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 2689f28010fd..210614e37b9b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -687,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) | |||
687 | } | 687 | } |
688 | 688 | ||
689 | /* setup tc must be called under rtnl lock, but we can't take it here | 689 | /* setup tc must be called under rtnl lock, but we can't take it here |
690 | * as we are handling an attetntion on a work queue which must be | 690 | * as we are handling an attention on a work queue which must be |
691 | * flushed at some rtnl-locked contexts (e.g. if down) | 691 | * flushed at some rtnl-locked contexts (e.g. if down) |
692 | */ | 692 | */ |
693 | if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) | 693 | if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) |
@@ -707,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
707 | */ | 707 | */ |
708 | bnx2x_dcbnl_update_applist(bp, true); | 708 | bnx2x_dcbnl_update_applist(bp, true); |
709 | 709 | ||
710 | /* Read rmeote mib if dcbx is in the FW */ | 710 | /* Read remote mib if dcbx is in the FW */ |
711 | if (bnx2x_dcbx_read_shmem_remote_mib(bp)) | 711 | if (bnx2x_dcbx_read_shmem_remote_mib(bp)) |
712 | return; | 712 | return; |
713 | #endif | 713 | #endif |
@@ -738,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
738 | bnx2x_dcbx_update_tc_mapping(bp); | 738 | bnx2x_dcbx_update_tc_mapping(bp); |
739 | 739 | ||
740 | /* | 740 | /* |
741 | * allow other funtions to update their netdevices | 741 | * allow other functions to update their netdevices |
742 | * accordingly | 742 | * accordingly |
743 | */ | 743 | */ |
744 | if (IS_MF(bp)) | 744 | if (IS_MF(bp)) |
@@ -860,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, | |||
860 | i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); | 860 | i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); |
861 | } | 861 | } |
862 | 862 | ||
863 | /*For IEEE admin_recommendation_bw_precentage | 863 | /*For IEEE admin_recommendation_bw_percentage |
864 | *For IEEE admin_recommendation_ets_pg */ | 864 | *For IEEE admin_recommendation_ets_pg */ |
865 | af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; | 865 | af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; |
866 | for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { | 866 | for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { |
@@ -1070,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, | |||
1070 | bool pg_found = false; | 1070 | bool pg_found = false; |
1071 | u32 i, traf_type, add_traf_type, add_pg; | 1071 | u32 i, traf_type, add_traf_type, add_pg; |
1072 | u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; | 1072 | u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; |
1073 | struct pg_entry_help_data *data = help_data->data; /*shotcut*/ | 1073 | struct pg_entry_help_data *data = help_data->data; /*shortcut*/ |
1074 | 1074 | ||
1075 | /* Set to invalid */ | 1075 | /* Set to invalid */ |
1076 | for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) | 1076 | for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) |
@@ -1166,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, | |||
1166 | DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); | 1166 | DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); |
1167 | else | 1167 | else |
1168 | /* If we join a group and one is strict | 1168 | /* If we join a group and one is strict |
1169 | * than the bw rulls */ | 1169 | * than the bw rules |
1170 | */ | ||
1170 | cos_data->data[entry].strict = | 1171 | cos_data->data[entry].strict = |
1171 | BNX2X_DCBX_STRICT_COS_HIGHEST; | 1172 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1172 | } | 1173 | } |
@@ -1277,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, | |||
1277 | } else { | 1278 | } else { |
1278 | /* If there are only pauseable priorities or | 1279 | /* If there are only pauseable priorities or |
1279 | * only non-pauseable,* the lower priorities go | 1280 | * only non-pauseable,* the lower priorities go |
1280 | * to the first queue and the higherpriorities go | 1281 | * to the first queue and the higher priorities go |
1281 | * to the second queue. | 1282 | * to the second queue. |
1282 | */ | 1283 | */ |
1283 | cos_data->data[0].pausable = | 1284 | cos_data->data[0].pausable = |
@@ -1477,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( | |||
1477 | * queue and one priority goes to the second queue. | 1478 | * queue and one priority goes to the second queue. |
1478 | * | 1479 | * |
1479 | * We will join this two cases: | 1480 | * We will join this two cases: |
1480 | * if one is BW limited it will go to the secoend queue | 1481 | * if one is BW limited it will go to the second queue |
1481 | * otherwise the last priority will get it | 1482 | * otherwise the last priority will get it |
1482 | */ | 1483 | */ |
1483 | 1484 | ||
@@ -1497,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( | |||
1497 | false == b_found_strict) | 1498 | false == b_found_strict) |
1498 | /* last entry will be handled separately | 1499 | /* last entry will be handled separately |
1499 | * If no priority is strict than last | 1500 | * If no priority is strict than last |
1500 | * enty goes to last queue.*/ | 1501 | * entry goes to last queue. |
1502 | */ | ||
1501 | entry = 1; | 1503 | entry = 1; |
1502 | cos_data->data[entry].pri_join_mask |= | 1504 | cos_data->data[entry].pri_join_mask |= |
1503 | pri_tested; | 1505 | pri_tested; |
@@ -1509,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( | |||
1509 | b_found_strict = true; | 1511 | b_found_strict = true; |
1510 | cos_data->data[1].pri_join_mask |= pri_tested; | 1512 | cos_data->data[1].pri_join_mask |= pri_tested; |
1511 | /* If we join a group and one is strict | 1513 | /* If we join a group and one is strict |
1512 | * than the bw rulls */ | 1514 | * than the bw rules |
1515 | */ | ||
1513 | cos_data->data[1].strict = | 1516 | cos_data->data[1].strict = |
1514 | BNX2X_DCBX_STRICT_COS_HIGHEST; | 1517 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1515 | } | 1518 | } |
@@ -1838,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, | |||
1838 | 1841 | ||
1839 | void bnx2x_dcbx_pmf_update(struct bnx2x *bp) | 1842 | void bnx2x_dcbx_pmf_update(struct bnx2x *bp) |
1840 | { | 1843 | { |
1841 | /* if we need to syncronize DCBX result from prev PMF | 1844 | /* if we need to synchronize DCBX result from prev PMF |
1842 | * read it from shmem and update bp and netdev accordingly | 1845 | * read it from shmem and update bp and netdev accordingly |
1843 | */ | 1846 | */ |
1844 | if (SHMEM2_HAS(bp, drv_flags) && | 1847 | if (SHMEM2_HAS(bp, drv_flags) && |
@@ -1932,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, | |||
1932 | return; | 1935 | return; |
1933 | 1936 | ||
1934 | /** | 1937 | /** |
1935 | * bw_pct ingnored - band-width percentage devision between user | 1938 | * bw_pct ignored - band-width percentage devision between user |
1936 | * priorities within the same group is not | 1939 | * priorities within the same group is not |
1937 | * standard and hence not supported | 1940 | * standard and hence not supported |
1938 | * | 1941 | * |
1939 | * prio_type igonred - priority levels within the same group are not | 1942 | * prio_type ignored - priority levels within the same group are not |
1940 | * standard and hence are not supported. According | 1943 | * standard and hence are not supported. According |
1941 | * to the standard pgid 15 is dedicated to strict | 1944 | * to the standard pgid 15 is dedicated to strict |
1942 | * prioirty traffic (on the port level). | 1945 | * priority traffic (on the port level). |
1943 | * | 1946 | * |
1944 | * up_map ignored | 1947 | * up_map ignored |
1945 | */ | 1948 | */ |
@@ -1984,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, | |||
1984 | DP(BNX2X_MSG_DCB, "prio = %d\n", prio); | 1987 | DP(BNX2X_MSG_DCB, "prio = %d\n", prio); |
1985 | 1988 | ||
1986 | /** | 1989 | /** |
1987 | * bw_pct ingnored - band-width percentage devision between user | 1990 | * bw_pct ignored - band-width percentage division between user |
1988 | * priorities within the same group is not | 1991 | * priorities within the same group is not |
1989 | * standard and hence not supported | 1992 | * standard and hence not supported |
1990 | * | 1993 | * |
1991 | * prio_type igonred - priority levels within the same group are not | 1994 | * prio_type ignored - priority levels within the same group are not |
1992 | * standard and hence are not supported. According | 1995 | * standard and hence are not supported. According |
1993 | * to the standard pgid 15 is dedicated to strict | 1996 | * to the standard pgid 15 is dedicated to strict |
1994 | * prioirty traffic (on the port level). | 1997 | * priority traffic (on the port level). |
1995 | * | 1998 | * |
1996 | * up_map ignored | 1999 | * up_map ignored |
1997 | */ | 2000 | */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 219dd84d4bf5..12eb4baee9f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | |||
@@ -13,12 +13,6 @@ | |||
13 | * consent. | 13 | * consent. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | |||
17 | /* This struct holds a signature to ensure the dump returned from the driver | ||
18 | * match the meta data file inserted to grc_dump.tcl | ||
19 | * The signature is time stamp, diag version and grc_dump version | ||
20 | */ | ||
21 | |||
22 | #ifndef BNX2X_DUMP_H | 16 | #ifndef BNX2X_DUMP_H |
23 | #define BNX2X_DUMP_H | 17 | #define BNX2X_DUMP_H |
24 | 18 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 91978eb166ea..72b1b59294b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
@@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
320 | 320 | ||
321 | speed = ethtool_cmd_speed(cmd); | 321 | speed = ethtool_cmd_speed(cmd); |
322 | 322 | ||
323 | /* If recieved a request for an unknown duplex, assume full*/ | 323 | /* If received a request for an unknown duplex, assume full*/ |
324 | if (cmd->duplex == DUPLEX_UNKNOWN) | 324 | if (cmd->duplex == DUPLEX_UNKNOWN) |
325 | cmd->duplex = DUPLEX_FULL; | 325 | cmd->duplex = DUPLEX_FULL; |
326 | 326 | ||
@@ -849,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) | |||
849 | 849 | ||
850 | /* Paged registers are supported in E2 & E3 only */ | 850 | /* Paged registers are supported in E2 & E3 only */ |
851 | if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { | 851 | if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { |
852 | /* Read "paged" registes */ | 852 | /* Read "paged" registers */ |
853 | bnx2x_read_pages_regs(bp, p, preset); | 853 | bnx2x_read_pages_regs(bp, p, preset); |
854 | } | 854 | } |
855 | 855 | ||
@@ -1154,8 +1154,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) | |||
1154 | return bp->common.flash_size; | 1154 | return bp->common.flash_size; |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | /* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had | 1157 | /* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, |
1158 | * we done things the other way around, if two pfs from the same port would | 1158 | * had we done things the other way around, if two pfs from the same port would |
1159 | * attempt to access nvram at the same time, we could run into a scenario such | 1159 | * attempt to access nvram at the same time, we could run into a scenario such |
1160 | * as: | 1160 | * as: |
1161 | * pf A takes the port lock. | 1161 | * pf A takes the port lock. |
@@ -2070,7 +2070,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) | |||
2070 | EEE_MODE_OVERRIDE_NVRAM | | 2070 | EEE_MODE_OVERRIDE_NVRAM | |
2071 | EEE_MODE_OUTPUT_TIME; | 2071 | EEE_MODE_OUTPUT_TIME; |
2072 | 2072 | ||
2073 | /* Restart link to propogate changes */ | 2073 | /* Restart link to propagate changes */ |
2074 | if (netif_running(dev)) { | 2074 | if (netif_running(dev)) { |
2075 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 2075 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
2076 | bnx2x_force_link_reset(bp); | 2076 | bnx2x_force_link_reset(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5081990daea0..82f3696437c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -808,8 +808,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) | |||
808 | u32 val = REG_RD(bp, addr); | 808 | u32 val = REG_RD(bp, addr); |
809 | 809 | ||
810 | /* in E1 we must use only PCI configuration space to disable | 810 | /* in E1 we must use only PCI configuration space to disable |
811 | * MSI/MSIX capablility | 811 | * MSI/MSIX capability |
812 | * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block | 812 | * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block |
813 | */ | 813 | */ |
814 | if (CHIP_IS_E1(bp)) { | 814 | if (CHIP_IS_E1(bp)) { |
815 | /* Since IGU_PF_CONF_MSI_MSIX_EN still always on | 815 | /* Since IGU_PF_CONF_MSI_MSIX_EN still always on |
@@ -1012,7 +1012,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) | |||
1012 | hc_sm_p[j].timer_value); | 1012 | hc_sm_p[j].timer_value); |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | /* Indecies data */ | 1015 | /* Indices data */ |
1016 | for (j = 0; j < loop; j++) { | 1016 | for (j = 0; j < loop; j++) { |
1017 | pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, | 1017 | pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, |
1018 | hc_index_p[j].flags, | 1018 | hc_index_p[j].flags, |
@@ -1107,7 +1107,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) | |||
1107 | * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW | 1107 | * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW |
1108 | * initialization. | 1108 | * initialization. |
1109 | */ | 1109 | */ |
1110 | #define FLR_WAIT_USEC 10000 /* 10 miliseconds */ | 1110 | #define FLR_WAIT_USEC 10000 /* 10 milliseconds */ |
1111 | #define FLR_WAIT_INTERVAL 50 /* usec */ | 1111 | #define FLR_WAIT_INTERVAL 50 /* usec */ |
1112 | #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ | 1112 | #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ |
1113 | 1113 | ||
@@ -1327,7 +1327,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) | |||
1327 | bnx2x_panic(); | 1327 | bnx2x_panic(); |
1328 | return 1; | 1328 | return 1; |
1329 | } | 1329 | } |
1330 | /* Zero completion for nxt FLR */ | 1330 | /* Zero completion for next FLR */ |
1331 | REG_WR(bp, comp_addr, 0); | 1331 | REG_WR(bp, comp_addr, 0); |
1332 | 1332 | ||
1333 | return ret; | 1333 | return ret; |
@@ -2343,7 +2343,7 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) | |||
2343 | sum of vn_min_rates. | 2343 | sum of vn_min_rates. |
2344 | or | 2344 | or |
2345 | 0 - if all the min_rates are 0. | 2345 | 0 - if all the min_rates are 0. |
2346 | In the later case fainess algorithm should be deactivated. | 2346 | In the latter case fairness algorithm should be deactivated. |
2347 | If not all min_rates are zero then those that are zeroes will be set to 1. | 2347 | If not all min_rates are zero then those that are zeroes will be set to 1. |
2348 | */ | 2348 | */ |
2349 | static void bnx2x_calc_vn_min(struct bnx2x *bp, | 2349 | static void bnx2x_calc_vn_min(struct bnx2x *bp, |
@@ -2423,7 +2423,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) | |||
2423 | int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); | 2423 | int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); |
2424 | 2424 | ||
2425 | if (BP_NOMCP(bp)) | 2425 | if (BP_NOMCP(bp)) |
2426 | return; /* what should be the default bvalue in this case */ | 2426 | return; /* what should be the default value in this case */ |
2427 | 2427 | ||
2428 | /* For 2 port configuration the absolute function number formula | 2428 | /* For 2 port configuration the absolute function number formula |
2429 | * is: | 2429 | * is: |
@@ -2922,7 +2922,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) | |||
2922 | } | 2922 | } |
2923 | 2923 | ||
2924 | /** | 2924 | /** |
2925 | * bnx2x_get_tx_only_flags - Return common flags | 2925 | * bnx2x_get_common_flags - Return common flags |
2926 | * | 2926 | * |
2927 | * @bp device handle | 2927 | * @bp device handle |
2928 | * @fp queue handle | 2928 | * @fp queue handle |
@@ -3110,7 +3110,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, | |||
3110 | txq_init->fw_sb_id = fp->fw_sb_id; | 3110 | txq_init->fw_sb_id = fp->fw_sb_id; |
3111 | 3111 | ||
3112 | /* | 3112 | /* |
3113 | * set the tss leading client id for TX classfication == | 3113 | * set the tss leading client id for TX classification == |
3114 | * leading RSS client id | 3114 | * leading RSS client id |
3115 | */ | 3115 | */ |
3116 | txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); | 3116 | txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); |
@@ -3197,7 +3197,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) | |||
3197 | 3197 | ||
3198 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); | 3198 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); |
3199 | 3199 | ||
3200 | /* Tx queue should be only reenabled */ | 3200 | /* Tx queue should be only re-enabled */ |
3201 | netif_tx_wake_all_queues(bp->dev); | 3201 | netif_tx_wake_all_queues(bp->dev); |
3202 | 3202 | ||
3203 | /* | 3203 | /* |
@@ -3591,7 +3591,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
3591 | /* | 3591 | /* |
3592 | * It's ok if the actual decrement is issued towards the memory | 3592 | * It's ok if the actual decrement is issued towards the memory |
3593 | * somewhere between the spin_lock and spin_unlock. Thus no | 3593 | * somewhere between the spin_lock and spin_unlock. Thus no |
3594 | * more explict memory barrier is needed. | 3594 | * more explicit memory barrier is needed. |
3595 | */ | 3595 | */ |
3596 | if (common) | 3596 | if (common) |
3597 | atomic_dec(&bp->eq_spq_left); | 3597 | atomic_dec(&bp->eq_spq_left); |
@@ -3660,7 +3660,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | |||
3660 | rc |= BNX2X_DEF_SB_IDX; | 3660 | rc |= BNX2X_DEF_SB_IDX; |
3661 | } | 3661 | } |
3662 | 3662 | ||
3663 | /* Do not reorder: indecies reading should complete before handling */ | 3663 | /* Do not reorder: indices reading should complete before handling */ |
3664 | barrier(); | 3664 | barrier(); |
3665 | return rc; | 3665 | return rc; |
3666 | } | 3666 | } |
@@ -3809,8 +3809,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) | |||
3809 | netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" | 3809 | netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" |
3810 | "Please contact OEM Support for assistance\n"); | 3810 | "Please contact OEM Support for assistance\n"); |
3811 | 3811 | ||
3812 | /* | 3812 | /* Schedule device reset (unload) |
3813 | * Schedule device reset (unload) | ||
3814 | * This is due to some boards consuming sufficient power when driver is | 3813 | * This is due to some boards consuming sufficient power when driver is |
3815 | * up to overheat if fan fails. | 3814 | * up to overheat if fan fails. |
3816 | */ | 3815 | */ |
@@ -4987,7 +4986,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
4987 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); | 4986 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); |
4988 | 4987 | ||
4989 | /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. | 4988 | /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. |
4990 | * when we get the the next-page we nned to adjust so the loop | 4989 | * when we get the next-page we need to adjust so the loop |
4991 | * condition below will be met. The next element is the size of a | 4990 | * condition below will be met. The next element is the size of a |
4992 | * regular element and hence incrementing by 1 | 4991 | * regular element and hence incrementing by 1 |
4993 | */ | 4992 | */ |
@@ -5194,7 +5193,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
5194 | 5193 | ||
5195 | DP(BNX2X_MSG_SP, "sp task invoked\n"); | 5194 | DP(BNX2X_MSG_SP, "sp task invoked\n"); |
5196 | 5195 | ||
5197 | /* make sure the atomic interupt_occurred has been written */ | 5196 | /* make sure the atomic interrupt_occurred has been written */ |
5198 | smp_rmb(); | 5197 | smp_rmb(); |
5199 | if (atomic_read(&bp->interrupt_occurred)) { | 5198 | if (atomic_read(&bp->interrupt_occurred)) { |
5200 | 5199 | ||
@@ -5670,7 +5669,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) | |||
5670 | bp->eq_cons = 0; | 5669 | bp->eq_cons = 0; |
5671 | bp->eq_prod = NUM_EQ_DESC; | 5670 | bp->eq_prod = NUM_EQ_DESC; |
5672 | bp->eq_cons_sb = BNX2X_EQ_INDEX; | 5671 | bp->eq_cons_sb = BNX2X_EQ_INDEX; |
5673 | /* we want a warning message before it gets rought... */ | 5672 | /* we want a warning message before it gets wrought... */ |
5674 | atomic_set(&bp->eq_spq_left, | 5673 | atomic_set(&bp->eq_spq_left, |
5675 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); | 5674 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); |
5676 | } | 5675 | } |
@@ -5754,7 +5753,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, | |||
5754 | 5753 | ||
5755 | break; | 5754 | break; |
5756 | case BNX2X_RX_MODE_PROMISC: | 5755 | case BNX2X_RX_MODE_PROMISC: |
5757 | /* According to deffinition of SI mode, iface in promisc mode | 5756 | /* According to definition of SI mode, iface in promisc mode |
5758 | * should receive matched and unmatched (in resolution of port) | 5757 | * should receive matched and unmatched (in resolution of port) |
5759 | * unicast packets. | 5758 | * unicast packets. |
5760 | */ | 5759 | */ |
@@ -5897,7 +5896,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) | |||
5897 | /* init shortcut */ | 5896 | /* init shortcut */ |
5898 | fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); | 5897 | fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); |
5899 | 5898 | ||
5900 | /* Setup SB indicies */ | 5899 | /* Setup SB indices */ |
5901 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | 5900 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; |
5902 | 5901 | ||
5903 | /* Configure Queue State object */ | 5902 | /* Configure Queue State object */ |
@@ -6652,7 +6651,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
6652 | * stay set) | 6651 | * stay set) |
6653 | * f. If this is VNIC 3 of a port then also init | 6652 | * f. If this is VNIC 3 of a port then also init |
6654 | * first_timers_ilt_entry to zero and last_timers_ilt_entry | 6653 | * first_timers_ilt_entry to zero and last_timers_ilt_entry |
6655 | * to the last enrty in the ILT. | 6654 | * to the last entry in the ILT. |
6656 | * | 6655 | * |
6657 | * Notes: | 6656 | * Notes: |
6658 | * Currently the PF error in the PGLC is non recoverable. | 6657 | * Currently the PF error in the PGLC is non recoverable. |
@@ -7118,8 +7117,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
7118 | 7117 | ||
7119 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); | 7118 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); |
7120 | /* init aeu_mask_attn_func_0/1: | 7119 | /* init aeu_mask_attn_func_0/1: |
7121 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use | 7120 | * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use |
7122 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF | 7121 | * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF |
7123 | * bits 4-7 are used for "per vn group attention" */ | 7122 | * bits 4-7 are used for "per vn group attention" */ |
7124 | val = IS_MF(bp) ? 0xF7 : 0x7; | 7123 | val = IS_MF(bp) ? 0xF7 : 0x7; |
7125 | /* Enable DCBX attention for all but E1 */ | 7124 | /* Enable DCBX attention for all but E1 */ |
@@ -7371,7 +7370,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) | |||
7371 | bnx2x_ilt_init_op_cnic(bp, INITOP_SET); | 7370 | bnx2x_ilt_init_op_cnic(bp, INITOP_SET); |
7372 | 7371 | ||
7373 | if (CONFIGURE_NIC_MODE(bp)) { | 7372 | if (CONFIGURE_NIC_MODE(bp)) { |
7374 | /* Configrue searcher as part of function hw init */ | 7373 | /* Configure searcher as part of function hw init */ |
7375 | bnx2x_init_searcher(bp); | 7374 | bnx2x_init_searcher(bp); |
7376 | 7375 | ||
7377 | /* Reset NIC mode */ | 7376 | /* Reset NIC mode */ |
@@ -7641,7 +7640,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
7641 | } | 7640 | } |
7642 | bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); | 7641 | bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); |
7643 | 7642 | ||
7644 | /* !!! these should become driver const once | 7643 | /* !!! These should become driver const once |
7645 | rf-tool supports split-68 const */ | 7644 | rf-tool supports split-68 const */ |
7646 | REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); | 7645 | REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); |
7647 | REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); | 7646 | REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); |
@@ -7755,7 +7754,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp) | |||
7755 | host_hc_status_block_e1x)); | 7754 | host_hc_status_block_e1x)); |
7756 | 7755 | ||
7757 | if (CONFIGURE_NIC_MODE(bp) && !bp->t2) | 7756 | if (CONFIGURE_NIC_MODE(bp) && !bp->t2) |
7758 | /* allocate searcher T2 table, as it wan't allocated before */ | 7757 | /* allocate searcher T2 table, as it wasn't allocated before */ |
7759 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); | 7758 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); |
7760 | 7759 | ||
7761 | /* write address to which L5 should insert its values */ | 7760 | /* write address to which L5 should insert its values */ |
@@ -8093,7 +8092,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, | |||
8093 | __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); | 8092 | __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); |
8094 | __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); | 8093 | __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); |
8095 | 8094 | ||
8096 | /* If HC is supporterd, enable host coalescing in the transition | 8095 | /* If HC is supported, enable host coalescing in the transition |
8097 | * to INIT state. | 8096 | * to INIT state. |
8098 | */ | 8097 | */ |
8099 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); | 8098 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); |
@@ -8579,14 +8578,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) | |||
8579 | 8578 | ||
8580 | /* | 8579 | /* |
8581 | * (assumption: No Attention from MCP at this stage) | 8580 | * (assumption: No Attention from MCP at this stage) |
8582 | * PMF probably in the middle of TXdisable/enable transaction | 8581 | * PMF probably in the middle of TX disable/enable transaction |
8583 | * 1. Sync IRS for default SB | 8582 | * 1. Sync IRS for default SB |
8584 | * 2. Sync SP queue - this guarantes us that attention handling started | 8583 | * 2. Sync SP queue - this guarantees us that attention handling started |
8585 | * 3. Wait, that TXdisable/enable transaction completes | 8584 | * 3. Wait, that TX disable/enable transaction completes |
8586 | * | 8585 | * |
8587 | * 1+2 guranty that if DCBx attention was scheduled it already changed | 8586 | * 1+2 guarantee that if DCBx attention was scheduled it already changed |
8588 | * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy | 8587 | * pending bit of transaction from STARTED-->TX_STOPPED, if we already |
8589 | * received complettion for the transaction the state is TX_STOPPED. | 8588 | * received completion for the transaction the state is TX_STOPPED. |
8590 | * State will return to STARTED after completion of TX_STOPPED-->STARTED | 8589 | * State will return to STARTED after completion of TX_STOPPED-->STARTED |
8591 | * transaction. | 8590 | * transaction. |
8592 | */ | 8591 | */ |
@@ -8705,7 +8704,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
8705 | 8704 | ||
8706 | /* | 8705 | /* |
8707 | * (assumption: No Attention from MCP at this stage) | 8706 | * (assumption: No Attention from MCP at this stage) |
8708 | * PMF probably in the middle of TXdisable/enable transaction | 8707 | * PMF probably in the middle of TX disable/enable transaction |
8709 | */ | 8708 | */ |
8710 | rc = bnx2x_func_wait_started(bp); | 8709 | rc = bnx2x_func_wait_started(bp); |
8711 | if (rc) { | 8710 | if (rc) { |
@@ -9320,7 +9319,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) | |||
9320 | * the first leader that performs a | 9319 | * the first leader that performs a |
9321 | * leader_reset() reset the global blocks in | 9320 | * leader_reset() reset the global blocks in |
9322 | * order to clear global attentions. Otherwise | 9321 | * order to clear global attentions. Otherwise |
9323 | * the the gates will remain closed for that | 9322 | * the gates will remain closed for that |
9324 | * engine. | 9323 | * engine. |
9325 | */ | 9324 | */ |
9326 | if (load_status || | 9325 | if (load_status || |
@@ -10056,7 +10055,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) | |||
10056 | break; | 10055 | break; |
10057 | } | 10056 | } |
10058 | 10057 | ||
10059 | /* non-common reply from MCP night require looping */ | 10058 | /* non-common reply from MCP might require looping */ |
10060 | rc = bnx2x_prev_unload_uncommon(bp); | 10059 | rc = bnx2x_prev_unload_uncommon(bp); |
10061 | if (rc != BNX2X_PREV_WAIT_NEEDED) | 10060 | if (rc != BNX2X_PREV_WAIT_NEEDED) |
10062 | break; | 10061 | break; |
@@ -11034,7 +11033,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) | |||
11034 | } else { | 11033 | } else { |
11035 | bp->common.int_block = INT_BLOCK_IGU; | 11034 | bp->common.int_block = INT_BLOCK_IGU; |
11036 | 11035 | ||
11037 | /* do not allow device reset during IGU info preocessing */ | 11036 | /* do not allow device reset during IGU info processing */ |
11038 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | 11037 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); |
11039 | 11038 | ||
11040 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 11039 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
@@ -11113,7 +11112,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) | |||
11113 | E1H_FUNC_MAX * sizeof(struct drv_func_mb); | 11112 | E1H_FUNC_MAX * sizeof(struct drv_func_mb); |
11114 | /* | 11113 | /* |
11115 | * get mf configuration: | 11114 | * get mf configuration: |
11116 | * 1. existence of MF configuration | 11115 | * 1. Existence of MF configuration |
11117 | * 2. MAC address must be legal (check only upper bytes) | 11116 | * 2. MAC address must be legal (check only upper bytes) |
11118 | * for Switch-Independent mode; | 11117 | * for Switch-Independent mode; |
11119 | * OVLAN must be legal for Switch-Dependent mode | 11118 | * OVLAN must be legal for Switch-Dependent mode |
@@ -11490,7 +11489,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
11490 | 11489 | ||
11491 | /* We need at least one default status block for slow-path events, | 11490 | /* We need at least one default status block for slow-path events, |
11492 | * second status block for the L2 queue, and a third status block for | 11491 | * second status block for the L2 queue, and a third status block for |
11493 | * CNIC if supproted. | 11492 | * CNIC if supported. |
11494 | */ | 11493 | */ |
11495 | if (CNIC_SUPPORT(bp)) | 11494 | if (CNIC_SUPPORT(bp)) |
11496 | bp->min_msix_vec_cnt = 3; | 11495 | bp->min_msix_vec_cnt = 3; |
@@ -12874,7 +12873,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) | |||
12874 | 12873 | ||
12875 | bnx2x_prev_unload(bp); | 12874 | bnx2x_prev_unload(bp); |
12876 | 12875 | ||
12877 | /* We should have resetted the engine, so It's fair to | 12876 | /* We should have reset the engine, so it's fair to |
12878 | * assume the FW will no longer write to the bnx2x driver. | 12877 | * assume the FW will no longer write to the bnx2x driver. |
12879 | */ | 12878 | */ |
12880 | bnx2x_squeeze_objects(bp); | 12879 | bnx2x_squeeze_objects(bp); |
@@ -12993,7 +12992,7 @@ static void __exit bnx2x_cleanup(void) | |||
12993 | 12992 | ||
12994 | destroy_workqueue(bnx2x_wq); | 12993 | destroy_workqueue(bnx2x_wq); |
12995 | 12994 | ||
12996 | /* Free globablly allocated resources */ | 12995 | /* Free globally allocated resources */ |
12997 | list_for_each_safe(pos, q, &bnx2x_prev_list) { | 12996 | list_for_each_safe(pos, q, &bnx2x_prev_list) { |
12998 | struct bnx2x_prev_path_list *tmp = | 12997 | struct bnx2x_prev_path_list *tmp = |
12999 | list_entry(pos, struct bnx2x_prev_path_list, list); | 12998 | list_entry(pos, struct bnx2x_prev_path_list, list); |
@@ -13016,7 +13015,7 @@ module_exit(bnx2x_cleanup); | |||
13016 | * @bp: driver handle | 13015 | * @bp: driver handle |
13017 | * @set: set or clear the CAM entry | 13016 | * @set: set or clear the CAM entry |
13018 | * | 13017 | * |
13019 | * This function will wait until the ramdord completion returns. | 13018 | * This function will wait until the ramrod completion returns. |
13020 | * Return 0 if success, -ENODEV if ramrod doesn't return. | 13019 | * Return 0 if success, -ENODEV if ramrod doesn't return. |
13021 | */ | 13020 | */ |
13022 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) | 13021 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 1f9dbb2f1756..8f03c984550f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | |||
@@ -35,9 +35,9 @@ | |||
35 | /** | 35 | /** |
36 | * bnx2x_exe_queue_init - init the Exe Queue object | 36 | * bnx2x_exe_queue_init - init the Exe Queue object |
37 | * | 37 | * |
38 | * @o: poiter to the object | 38 | * @o: pointer to the object |
39 | * @exe_len: length | 39 | * @exe_len: length |
40 | * @owner: poiter to the owner | 40 | * @owner: pointer to the owner |
41 | * @validate: validate function pointer | 41 | * @validate: validate function pointer |
42 | * @optimize: optimize function pointer | 42 | * @optimize: optimize function pointer |
43 | * @exec: execute function pointer | 43 | * @exec: execute function pointer |
@@ -176,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, | |||
176 | * @o: queue | 176 | * @o: queue |
177 | * @ramrod_flags: flags | 177 | * @ramrod_flags: flags |
178 | * | 178 | * |
179 | * (Atomicy is ensured using the exe_queue->lock). | 179 | * (Atomicity is ensured using the exe_queue->lock). |
180 | */ | 180 | */ |
181 | static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | 181 | static inline int bnx2x_exe_queue_step(struct bnx2x *bp, |
182 | struct bnx2x_exe_queue_obj *o, | 182 | struct bnx2x_exe_queue_obj *o, |
@@ -189,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | |||
189 | 189 | ||
190 | spin_lock_bh(&o->lock); | 190 | spin_lock_bh(&o->lock); |
191 | 191 | ||
192 | /* | 192 | /* Next step should not be performed until the current is finished, |
193 | * Next step should not be performed until the current is finished, | ||
194 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to | 193 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to |
195 | * properly clear object internals without sending any command to the FW | 194 | * properly clear object internals without sending any command to the FW |
196 | * which also implies there won't be any completion to clear the | 195 | * which also implies there won't be any completion to clear the |
@@ -206,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | |||
206 | } | 205 | } |
207 | } | 206 | } |
208 | 207 | ||
209 | /* | 208 | /* Run through the pending commands list and create a next |
210 | * Run through the pending commands list and create a next | ||
211 | * execution chunk. | 209 | * execution chunk. |
212 | */ | 210 | */ |
213 | while (!list_empty(&o->exe_queue)) { | 211 | while (!list_empty(&o->exe_queue)) { |
@@ -217,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | |||
217 | 215 | ||
218 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { | 216 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { |
219 | cur_len += elem->cmd_len; | 217 | cur_len += elem->cmd_len; |
220 | /* | 218 | /* Prevent from both lists being empty when moving an |
221 | * Prevent from both lists being empty when moving an | ||
222 | * element. This will allow the call of | 219 | * element. This will allow the call of |
223 | * bnx2x_exe_queue_empty() without locking. | 220 | * bnx2x_exe_queue_empty() without locking. |
224 | */ | 221 | */ |
@@ -238,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | |||
238 | 235 | ||
239 | rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); | 236 | rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); |
240 | if (rc < 0) | 237 | if (rc < 0) |
241 | /* | 238 | /* In case of an error return the commands back to the queue |
242 | * In case of an error return the commands back to the queue | 239 | * and reset the pending_comp. |
243 | * and reset the pending_comp. | ||
244 | */ | 240 | */ |
245 | list_splice_init(&o->pending_comp, &o->exe_queue); | 241 | list_splice_init(&o->pending_comp, &o->exe_queue); |
246 | else if (!rc) | 242 | else if (!rc) |
247 | /* | 243 | /* If zero is returned, means there are no outstanding pending |
248 | * If zero is returned, means there are no outstanding pending | ||
249 | * completions and we may dismiss the pending list. | 244 | * completions and we may dismiss the pending list. |
250 | */ | 245 | */ |
251 | __bnx2x_exe_queue_reset_pending(bp, o); | 246 | __bnx2x_exe_queue_reset_pending(bp, o); |
@@ -685,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, | |||
685 | * | 680 | * |
686 | * @cid: connection id | 681 | * @cid: connection id |
687 | * @type: BNX2X_FILTER_XXX_PENDING | 682 | * @type: BNX2X_FILTER_XXX_PENDING |
688 | * @hdr: poiter to header to setup | 683 | * @hdr: pointer to header to setup |
689 | * @rule_cnt: | 684 | * @rule_cnt: |
690 | * | 685 | * |
691 | * currently we always configure one rule and echo field to contain a CID and an | 686 | * currently we always configure one rule and echo field to contain a CID and an |
@@ -714,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, | |||
714 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; | 709 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; |
715 | u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; | 710 | u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; |
716 | 711 | ||
717 | /* | 712 | /* Set LLH CAM entry: currently only iSCSI and ETH macs are |
718 | * Set LLH CAM entry: currently only iSCSI and ETH macs are | ||
719 | * relevant. In addition, current implementation is tuned for a | 713 | * relevant. In addition, current implementation is tuned for a |
720 | * single ETH MAC. | 714 | * single ETH MAC. |
721 | * | 715 | * |
@@ -870,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, | |||
870 | struct bnx2x_raw_obj *raw = &o->raw; | 864 | struct bnx2x_raw_obj *raw = &o->raw; |
871 | struct mac_configuration_cmd *config = | 865 | struct mac_configuration_cmd *config = |
872 | (struct mac_configuration_cmd *)(raw->rdata); | 866 | (struct mac_configuration_cmd *)(raw->rdata); |
873 | /* | 867 | /* 57710 and 57711 do not support MOVE command, |
874 | * 57710 and 57711 do not support MOVE command, | ||
875 | * so it's either ADD or DEL | 868 | * so it's either ADD or DEL |
876 | */ | 869 | */ |
877 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | 870 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
@@ -959,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, | |||
959 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, | 952 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, |
960 | &rule_entry->pair.header); | 953 | &rule_entry->pair.header); |
961 | 954 | ||
962 | /* Set VLAN and MAC themselvs */ | 955 | /* Set VLAN and MAC themselves */ |
963 | rule_entry->pair.vlan = cpu_to_le16(vlan); | 956 | rule_entry->pair.vlan = cpu_to_le16(vlan); |
964 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, | 957 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, |
965 | &rule_entry->pair.mac_mid, | 958 | &rule_entry->pair.mac_mid, |
@@ -1011,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, | |||
1011 | struct bnx2x_raw_obj *raw = &o->raw; | 1004 | struct bnx2x_raw_obj *raw = &o->raw; |
1012 | struct mac_configuration_cmd *config = | 1005 | struct mac_configuration_cmd *config = |
1013 | (struct mac_configuration_cmd *)(raw->rdata); | 1006 | (struct mac_configuration_cmd *)(raw->rdata); |
1014 | /* | 1007 | /* 57710 and 57711 do not support MOVE command, |
1015 | * 57710 and 57711 do not support MOVE command, | ||
1016 | * so it's either ADD or DEL | 1008 | * so it's either ADD or DEL |
1017 | */ | 1009 | */ |
1018 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | 1010 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
@@ -1036,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, | |||
1036 | * | 1028 | * |
1037 | * @bp: device handle | 1029 | * @bp: device handle |
1038 | * @p: command parameters | 1030 | * @p: command parameters |
1039 | * @ppos: pointer to the cooky | 1031 | * @ppos: pointer to the cookie |
1040 | * | 1032 | * |
1041 | * reconfigure next MAC/VLAN/VLAN-MAC element from the | 1033 | * reconfigure next MAC/VLAN/VLAN-MAC element from the |
1042 | * previously configured elements list. | 1034 | * previously configured elements list. |
@@ -1044,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, | |||
1044 | * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken | 1036 | * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken |
1045 | * into an account | 1037 | * into an account |
1046 | * | 1038 | * |
1047 | * pointer to the cooky - that should be given back in the next call to make | 1039 | * pointer to the cookie - that should be given back in the next call to make |
1048 | * function handle the next element. If *ppos is set to NULL it will restart the | 1040 | * function handle the next element. If *ppos is set to NULL it will restart the |
1049 | * iterator. If returned *ppos == NULL this means that the last element has been | 1041 | * iterator. If returned *ppos == NULL this means that the last element has been |
1050 | * handled. | 1042 | * handled. |
@@ -1092,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp, | |||
1092 | return bnx2x_config_vlan_mac(bp, p); | 1084 | return bnx2x_config_vlan_mac(bp, p); |
1093 | } | 1085 | } |
1094 | 1086 | ||
1095 | /* | 1087 | /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a |
1096 | * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a | ||
1097 | * pointer to an element with a specific criteria and NULL if such an element | 1088 | * pointer to an element with a specific criteria and NULL if such an element |
1098 | * hasn't been found. | 1089 | * hasn't been found. |
1099 | */ | 1090 | */ |
@@ -1177,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, | |||
1177 | return rc; | 1168 | return rc; |
1178 | } | 1169 | } |
1179 | 1170 | ||
1180 | /* | 1171 | /* Check if there is a pending ADD command for this |
1181 | * Check if there is a pending ADD command for this | ||
1182 | * MAC/VLAN/VLAN-MAC. Return an error if there is. | 1172 | * MAC/VLAN/VLAN-MAC. Return an error if there is. |
1183 | */ | 1173 | */ |
1184 | if (exeq->get(exeq, elem)) { | 1174 | if (exeq->get(exeq, elem)) { |
@@ -1186,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, | |||
1186 | return -EEXIST; | 1176 | return -EEXIST; |
1187 | } | 1177 | } |
1188 | 1178 | ||
1189 | /* | 1179 | /* TODO: Check the pending MOVE from other objects where this |
1190 | * TODO: Check the pending MOVE from other objects where this | ||
1191 | * object is a destination object. | 1180 | * object is a destination object. |
1192 | */ | 1181 | */ |
1193 | 1182 | ||
@@ -1230,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, | |||
1230 | return -EEXIST; | 1219 | return -EEXIST; |
1231 | } | 1220 | } |
1232 | 1221 | ||
1233 | /* | 1222 | /* Check if there are pending DEL or MOVE commands for this |
1234 | * Check if there are pending DEL or MOVE commands for this | ||
1235 | * MAC/VLAN/VLAN-MAC. Return an error if so. | 1223 | * MAC/VLAN/VLAN-MAC. Return an error if so. |
1236 | */ | 1224 | */ |
1237 | memcpy(&query_elem, elem, sizeof(query_elem)); | 1225 | memcpy(&query_elem, elem, sizeof(query_elem)); |
@@ -1282,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, | |||
1282 | struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; | 1270 | struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; |
1283 | struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; | 1271 | struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; |
1284 | 1272 | ||
1285 | /* | 1273 | /* Check if we can perform this operation based on the current registry |
1286 | * Check if we can perform this operation based on the current registry | ||
1287 | * state. | 1274 | * state. |
1288 | */ | 1275 | */ |
1289 | if (!src_o->check_move(bp, src_o, dest_o, | 1276 | if (!src_o->check_move(bp, src_o, dest_o, |
@@ -1292,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, | |||
1292 | return -EINVAL; | 1279 | return -EINVAL; |
1293 | } | 1280 | } |
1294 | 1281 | ||
1295 | /* | 1282 | /* Check if there is an already pending DEL or MOVE command for the |
1296 | * Check if there is an already pending DEL or MOVE command for the | ||
1297 | * source object or ADD command for a destination object. Return an | 1283 | * source object or ADD command for a destination object. Return an |
1298 | * error if so. | 1284 | * error if so. |
1299 | */ | 1285 | */ |
@@ -1382,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp, | |||
1382 | } | 1368 | } |
1383 | 1369 | ||
1384 | /** | 1370 | /** |
1385 | * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. | 1371 | * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. |
1386 | * | 1372 | * |
1387 | * @bp: device handle | 1373 | * @bp: device handle |
1388 | * @o: bnx2x_vlan_mac_obj | 1374 | * @o: bnx2x_vlan_mac_obj |
@@ -1540,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem( | |||
1540 | 1526 | ||
1541 | /* Get a new CAM offset */ | 1527 | /* Get a new CAM offset */ |
1542 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { | 1528 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { |
1543 | /* | 1529 | /* This shall never happen, because we have checked the |
1544 | * This shell never happen, because we have checked the | 1530 | * CAM availability in the 'validate'. |
1545 | * CAM availiability in the 'validate'. | ||
1546 | */ | 1531 | */ |
1547 | WARN_ON(1); | 1532 | WARN_ON(1); |
1548 | kfree(reg_elem); | 1533 | kfree(reg_elem); |
@@ -1589,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, | |||
1589 | struct bnx2x_vlan_mac_registry_elem *reg_elem; | 1574 | struct bnx2x_vlan_mac_registry_elem *reg_elem; |
1590 | enum bnx2x_vlan_mac_cmd cmd; | 1575 | enum bnx2x_vlan_mac_cmd cmd; |
1591 | 1576 | ||
1592 | /* | 1577 | /* If DRIVER_ONLY execution is requested, cleanup a registry |
1593 | * If DRIVER_ONLY execution is requested, cleanup a registry | ||
1594 | * and exit. Otherwise send a ramrod to FW. | 1578 | * and exit. Otherwise send a ramrod to FW. |
1595 | */ | 1579 | */ |
1596 | if (!drv_only) { | 1580 | if (!drv_only) { |
@@ -1599,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, | |||
1599 | /* Set pending */ | 1583 | /* Set pending */ |
1600 | r->set_pending(r); | 1584 | r->set_pending(r); |
1601 | 1585 | ||
1602 | /* Fill tha ramrod data */ | 1586 | /* Fill the ramrod data */ |
1603 | list_for_each_entry(elem, exe_chunk, link) { | 1587 | list_for_each_entry(elem, exe_chunk, link) { |
1604 | cmd = elem->cmd_data.vlan_mac.cmd; | 1588 | cmd = elem->cmd_data.vlan_mac.cmd; |
1605 | /* | 1589 | /* We will add to the target object in MOVE command, so |
1606 | * We will add to the target object in MOVE command, so | ||
1607 | * change the object for a CAM search. | 1590 | * change the object for a CAM search. |
1608 | */ | 1591 | */ |
1609 | if (cmd == BNX2X_VLAN_MAC_MOVE) | 1592 | if (cmd == BNX2X_VLAN_MAC_MOVE) |
@@ -1636,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, | |||
1636 | idx++; | 1619 | idx++; |
1637 | } | 1620 | } |
1638 | 1621 | ||
1639 | /* | 1622 | /* No need for an explicit memory barrier here as long we would |
1640 | * No need for an explicit memory barrier here as long we would | 1623 | * need to ensure the ordering of writing to the SPQ element |
1641 | * need to ensure the ordering of writing to the SPQ element | 1624 | * and updating of the SPQ producer which involves a memory |
1642 | * and updating of the SPQ producer which involves a memory | 1625 | * read and we will have to put a full memory barrier there |
1643 | * read and we will have to put a full memory barrier there | 1626 | * (inside bnx2x_sp_post()). |
1644 | * (inside bnx2x_sp_post()). | ||
1645 | */ | 1627 | */ |
1646 | 1628 | ||
1647 | rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, | 1629 | rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, |
@@ -1756,8 +1738,7 @@ int bnx2x_config_vlan_mac( | |||
1756 | return rc; | 1738 | return rc; |
1757 | } | 1739 | } |
1758 | 1740 | ||
1759 | /* | 1741 | /* If nothing will be executed further in this iteration we want to |
1760 | * If nothing will be executed further in this iteration we want to | ||
1761 | * return PENDING if there are pending commands | 1742 | * return PENDING if there are pending commands |
1762 | */ | 1743 | */ |
1763 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) | 1744 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) |
@@ -1776,13 +1757,11 @@ int bnx2x_config_vlan_mac( | |||
1776 | return rc; | 1757 | return rc; |
1777 | } | 1758 | } |
1778 | 1759 | ||
1779 | /* | 1760 | /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set |
1780 | * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set | ||
1781 | * then the user wants to wait until the last command is done. | 1761 | * then the user wants to wait until the last command is done. |
1782 | */ | 1762 | */ |
1783 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | 1763 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { |
1784 | /* | 1764 | /* Wait maximum for the current exe_queue length iterations plus |
1785 | * Wait maximum for the current exe_queue length iterations plus | ||
1786 | * one (for the current pending command). | 1765 | * one (for the current pending command). |
1787 | */ | 1766 | */ |
1788 | int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; | 1767 | int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; |
@@ -1817,7 +1796,7 @@ int bnx2x_config_vlan_mac( | |||
1817 | * @ramrod_flags: execution flags to be used for this deletion | 1796 | * @ramrod_flags: execution flags to be used for this deletion |
1818 | * | 1797 | * |
1819 | * if the last operation has completed successfully and there are no | 1798 | * if the last operation has completed successfully and there are no |
1820 | * moreelements left, positive value if the last operation has completed | 1799 | * more elements left, positive value if the last operation has completed |
1821 | * successfully and there are more previously configured elements, negative | 1800 | * successfully and there are more previously configured elements, negative |
1822 | * value is current operation has failed. | 1801 | * value is current operation has failed. |
1823 | */ | 1802 | */ |
@@ -1858,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
1858 | p.ramrod_flags = *ramrod_flags; | 1837 | p.ramrod_flags = *ramrod_flags; |
1859 | p.user_req.cmd = BNX2X_VLAN_MAC_DEL; | 1838 | p.user_req.cmd = BNX2X_VLAN_MAC_DEL; |
1860 | 1839 | ||
1861 | /* | 1840 | /* Add all but the last VLAN-MAC to the execution queue without actually |
1862 | * Add all but the last VLAN-MAC to the execution queue without actually | ||
1863 | * executing anything. | 1841 | * executing anything. |
1864 | */ | 1842 | */ |
1865 | __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); | 1843 | __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); |
@@ -2035,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, | |||
2035 | /* CAM pool handling */ | 2013 | /* CAM pool handling */ |
2036 | vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; | 2014 | vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; |
2037 | vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; | 2015 | vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; |
2038 | /* | 2016 | /* CAM offset is relevant for 57710 and 57711 chips only which have a |
2039 | * CAM offset is relevant for 57710 and 57711 chips only which have a | ||
2040 | * single CAM for both MACs and VLAN-MAC pairs. So the offset | 2017 | * single CAM for both MACs and VLAN-MAC pairs. So the offset |
2041 | * will be taken from MACs' pool object only. | 2018 | * will be taken from MACs' pool object only. |
2042 | */ | 2019 | */ |
@@ -2103,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, | |||
2103 | struct tstorm_eth_mac_filter_config *mac_filters = | 2080 | struct tstorm_eth_mac_filter_config *mac_filters = |
2104 | (struct tstorm_eth_mac_filter_config *)p->rdata; | 2081 | (struct tstorm_eth_mac_filter_config *)p->rdata; |
2105 | 2082 | ||
2106 | /* initial seeting is drop-all */ | 2083 | /* initial setting is drop-all */ |
2107 | u8 drop_all_ucast = 1, drop_all_mcast = 1; | 2084 | u8 drop_all_ucast = 1, drop_all_mcast = 1; |
2108 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | 2085 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; |
2109 | u8 unmatched_unicast = 0; | 2086 | u8 unmatched_unicast = 0; |
2110 | 2087 | ||
2111 | /* In e1x there we only take into account rx acceot flag since tx switching | 2088 | /* In e1x there we only take into account rx accept flag since tx switching |
2112 | * isn't enabled. */ | 2089 | * isn't enabled. */ |
2113 | if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) | 2090 | if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) |
2114 | /* accept matched ucast */ | 2091 | /* accept matched ucast */ |
@@ -2271,8 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, | |||
2271 | false); | 2248 | false); |
2272 | } | 2249 | } |
2273 | 2250 | ||
2274 | /* | 2251 | /* If FCoE Queue configuration has been requested configure the Rx and |
2275 | * If FCoE Queue configuration has been requested configure the Rx and | ||
2276 | * internal switching modes for this queue in separate rules. | 2252 | * internal switching modes for this queue in separate rules. |
2277 | * | 2253 | * |
2278 | * FCoE queue shall never be set to ACCEPT_ALL packets of any sort: | 2254 | * FCoE queue shall never be set to ACCEPT_ALL packets of any sort: |
@@ -2308,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, | |||
2308 | } | 2284 | } |
2309 | } | 2285 | } |
2310 | 2286 | ||
2311 | /* | 2287 | /* Set the ramrod header (most importantly - number of rules to |
2312 | * Set the ramrod header (most importantly - number of rules to | ||
2313 | * configure). | 2288 | * configure). |
2314 | */ | 2289 | */ |
2315 | bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); | 2290 | bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); |
@@ -2318,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, | |||
2318 | data->header.rule_cnt, p->rx_accept_flags, | 2293 | data->header.rule_cnt, p->rx_accept_flags, |
2319 | p->tx_accept_flags); | 2294 | p->tx_accept_flags); |
2320 | 2295 | ||
2321 | /* | 2296 | /* No need for an explicit memory barrier here as long we would |
2322 | * No need for an explicit memory barrier here as long we would | 2297 | * need to ensure the ordering of writing to the SPQ element |
2323 | * need to ensure the ordering of writing to the SPQ element | 2298 | * and updating of the SPQ producer which involves a memory |
2324 | * and updating of the SPQ producer which involves a memory | 2299 | * read and we will have to put a full memory barrier there |
2325 | * read and we will have to put a full memory barrier there | 2300 | * (inside bnx2x_sp_post()). |
2326 | * (inside bnx2x_sp_post()). | ||
2327 | */ | 2301 | */ |
2328 | 2302 | ||
2329 | /* Send a ramrod */ | 2303 | /* Send a ramrod */ |
@@ -2460,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, | |||
2460 | cur_mac = (struct bnx2x_mcast_mac_elem *) | 2434 | cur_mac = (struct bnx2x_mcast_mac_elem *) |
2461 | ((u8 *)new_cmd + sizeof(*new_cmd)); | 2435 | ((u8 *)new_cmd + sizeof(*new_cmd)); |
2462 | 2436 | ||
2463 | /* Push the MACs of the current command into the pendig command | 2437 | /* Push the MACs of the current command into the pending command |
2464 | * MACs list: FIFO | 2438 | * MACs list: FIFO |
2465 | */ | 2439 | */ |
2466 | list_for_each_entry(pos, &p->mcast_list, link) { | 2440 | list_for_each_entry(pos, &p->mcast_list, link) { |
@@ -3017,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, | |||
3017 | if (!o->total_pending_num) | 2991 | if (!o->total_pending_num) |
3018 | bnx2x_mcast_refresh_registry_e2(bp, o); | 2992 | bnx2x_mcast_refresh_registry_e2(bp, o); |
3019 | 2993 | ||
3020 | /* | 2994 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear |
3021 | * If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
3022 | * RAMROD_PENDING status immediately. | 2995 | * RAMROD_PENDING status immediately. |
3023 | */ | 2996 | */ |
3024 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | 2997 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
3025 | raw->clear_pending(raw); | 2998 | raw->clear_pending(raw); |
3026 | return 0; | 2999 | return 0; |
3027 | } else { | 3000 | } else { |
3028 | /* | 3001 | /* No need for an explicit memory barrier here as long we would |
3029 | * No need for an explicit memory barrier here as long we would | 3002 | * need to ensure the ordering of writing to the SPQ element |
3030 | * need to ensure the ordering of writing to the SPQ element | 3003 | * and updating of the SPQ producer which involves a memory |
3031 | * and updating of the SPQ producer which involves a memory | 3004 | * read and we will have to put a full memory barrier there |
3032 | * read and we will have to put a full memory barrier there | 3005 | * (inside bnx2x_sp_post()). |
3033 | * (inside bnx2x_sp_post()). | ||
3034 | */ | 3006 | */ |
3035 | 3007 | ||
3036 | /* Send a ramrod */ | 3008 | /* Send a ramrod */ |
@@ -3104,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, | |||
3104 | } | 3076 | } |
3105 | } | 3077 | } |
3106 | 3078 | ||
3107 | /* On 57711 we write the multicast MACs' aproximate match | 3079 | /* On 57711 we write the multicast MACs' approximate match |
3108 | * table directly into the TSTORM's internal RAM. So we don't | 3080 | * table directly into the TSTORM's internal RAM. So we don't |
3109 | * really need to handle any tricks to make it work. | 3081 | * really need to handle any tricks to make it work. |
3110 | */ | 3082 | */ |
@@ -3227,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, | |||
3227 | 3199 | ||
3228 | /* If current command hasn't been handled yet and we are | 3200 | /* If current command hasn't been handled yet and we are |
3229 | * here means that it's meant to be dropped and we have to | 3201 | * here means that it's meant to be dropped and we have to |
3230 | * update the number of outstandling MACs accordingly. | 3202 | * update the number of outstanding MACs accordingly. |
3231 | */ | 3203 | */ |
3232 | if (p->mcast_list_len) | 3204 | if (p->mcast_list_len) |
3233 | o->total_pending_num -= o->max_cmd_len; | 3205 | o->total_pending_num -= o->max_cmd_len; |
@@ -3503,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, | |||
3503 | if (rc) | 3475 | if (rc) |
3504 | return rc; | 3476 | return rc; |
3505 | 3477 | ||
3506 | /* | 3478 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear |
3507 | * If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
3508 | * RAMROD_PENDING status immediately. | 3479 | * RAMROD_PENDING status immediately. |
3509 | */ | 3480 | */ |
3510 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | 3481 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
3511 | raw->clear_pending(raw); | 3482 | raw->clear_pending(raw); |
3512 | return 0; | 3483 | return 0; |
3513 | } else { | 3484 | } else { |
3514 | /* | 3485 | /* No need for an explicit memory barrier here as long we would |
3515 | * No need for an explicit memory barrier here as long we would | 3486 | * need to ensure the ordering of writing to the SPQ element |
3516 | * need to ensure the ordering of writing to the SPQ element | 3487 | * and updating of the SPQ producer which involves a memory |
3517 | * and updating of the SPQ producer which involves a memory | 3488 | * read and we will have to put a full memory barrier there |
3518 | * read and we will have to put a full memory barrier there | 3489 | * (inside bnx2x_sp_post()). |
3519 | * (inside bnx2x_sp_post()). | ||
3520 | */ | 3490 | */ |
3521 | 3491 | ||
3522 | /* Send a ramrod */ | 3492 | /* Send a ramrod */ |
@@ -3977,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | |||
3977 | 3947 | ||
3978 | } else { | 3948 | } else { |
3979 | 3949 | ||
3980 | /* | 3950 | /* CAM credit is equally divided between all active functions |
3981 | * CAM credit is equaly divided between all active functions | ||
3982 | * on the PATH. | 3951 | * on the PATH. |
3983 | */ | 3952 | */ |
3984 | if ((func_num > 0)) { | 3953 | if ((func_num > 0)) { |
@@ -3987,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | |||
3987 | else | 3956 | else |
3988 | cam_sz = BNX2X_CAM_SIZE_EMUL; | 3957 | cam_sz = BNX2X_CAM_SIZE_EMUL; |
3989 | 3958 | ||
3990 | /* | 3959 | /* No need for CAM entries handling for 57712 and |
3991 | * No need for CAM entries handling for 57712 and | ||
3992 | * newer. | 3960 | * newer. |
3993 | */ | 3961 | */ |
3994 | bnx2x_init_credit_pool(p, -1, cam_sz); | 3962 | bnx2x_init_credit_pool(p, -1, cam_sz); |
@@ -4005,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, | |||
4005 | u8 func_num) | 3973 | u8 func_num) |
4006 | { | 3974 | { |
4007 | if (CHIP_IS_E1x(bp)) { | 3975 | if (CHIP_IS_E1x(bp)) { |
4008 | /* | 3976 | /* There is no VLAN credit in HW on 57710 and 57711 only |
4009 | * There is no VLAN credit in HW on 57710 and 57711 only | ||
4010 | * MAC / MAC-VLAN can be set | 3977 | * MAC / MAC-VLAN can be set |
4011 | */ | 3978 | */ |
4012 | bnx2x_init_credit_pool(p, 0, -1); | 3979 | bnx2x_init_credit_pool(p, 0, -1); |
4013 | } else { | 3980 | } else { |
4014 | /* | 3981 | /* CAM credit is equally divided between all active functions |
4015 | * CAM credit is equaly divided between all active functions | ||
4016 | * on the PATH. | 3982 | * on the PATH. |
4017 | */ | 3983 | */ |
4018 | if (func_num > 0) { | 3984 | if (func_num > 0) { |
@@ -4028,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, | |||
4028 | /** | 3994 | /** |
4029 | * bnx2x_debug_print_ind_table - prints the indirection table configuration. | 3995 | * bnx2x_debug_print_ind_table - prints the indirection table configuration. |
4030 | * | 3996 | * |
4031 | * @bp: driver hanlde | 3997 | * @bp: driver handle |
4032 | * @p: pointer to rss configuration | 3998 | * @p: pointer to rss configuration |
4033 | * | 3999 | * |
4034 | * Prints it when NETIF_MSG_IFUP debug level is configured. | 4000 | * Prints it when NETIF_MSG_IFUP debug level is configured. |
@@ -4141,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, | |||
4141 | data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; | 4107 | data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; |
4142 | } | 4108 | } |
4143 | 4109 | ||
4144 | /* | 4110 | /* No need for an explicit memory barrier here as long we would |
4145 | * No need for an explicit memory barrier here as long we would | 4111 | * need to ensure the ordering of writing to the SPQ element |
4146 | * need to ensure the ordering of writing to the SPQ element | 4112 | * and updating of the SPQ producer which involves a memory |
4147 | * and updating of the SPQ producer which involves a memory | 4113 | * read and we will have to put a full memory barrier there |
4148 | * read and we will have to put a full memory barrier there | 4114 | * (inside bnx2x_sp_post()). |
4149 | * (inside bnx2x_sp_post()). | ||
4150 | */ | 4115 | */ |
4151 | 4116 | ||
4152 | /* Send a ramrod */ | 4117 | /* Send a ramrod */ |
@@ -4312,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, | |||
4312 | } | 4277 | } |
4313 | 4278 | ||
4314 | if (o->next_tx_only >= o->max_cos) | 4279 | if (o->next_tx_only >= o->max_cos) |
4315 | /* >= becuase tx only must always be smaller than cos since the | 4280 | /* >= because tx only must always be smaller than cos since the |
4316 | * primary connection supports COS 0 | 4281 | * primary connection supports COS 0 |
4317 | */ | 4282 | */ |
4318 | BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", | 4283 | BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", |
@@ -4625,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, | |||
4625 | /* Fill the ramrod data */ | 4590 | /* Fill the ramrod data */ |
4626 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); | 4591 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); |
4627 | 4592 | ||
4628 | /* | 4593 | /* No need for an explicit memory barrier here as long we would |
4629 | * No need for an explicit memory barrier here as long we would | 4594 | * need to ensure the ordering of writing to the SPQ element |
4630 | * need to ensure the ordering of writing to the SPQ element | 4595 | * and updating of the SPQ producer which involves a memory |
4631 | * and updating of the SPQ producer which involves a memory | 4596 | * read and we will have to put a full memory barrier there |
4632 | * read and we will have to put a full memory barrier there | 4597 | * (inside bnx2x_sp_post()). |
4633 | * (inside bnx2x_sp_post()). | ||
4634 | */ | 4598 | */ |
4635 | 4599 | ||
4636 | return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], | 4600 | return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], |
@@ -4654,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, | |||
4654 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); | 4618 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); |
4655 | bnx2x_q_fill_setup_data_e2(bp, params, rdata); | 4619 | bnx2x_q_fill_setup_data_e2(bp, params, rdata); |
4656 | 4620 | ||
4657 | /* | 4621 | /* No need for an explicit memory barrier here as long we would |
4658 | * No need for an explicit memory barrier here as long we would | 4622 | * need to ensure the ordering of writing to the SPQ element |
4659 | * need to ensure the ordering of writing to the SPQ element | 4623 | * and updating of the SPQ producer which involves a memory |
4660 | * and updating of the SPQ producer which involves a memory | 4624 | * read and we will have to put a full memory barrier there |
4661 | * read and we will have to put a full memory barrier there | 4625 | * (inside bnx2x_sp_post()). |
4662 | * (inside bnx2x_sp_post()). | ||
4663 | */ | 4626 | */ |
4664 | 4627 | ||
4665 | return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], | 4628 | return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], |
@@ -4699,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, | |||
4699 | o->cids[cid_index], rdata->general.client_id, | 4662 | o->cids[cid_index], rdata->general.client_id, |
4700 | rdata->general.sp_client_id, rdata->general.cos); | 4663 | rdata->general.sp_client_id, rdata->general.cos); |
4701 | 4664 | ||
4702 | /* | 4665 | /* No need for an explicit memory barrier here as long we would |
4703 | * No need for an explicit memory barrier here as long we would | 4666 | * need to ensure the ordering of writing to the SPQ element |
4704 | * need to ensure the ordering of writing to the SPQ element | 4667 | * and updating of the SPQ producer which involves a memory |
4705 | * and updating of the SPQ producer which involves a memory | 4668 | * read and we will have to put a full memory barrier there |
4706 | * read and we will have to put a full memory barrier there | 4669 | * (inside bnx2x_sp_post()). |
4707 | * (inside bnx2x_sp_post()). | ||
4708 | */ | 4670 | */ |
4709 | 4671 | ||
4710 | return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], | 4672 | return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], |
@@ -4733,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, | |||
4733 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, | 4695 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, |
4734 | ¶ms->update_flags); | 4696 | ¶ms->update_flags); |
4735 | 4697 | ||
4736 | /* Outer VLAN sripping */ | 4698 | /* Outer VLAN stripping */ |
4737 | data->outer_vlan_removal_enable_flg = | 4699 | data->outer_vlan_removal_enable_flg = |
4738 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); | 4700 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); |
4739 | data->outer_vlan_removal_change_flg = | 4701 | data->outer_vlan_removal_change_flg = |
@@ -4794,12 +4756,11 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, | |||
4794 | /* Fill the ramrod data */ | 4756 | /* Fill the ramrod data */ |
4795 | bnx2x_q_fill_update_data(bp, o, update_params, rdata); | 4757 | bnx2x_q_fill_update_data(bp, o, update_params, rdata); |
4796 | 4758 | ||
4797 | /* | 4759 | /* No need for an explicit memory barrier here as long we would |
4798 | * No need for an explicit memory barrier here as long we would | 4760 | * need to ensure the ordering of writing to the SPQ element |
4799 | * need to ensure the ordering of writing to the SPQ element | 4761 | * and updating of the SPQ producer which involves a memory |
4800 | * and updating of the SPQ producer which involves a memory | 4762 | * read and we will have to put a full memory barrier there |
4801 | * read and we will have to put a full memory barrier there | 4763 | * (inside bnx2x_sp_post()). |
4802 | * (inside bnx2x_sp_post()). | ||
4803 | */ | 4764 | */ |
4804 | 4765 | ||
4805 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, | 4766 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, |
@@ -5009,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, | |||
5009 | ¶ms->params.update; | 4970 | ¶ms->params.update; |
5010 | u8 next_tx_only = o->num_tx_only; | 4971 | u8 next_tx_only = o->num_tx_only; |
5011 | 4972 | ||
5012 | /* | 4973 | /* Forget all pending for completion commands if a driver only state |
5013 | * Forget all pending for completion commands if a driver only state | ||
5014 | * transition has been requested. | 4974 | * transition has been requested. |
5015 | */ | 4975 | */ |
5016 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | 4976 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { |
@@ -5018,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, | |||
5018 | o->next_state = BNX2X_Q_STATE_MAX; | 4978 | o->next_state = BNX2X_Q_STATE_MAX; |
5019 | } | 4979 | } |
5020 | 4980 | ||
5021 | /* | 4981 | /* Don't allow a next state transition if we are in the middle of |
5022 | * Don't allow a next state transition if we are in the middle of | ||
5023 | * the previous one. | 4982 | * the previous one. |
5024 | */ | 4983 | */ |
5025 | if (o->pending) { | 4984 | if (o->pending) { |
@@ -5228,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, | |||
5228 | if (o->pending) | 5187 | if (o->pending) |
5229 | return BNX2X_F_STATE_MAX; | 5188 | return BNX2X_F_STATE_MAX; |
5230 | 5189 | ||
5231 | /* | 5190 | /* ensure the order of reading of o->pending and o->state
5232 | * unsure the order of reading of o->pending and o->state | ||
5233 | * o->pending should be read first | 5191 | * o->pending should be read first |
5234 | */ | 5192 | */ |
5235 | rmb(); | 5193 | rmb(); |
@@ -5327,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, | |||
5327 | enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; | 5285 | enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; |
5328 | enum bnx2x_func_cmd cmd = params->cmd; | 5286 | enum bnx2x_func_cmd cmd = params->cmd; |
5329 | 5287 | ||
5330 | /* | 5288 | /* Forget all pending for completion commands if a driver only state |
5331 | * Forget all pending for completion commands if a driver only state | ||
5332 | * transition has been requested. | 5289 | * transition has been requested. |
5333 | */ | 5290 | */ |
5334 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | 5291 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { |
@@ -5336,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, | |||
5336 | o->next_state = BNX2X_F_STATE_MAX; | 5293 | o->next_state = BNX2X_F_STATE_MAX; |
5337 | } | 5294 | } |
5338 | 5295 | ||
5339 | /* | 5296 | /* Don't allow a next state transition if we are in the middle of |
5340 | * Don't allow a next state transition if we are in the middle of | ||
5341 | * the previous one. | 5297 | * the previous one. |
5342 | */ | 5298 | */ |
5343 | if (o->pending) | 5299 | if (o->pending) |
@@ -5510,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, | |||
5510 | goto init_err; | 5466 | goto init_err; |
5511 | } | 5467 | } |
5512 | 5468 | ||
5513 | /* Handle the beginning of COMMON_XXX pases separatelly... */ | 5469 | /* Handle the beginning of COMMON_XXX phases separately... */
5514 | switch (load_code) { | 5470 | switch (load_code) { |
5515 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | 5471 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: |
5516 | rc = bnx2x_func_init_cmn_chip(bp, drv); | 5472 | rc = bnx2x_func_init_cmn_chip(bp, drv); |
@@ -5544,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, | |||
5544 | init_err: | 5500 | init_err: |
5545 | drv->gunzip_end(bp); | 5501 | drv->gunzip_end(bp); |
5546 | 5502 | ||
5547 | /* In case of success, complete the comand immediatelly: no ramrods | 5503 | /* In case of success, complete the command immediately: no ramrods |
5548 | * have been sent. | 5504 | * have been sent. |
5549 | */ | 5505 | */ |
5550 | if (!rc) | 5506 | if (!rc) |
@@ -5569,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp, | |||
5569 | } | 5525 | } |
5570 | 5526 | ||
5571 | /** | 5527 | /** |
5572 | * bnx2x_func_reset_port - reser HW at port stage | 5528 | * bnx2x_func_reset_port - reset HW at port stage |
5573 | * | 5529 | * |
5574 | * @bp: device handle | 5530 | * @bp: device handle |
5575 | * @drv: | 5531 | * @drv: |
@@ -5591,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp, | |||
5591 | } | 5547 | } |
5592 | 5548 | ||
5593 | /** | 5549 | /** |
5594 | * bnx2x_func_reset_cmn - reser HW at common stage | 5550 | * bnx2x_func_reset_cmn - reset HW at common stage |
5595 | * | 5551 | * |
5596 | * @bp: device handle | 5552 | * @bp: device handle |
5597 | * @drv: | 5553 | * @drv: |
@@ -5633,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp, | |||
5633 | break; | 5589 | break; |
5634 | } | 5590 | } |
5635 | 5591 | ||
5636 | /* Complete the comand immediatelly: no ramrods have been sent. */ | 5592 | /* Complete the command immediately: no ramrods have been sent. */ |
5637 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); | 5593 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); |
5638 | 5594 | ||
5639 | return 0; | 5595 | return 0; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index c4fffe0faba0..798dfe996733 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | |||
@@ -34,8 +34,7 @@ enum { | |||
34 | RAMROD_RESTORE, | 34 | RAMROD_RESTORE, |
35 | /* Execute the next command now */ | 35 | /* Execute the next command now */ |
36 | RAMROD_EXEC, | 36 | RAMROD_EXEC, |
37 | /* | 37 | /* Don't add a new command and continue execution of postponed |
38 | * Don't add a new command and continue execution of posponed | ||
39 | * commands. If not set a new command will be added to the | 38 | * commands. If not set a new command will be added to the |
40 | * pending commands list. | 39 | * pending commands list. |
41 | */ | 40 | */ |
@@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd { | |||
129 | struct bnx2x_vlan_mac_data { | 128 | struct bnx2x_vlan_mac_data { |
130 | /* Requested command: BNX2X_VLAN_MAC_XX */ | 129 | /* Requested command: BNX2X_VLAN_MAC_XX */ |
131 | enum bnx2x_vlan_mac_cmd cmd; | 130 | enum bnx2x_vlan_mac_cmd cmd; |
132 | /* | 131 | /* used to contain the data related vlan_mac_flags bits from |
133 | * used to contain the data related vlan_mac_flags bits from | ||
134 | * ramrod parameters. | 132 | * ramrod parameters. |
135 | */ | 133 | */ |
136 | unsigned long vlan_mac_flags; | 134 | unsigned long vlan_mac_flags; |
@@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem * | |||
190 | struct bnx2x_exeq_elem *elem); | 188 | struct bnx2x_exeq_elem *elem); |
191 | 189 | ||
192 | struct bnx2x_exe_queue_obj { | 190 | struct bnx2x_exe_queue_obj { |
193 | /* | 191 | /* Commands pending for an execution. */ |
194 | * Commands pending for an execution. | ||
195 | */ | ||
196 | struct list_head exe_queue; | 192 | struct list_head exe_queue; |
197 | 193 | ||
198 | /* | 194 | /* Commands pending for an completion. */ |
199 | * Commands pending for an completion. | ||
200 | */ | ||
201 | struct list_head pending_comp; | 195 | struct list_head pending_comp; |
202 | 196 | ||
203 | spinlock_t lock; | 197 | spinlock_t lock; |
@@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj { | |||
245 | }; | 239 | }; |
246 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | 240 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ |
247 | /* | 241 | /* |
248 | * Element in the VLAN_MAC registry list having all currenty configured | 242 | * Element in the VLAN_MAC registry list having all currently configured |
249 | * rules. | 243 | * rules. |
250 | */ | 244 | */ |
251 | struct bnx2x_vlan_mac_registry_elem { | 245 | struct bnx2x_vlan_mac_registry_elem { |
252 | struct list_head link; | 246 | struct list_head link; |
253 | 247 | ||
254 | /* | 248 | /* Used to store the cam offset used for the mac/vlan/vlan-mac. |
255 | * Used to store the cam offset used for the mac/vlan/vlan-mac. | ||
256 | * Relevant for 57710 and 57711 only. VLANs and MACs share the | 249 | * Relevant for 57710 and 57711 only. VLANs and MACs share the |
257 | * same CAM for these chips. | 250 | * same CAM for these chips. |
258 | */ | 251 | */ |
@@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj { | |||
310 | * @param n number of elements to get | 303 | * @param n number of elements to get |
311 | * @param buf buffer preallocated by caller into which elements | 304 | * @param buf buffer preallocated by caller into which elements |
312 | * will be copied. Note elements are 4-byte aligned | 305 | * will be copied. Note elements are 4-byte aligned |
313 | * so buffer size must be able to accomodate the | 306 | * so buffer size must be able to accommodate the |
314 | * aligned elements. | 307 | * aligned elements. |
315 | * | 308 | * |
316 | * @return number of copied bytes | 309 | * @return number of copied bytes |
@@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj { | |||
395 | * @param bp | 388 | * @param bp |
396 | * @param p Command parameters (RAMROD_COMP_WAIT bit in | 389 | * @param p Command parameters (RAMROD_COMP_WAIT bit in |
397 | * ramrod_flags is only taken into an account) | 390 | * ramrod_flags is only taken into an account) |
398 | * @param ppos a pointer to the cooky that should be given back in the | 391 | * @param ppos a pointer to the cookie that should be given back in the |
399 | * next call to make function handle the next element. If | 392 | * next call to make function handle the next element. If |
400 | * *ppos is set to NULL it will restart the iterator. | 393 | * *ppos is set to NULL it will restart the iterator. |
401 | * If returned *ppos == NULL this means that the last | 394 | * If returned *ppos == NULL this means that the last |
@@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj { | |||
408 | struct bnx2x_vlan_mac_registry_elem **ppos); | 401 | struct bnx2x_vlan_mac_registry_elem **ppos); |
409 | 402 | ||
410 | /** | 403 | /** |
411 | * Should be called on a completion arival. | 404 | * Should be called on a completion arrival. |
412 | * | 405 | * |
413 | * @param bp | 406 | * @param bp |
414 | * @param o | 407 | * @param o |
@@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp, | |||
447 | 440 | ||
448 | /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | 441 | /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ |
449 | 442 | ||
450 | /* RX_MODE ramrod spesial flags: set in rx_mode_flags field in | 443 | /* RX_MODE ramrod special flags: set in rx_mode_flags field in |
451 | * a bnx2x_rx_mode_ramrod_params. | 444 | * a bnx2x_rx_mode_ramrod_params. |
452 | */ | 445 | */ |
453 | enum { | 446 | enum { |
@@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params { | |||
475 | unsigned long ramrod_flags; | 468 | unsigned long ramrod_flags; |
476 | unsigned long rx_mode_flags; | 469 | unsigned long rx_mode_flags; |
477 | 470 | ||
478 | /* | 471 | /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to |
479 | * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to | ||
480 | * a tstorm_eth_mac_filter_config (e1x). | 472 | * a tstorm_eth_mac_filter_config (e1x). |
481 | */ | 473 | */ |
482 | void *rdata; | 474 | void *rdata; |
@@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj { | |||
646 | /* Maximum allowed credit. put() will check against it. */ | 638 | /* Maximum allowed credit. put() will check against it. */ |
647 | int pool_sz; | 639 | int pool_sz; |
648 | 640 | ||
649 | /* | 641 | /* Allocate a pool table statically. |
650 | * Allocate a pool table statically. | ||
651 | * | 642 | * |
652 | * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) | 643 | * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) |
653 | * | 644 | * |
654 | * The set bit in the table will mean that the entry is available. | 645 | * The set bit in the table will mean that the entry is available. |
655 | */ | 646 | */ |
656 | #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) | 647 | #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) |
657 | u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; | 648 | u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; |
@@ -832,7 +823,7 @@ enum { | |||
832 | BNX2X_Q_FLG_TUN_INC_INNER_IP_ID | 823 | BNX2X_Q_FLG_TUN_INC_INNER_IP_ID |
833 | }; | 824 | }; |
834 | 825 | ||
835 | /* Queue type options: queue type may be a compination of below. */ | 826 | /* Queue type options: queue type may be a combination of below. */ |
836 | enum bnx2x_q_type { | 827 | enum bnx2x_q_type { |
837 | /** TODO: Consider moving both these flags into the init() | 828 | /** TODO: Consider moving both these flags into the init() |
838 | * ramrod params. | 829 | * ramrod params. |
@@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj { | |||
1002 | u8 cl_id; | 993 | u8 cl_id; |
1003 | u8 func_id; | 994 | u8 func_id; |
1004 | 995 | ||
1005 | /* | 996 | /* number of traffic classes supported by queue. |
1006 | * number of traffic classes supported by queue. | 997 | * The primary connection of the queue supports the first traffic |
1007 | * The primary connection of the queue suppotrs the first traffic | 998 | * class. Any further traffic class is supported by a tx-only |
1008 | * class. Any further traffic class is suppoted by a tx-only | ||
1009 | * connection. | 999 | * connection. |
1010 | * | 1000 | * |
1011 | * Therefore max_cos is also a number of valid entries in the cids | 1001 | * Therefore max_cos is also a number of valid entries in the cids |
@@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj { | |||
1021 | 1011 | ||
1022 | /* BNX2X_Q_CMD_XX bits. This object implements "one | 1012 | /* BNX2X_Q_CMD_XX bits. This object implements "one |
1023 | * pending" paradigm but for debug and tracing purposes it's | 1013 | * pending" paradigm but for debug and tracing purposes it's |
1024 | * more convinient to have different bits for different | 1014 | * more convenient to have different bits for different |
1025 | * commands. | 1015 | * commands. |
1026 | */ | 1016 | */ |
1027 | unsigned long pending; | 1017 | unsigned long pending; |
@@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj { | |||
1210 | 1200 | ||
1211 | /* BNX2X_FUNC_CMD_XX bits. This object implements "one | 1201 | /* BNX2X_FUNC_CMD_XX bits. This object implements "one |
1212 | * pending" paradigm but for debug and tracing purposes it's | 1202 | * pending" paradigm but for debug and tracing purposes it's |
1213 | * more convinient to have different bits for different | 1203 | * more convenient to have different bits for different |
1214 | * commands. | 1204 | * commands. |
1215 | */ | 1205 | */ |
1216 | unsigned long pending; | 1206 | unsigned long pending; |
@@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, | |||
1329 | * | 1319 | * |
1330 | * @p: Command parameters | 1320 | * @p: Command parameters |
1331 | * | 1321 | * |
1332 | * Return: 0 - if operation was successfull and there is no pending completions, | 1322 | * Return: 0 - if operation was successful and there is no pending completions, |
1333 | * positive number - if there are pending completions, | 1323 | * positive number - if there are pending completions, |
1334 | * negative - if there were errors | 1324 | * negative - if there were errors |
1335 | */ | 1325 | */ |
@@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, | |||
1361 | * the current command will be enqueued to the tail of the | 1351 | * the current command will be enqueued to the tail of the |
1362 | * pending commands list. | 1352 | * pending commands list. |
1363 | * | 1353 | * |
1364 | * Return: 0 is operation was successfull and there are no pending completions, | 1354 | * Return: 0 is operation was successful and there are no pending completions, |
1365 | * negative if there were errors, positive if there are pending | 1355 | * negative if there were errors, positive if there are pending |
1366 | * completions. | 1356 | * completions. |
1367 | */ | 1357 | */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index ea492c7a96a1..b2ab288aaf76 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, | |||
1341 | */ | 1341 | */ |
1342 | 1342 | ||
1343 | /* internal vf enable - until vf is enabled internally all transactions | 1343 | /* internal vf enable - until vf is enabled internally all transactions |
1344 | * are blocked. this routine should always be called last with pretend. | 1344 | * are blocked. This routine should always be called last with pretend. |
1345 | */ | 1345 | */ |
1346 | static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) | 1346 | static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) |
1347 | { | 1347 | { |
@@ -1743,7 +1743,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) | |||
1743 | REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); | 1743 | REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); |
1744 | REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); | 1744 | REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); |
1745 | 1745 | ||
1746 | /* set the number of VF alllowed doorbells to the full DQ range */ | 1746 | /* set the number of VF allowed doorbells to the full DQ range */ |
1747 | REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); | 1747 | REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); |
1748 | 1748 | ||
1749 | /* set the VF doorbell threshold */ | 1749 | /* set the VF doorbell threshold */ |
@@ -2403,7 +2403,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) | |||
2403 | 2403 | ||
2404 | /* extract vf and rxq index from vf_cid - relies on the following: | 2404 | /* extract vf and rxq index from vf_cid - relies on the following: |
2405 | * 1. vfid on cid reflects the true abs_vfid | 2405 | * 1. vfid on cid reflects the true abs_vfid |
2406 | * 2. the max number of VFs (per path) is 64 | 2406 | * 2. The max number of VFs (per path) is 64 |
2407 | */ | 2407 | */ |
2408 | qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); | 2408 | qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); |
2409 | abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); | 2409 | abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); |
@@ -2461,7 +2461,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) | |||
2461 | { | 2461 | { |
2462 | /* extract the vf from vf_cid - relies on the following: | 2462 | /* extract the vf from vf_cid - relies on the following: |
2463 | * 1. vfid on cid reflects the true abs_vfid | 2463 | * 1. vfid on cid reflects the true abs_vfid |
2464 | * 2. the max number of VFs (per path) is 64 | 2464 | * 2. The max number of VFs (per path) is 64 |
2465 | */ | 2465 | */ |
2466 | int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); | 2466 | int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); |
2467 | return bnx2x_vf_by_abs_fid(bp, abs_vfid); | 2467 | return bnx2x_vf_by_abs_fid(bp, abs_vfid); |
@@ -2480,7 +2480,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | |||
2480 | if (vf) { | 2480 | if (vf) { |
2481 | /* extract queue index from vf_cid - relies on the following: | 2481 | /* extract queue index from vf_cid - relies on the following: |
2482 | * 1. vfid on cid reflects the true abs_vfid | 2482 | * 1. vfid on cid reflects the true abs_vfid |
2483 | * 2. the max number of VFs (per path) is 64 | 2483 | * 2. The max number of VFs (per path) is 64 |
2484 | */ | 2484 | */ |
2485 | int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); | 2485 | int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); |
2486 | *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); | 2486 | *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); |
@@ -2705,7 +2705,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
2705 | } | 2705 | } |
2706 | 2706 | ||
2707 | /* static allocation: | 2707 | /* static allocation: |
2708 | * the global maximum number are fixed per VF. fail the request if | 2708 | * the global maximum number are fixed per VF. Fail the request if |
2709 | * requested number exceed these globals | 2709 | * requested number exceed these globals |
2710 | */ | 2710 | */ |
2711 | if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { | 2711 | if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { |
@@ -2890,7 +2890,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp, | |||
2890 | return -ENOMEM; | 2890 | return -ENOMEM; |
2891 | } | 2891 | } |
2892 | 2892 | ||
2893 | /* VF release can be called either: 1. the VF was acquired but | 2893 | /* VF release can be called either: 1. The VF was acquired but |
2894 | * not enabled 2. the vf was enabled or in the process of being | 2894 | * not enabled 2. the vf was enabled or in the process of being |
2895 | * enabled | 2895 | * enabled |
2896 | */ | 2896 | */ |
@@ -3140,7 +3140,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
3140 | /* mac configured by ndo so its in bulletin board */ | 3140 | /* mac configured by ndo so its in bulletin board */ |
3141 | memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); | 3141 | memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); |
3142 | else | 3142 | else |
3143 | /* funtion has not been loaded yet. Show mac as 0s */ | 3143 | /* function has not been loaded yet. Show mac as 0s */ |
3144 | memset(&ivi->mac, 0, ETH_ALEN); | 3144 | memset(&ivi->mac, 0, ETH_ALEN); |
3145 | 3145 | ||
3146 | /* vlan */ | 3146 | /* vlan */ |
@@ -3148,7 +3148,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
3148 | /* vlan configured by ndo so its in bulletin board */ | 3148 | /* vlan configured by ndo so its in bulletin board */ |
3149 | memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); | 3149 | memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); |
3150 | else | 3150 | else |
3151 | /* funtion has not been loaded yet. Show vlans as 0s */ | 3151 | /* function has not been loaded yet. Show vlans as 0s */ |
3152 | memset(&ivi->vlan, 0, VLAN_HLEN); | 3152 | memset(&ivi->vlan, 0, VLAN_HLEN); |
3153 | } | 3153 | } |
3154 | 3154 | ||
@@ -3188,7 +3188,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
3188 | return -EINVAL; | 3188 | return -EINVAL; |
3189 | } | 3189 | } |
3190 | 3190 | ||
3191 | /* update PF's copy of the VF's bulletin. will no longer accept mac | 3191 | /* update PF's copy of the VF's bulletin. Will no longer accept mac |
3192 | * configuration requests from vf unless match this mac | 3192 | * configuration requests from vf unless match this mac |
3193 | */ | 3193 | */ |
3194 | bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; | 3194 | bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; |
@@ -3357,8 +3357,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
3357 | return 0; | 3357 | return 0; |
3358 | } | 3358 | } |
3359 | 3359 | ||
3360 | /* crc is the first field in the bulletin board. compute the crc over the | 3360 | /* crc is the first field in the bulletin board. Compute the crc over the |
3361 | * entire bulletin board excluding the crc field itself | 3361 | * entire bulletin board excluding the crc field itself. Use the length field |
3362 | * as the Bulletin Board was posted by a PF with possibly a different version | ||
3363 | * from the vf which will sample it. Therefore, the length is computed by the | ||
3362 | * PF and then used blindly by the VF. | ||
3362 | */ | 3365 | */ |
3363 | u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, | 3366 | u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, |
3364 | struct pf_vf_bulletin_content *bulletin) | 3367 | struct pf_vf_bulletin_content *bulletin) |
@@ -3451,7 +3454,7 @@ int bnx2x_open_epilog(struct bnx2x *bp) | |||
3451 | * register_netdevice which must have rtnl lock taken. As we are holding | 3454 | * register_netdevice which must have rtnl lock taken. As we are holding |
3452 | * the lock right now, that could only work if the probe would not take | 3455 | * the lock right now, that could only work if the probe would not take |
3453 | * the lock. However, as the probe of the vf may be called from other | 3456 | * the lock. However, as the probe of the vf may be called from other |
3454 | * contexts as well (such as passthrough to vm failes) it can't assume | 3457 | * contexts as well (such as passthrough to vm fails) it can't assume |
3455 | * the lock is being held for it. Using delayed work here allows the | 3458 | * the lock is being held for it. Using delayed work here allows the |
3456 | * probe code to simply take the lock (i.e. wait for it to be released | 3459 | * probe code to simply take the lock (i.e. wait for it to be released |
3457 | * if it is being held). We only want to do this if the number of VFs | 3460 | * if it is being held). We only want to do this if the number of VFs |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 039792299c3b..3e7cb8341841 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
@@ -197,7 +197,7 @@ struct bnx2x_virtf { | |||
197 | 197 | ||
198 | u8 state; | 198 | u8 state; |
199 | #define VF_FREE 0 /* VF ready to be acquired holds no resc */ | 199 | #define VF_FREE 0 /* VF ready to be acquired holds no resc */ |
200 | #define VF_ACQUIRED 1 /* VF aquired, but not initalized */ | 200 | #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ |
201 | #define VF_ENABLED 2 /* VF Enabled */ | 201 | #define VF_ENABLED 2 /* VF Enabled */ |
202 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ | 202 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ |
203 | 203 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 282606677bca..f55fa0725680 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
@@ -233,7 +233,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) | |||
233 | 233 | ||
234 | attempts++; | 234 | attempts++; |
235 | 235 | ||
236 | /* test whether the PF accepted our request. If not, humble the | 236 | /* test whether the PF accepted our request. If not, humble |
237 | * the request and try again. | 237 | * the request and try again. |
238 | */ | 238 | */ |
239 | if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { | 239 | if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { |
@@ -787,7 +787,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) | |||
787 | storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); | 787 | storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); |
788 | } | 788 | } |
789 | 789 | ||
790 | /* enable vf_pf mailbox (aka vf-pf-chanell) */ | 790 | /* enable vf_pf mailbox (aka vf-pf-channel) */ |
791 | void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) | 791 | void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) |
792 | { | 792 | { |
793 | bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); | 793 | bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); |
@@ -1072,7 +1072,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, | |||
1072 | if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) | 1072 | if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) |
1073 | __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); | 1073 | __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); |
1074 | 1074 | ||
1075 | /* outer vlan removal is set according to the PF's multi fuction mode */ | 1075 | /* outer vlan removal is set according to PF's multi function mode */ |
1076 | if (IS_MF_SD(bp)) | 1076 | if (IS_MF_SD(bp)) |
1077 | __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); | 1077 | __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); |
1078 | } | 1078 | } |
@@ -1104,7 +1104,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1104 | struct bnx2x_queue_init_params *init_p; | 1104 | struct bnx2x_queue_init_params *init_p; |
1105 | struct bnx2x_queue_setup_params *setup_p; | 1105 | struct bnx2x_queue_setup_params *setup_p; |
1106 | 1106 | ||
1107 | /* reinit the VF operation context */ | 1107 | /* re-init the VF operation context */ |
1108 | memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); | 1108 | memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); |
1109 | setup_p = &vf->op_params.qctor.prep_qsetup; | 1109 | setup_p = &vf->op_params.qctor.prep_qsetup; |
1110 | init_p = &vf->op_params.qctor.qstate.params.init; | 1110 | init_p = &vf->op_params.qctor.qstate.params.init; |