Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x.h	|  51
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c	|  28
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h	|   3
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c	|  94
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	|   6
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h	|   1
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c	|  15
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h	|   7
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c	| 259
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h	|   1
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c	|  28
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c	|  31
12 files changed, 335 insertions, 189 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE		      0
 #define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 2)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED		(1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
+#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND	(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = true;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	if (fp->state & BNX2X_FP_LOCKED) {
 		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 		/* we don't care if someone yielded */
 		fp->state = BNX2X_FP_STATE_NAPI;
 	}
-	spin_unlock(&fp->lock);
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = false;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	WARN_ON(fp->state &
 		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
-	spin_unlock(&fp->lock);
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
 	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 	return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	int rc = true;
+
+	spin_lock_bh(&fp->lock);
+	if (fp->state & BNX2X_FP_OWNED)
+		rc = false;
+	fp->state |= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
+
+	return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
 	 * Therefore, if they would have been defined in the same union,
 	 * data can get corrupted.
 	 */
-	struct afex_vif_list_ramrod_data func_afex_rdata;
+	union {
+		struct afex_vif_list_ramrod_data viflist_data;
+		struct function_update_data func_update;
+	} func_afex_rdata;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	struct sk_buff *skb = tx_buf->skb;
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
+	u16 split_bd_len = 0;
 
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
 	   txdata->txq_index, idx, tx_buf, skb);
 
-	/* unmap first bd */
 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	--nbd;
 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-	/* ...and the TSO split header bd since they have no mapping */
+	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 		--nbd;
 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
 
+	/* unmap first bd */
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+			 DMA_TO_DEVICE);
+
 	/* now free frags */
 	while (nbd > 0) {
 
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_rx_queue_cnic(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_eth_queue(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 		bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
 	} else {
+		/* Enable Auto-Detect to support 1G over CL37 as well */
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+		/* Force cl48 sync_status LOW to avoid getting stuck in CL73
+		 * parallel-detect loop when CL73 and CL37 are enabled.
+		 */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+		bnx2x_set_aer_mmd(params, phy);
+
 		bnx2x_disable_kr2(params, vars, phy);
 	}
 
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 				*edc_mode = EDC_MODE_ACTIVE_DAC;
 			else
 				check_limiting_mode = 1;
-		} else if (copper_module_type &
-			SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+		} else {
+			*edc_mode = EDC_MODE_PASSIVE_DAC;
+			/* Even in case PASSIVE_DAC indication is not set,
+			 * treat it as a passive DAC cable, since some cables
+			 * don't have this indication.
+			 */
+			if (copper_module_type &
+			    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
 				DP(NETIF_MSG_LINK,
 				   "Passive Copper cable detected\n");
-				*edc_mode =
-				      EDC_MODE_PASSIVE_DAC;
-			} else {
-				DP(NETIF_MSG_LINK,
-				   "Unknown copper-cable-type 0x%x !!!\n",
-				   copper_module_type);
-				return -EINVAL;
+			} else {
+				DP(NETIF_MSG_LINK,
+				   "Unknown copper-cable-type\n");
+			}
 			}
 			break;
 		}
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 				 (1<<11));
 
 	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-		(phy->speed_cap_mask &
-		PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
-		(phy->req_line_speed == SPEED_1000)) {
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
 		an_1000_val |= (1<<8);
 		autoneg_val |= (1<<9 | 1<<12);
 		if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 			0x09,
 			&an_1000_val);
 
-	/* Set 100 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	     (phy->speed_cap_mask &
-	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
-		an_10_100_val |= (1<<7);
-		/* Enable autoneg and restart autoneg for legacy speeds */
-		autoneg_val |= (1<<9 | 1<<12);
-
-		if (phy->req_duplex == DUPLEX_FULL)
-			an_10_100_val |= (1<<8);
-		DP(NETIF_MSG_LINK, "Advertising 100M\n");
-	}
-
-	/* Set 10 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	     (phy->speed_cap_mask &
-	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-	       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
-		an_10_100_val |= (1<<5);
-		autoneg_val |= (1<<9 | 1<<12);
-		if (phy->req_duplex == DUPLEX_FULL)
+	/* Advertise 10/100 link speed */
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+			an_10_100_val |= (1<<5);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
 			an_10_100_val |= (1<<6);
-		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			an_10_100_val |= (1<<7);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			an_10_100_val |= (1<<8);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+		}
 	}
 
 	/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
 
+	/* Do not touch the link in case physical link down */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return 1;
+
 	/* a. Update shmem->link_status accordingly
 	 * b. Update link_vars->link_up
 	 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 	 */
 	not_kr2_device = (((base_page & 0x8000) == 0) ||
 			  (((base_page & 0x8000) &&
-			    ((next_page & 0xe0) == 0x2))));
+			    ((next_page & 0xe0) == 0x20))));
 
 	/* In case KR2 is already disabled, check if we need to re-enable it */
 	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..8b3107b2fcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 		}
 	}
 
-	/* adjust igu_sb_cnt to MF for E1x */
-	if (CHIP_IS_E1x(bp) && IS_MF(bp))
-		bp->igu_sb_cnt /= E1HVN_MAX;
+	/* adjust igu_sb_cnt to MF for E1H */
+	if (CHIP_IS_E1H(bp) && IS_MF(bp))
+		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
 
 	/* port info */
 	bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
 #define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
 #define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa
 #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
 #define MDIO_WC_REG_XGXS_STATUS3			0x8129
 #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	struct bnx2x_vlan_mac_ramrod_params p;
 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+	unsigned long flags;
 	int read_lock;
 	int rc = 0;
 
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	spin_lock_bh(&exeq->lock);
 
 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
-		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-		    *vlan_mac_flags) {
+		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
 			if (rc) {
 				BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 		return read_lock;
 
 	list_for_each_entry(pos, &o->head, link) {
-		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+		flags = pos->vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
 			rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
 	struct bnx2x_raw_obj *r = &o->raw;
 
 	/* Do nothing if only driver cleanup was requested */
-	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+		   p->ramrod_flags);
 		return 0;
+	}
 
 	r->set_pending(r);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
 	BNX2X_DONT_CONSUME_CAM_CREDIT,
 	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
+				 1 << BNX2X_ETH_MAC | \
+				 1 << BNX2X_ISCSI_ETH_MAC | \
+				 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
 struct bnx2x_vlan_mac_ramrod_params {
 	/* Object to run the command from */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	/* next state */
 	vfop->state = BNX2X_VFOP_RXMODE_DONE;
 
+	/* record the accept flags in vfdb so hypervisor can modify them
+	 * if necessary
+	 */
+	bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+		ramrod->rx_accept_flags;
 	vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
 	bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 op_err:
@@ -1224,39 +1229,43 @@ op_pending:
 	return;
 }
 
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+				  struct bnx2x_virtf *vf,
+				  unsigned long accept_flags)
+{
+	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+	memset(ramrod, 0, sizeof(*ramrod));
+	ramrod->cid = vfq->cid;
+	ramrod->cl_id = vfq_cl_id(vf, vfq);
+	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+	ramrod->rx_accept_flags = accept_flags;
+	ramrod->tx_accept_flags = accept_flags;
+	ramrod->pstate = &vf->filter_state;
+	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
 			  struct bnx2x_virtf *vf,
 			  struct bnx2x_vfop_cmd *cmd,
 			  int qid, unsigned long accept_flags)
 {
-	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
 	if (vfop) {
 		struct bnx2x_rx_mode_ramrod_params *ramrod =
 			&vf->op_params.rx_mode;
 
-		memset(ramrod, 0, sizeof(*ramrod));
-
-		/* Prepare ramrod parameters */
-		ramrod->cid = vfq->cid;
-		ramrod->cl_id = vfq_cl_id(vf, vfq);
-		ramrod->rx_mode_obj = &bp->rx_mode_obj;
-		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
-		ramrod->rx_accept_flags = accept_flags;
-		ramrod->tx_accept_flags = accept_flags;
-		ramrod->pstate = &vf->filter_state;
-		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
-		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
-		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
-		ramrod->rdata =
-			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
-		ramrod->rdata_mapping =
-			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
 
 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
 				 bnx2x_vfop_rxmode, cmd->done);
@@ -3114,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
 
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+		return -EINVAL;
+	}
+
 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
 
@@ -3197,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
 		bnx2x_iov_static_resc(bp, vf);
 	}
 
-	/* prepare msix vectors in VF configuration space */
+	/* prepare msix vectors in VF configuration space - the value in the
+	 * PCI configuration space should be the index of the last entry,
+	 * namely one less than the actual size of the table
+	 */
 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
-		       num_vf_queues);
+		       num_vf_queues - 1);
 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
-		   vf_idx, num_vf_queues);
+		   vf_idx, num_vf_queues - 1);
 	}
 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
@@ -3431,10 +3448,18 @@ out:
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
+	struct bnx2x_queue_state_params q_params = {NULL};
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	struct bnx2x_queue_update_params *update_params;
+	struct pf_vf_bulletin_content *bulletin = NULL;
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
 	struct bnx2x *bp = netdev_priv(dev);
-	int rc, q_logical_state;
+	struct bnx2x_vlan_mac_obj *vlan_obj;
+	unsigned long vlan_mac_flags = 0;
+	unsigned long ramrod_flags = 0;
 	struct bnx2x_virtf *vf = NULL;
-	struct pf_vf_bulletin_content *bulletin = NULL;
+	unsigned long accept_flags;
+	int rc;
 
 	/* sanity and init */
 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3452,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 	/* update PF's copy of the VF's bulletin. No point in posting the vlan
 	 * to the VF since it doesn't have anything to do with it. But it useful
 	 * to store it here in case the VF is not up yet and we can only
-	 * configure the vlan later when it does.
+	 * configure the vlan later when it does. Treat vlan id 0 as remove the
+	 * Host tag.
 	 */
-	bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	if (vlan > 0)
+		bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	else
+		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
 	/* is vf initialized and queue set up? */
-	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
-	if (vf->state == VF_ENABLED &&
-	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-		/* configure the vlan in device on this vf's queue */
-		unsigned long ramrod_flags = 0;
-		unsigned long vlan_mac_flags = 0;
-		struct bnx2x_vlan_mac_obj *vlan_obj =
-			&bnx2x_leading_vfq(vf, vlan_obj);
-		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-		struct bnx2x_queue_state_params q_params = {NULL};
-		struct bnx2x_queue_update_params *update_params;
+	if (vf->state != VF_ENABLED ||
+	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+		return rc;
 
-		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-		if (rc)
-			return rc;
-		memset(&ramrod_param, 0, sizeof(ramrod_param));
+	/* configure the vlan in device on this vf's queue */
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+	if (rc)
+		return rc;
 
 	/* must lock vfpf channel to protect against vf flows */
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
 	/* remove existing vlans */
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
 				  &ramrod_flags);
 	if (rc) {
 		BNX2X_ERR("failed to delete vlans\n");
 		rc = -EINVAL;
 		goto out;
 	}
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (vlan)
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = vlan_obj;
+	ramrod_param.ramrod_flags = ramrod_flags;
+	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		&ramrod_param.user_req.vlan_mac_flags);
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		rc = -EINVAL;
+		goto out;
+	}
 
 	/* send queue update ramrod to configure default vlan and silent
 	 * vlan removal
+	 */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+	update_params = &q_params.params.update;
+	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		  &update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &update_params->update_flags);
+	if (vlan == 0) {
+		/* if vlan is 0 then we want to leave the VF traffic
+		 * untagged, and leave the incoming traffic untouched
+		 * (i.e. do not remove any vlan tags).
+		 */
+		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+			    &update_params->update_flags);
+		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+			    &update_params->update_flags);
+	} else {
+		/* configure default vlan to vf queue and set silent
+		 * vlan removal (the vf remains unaware of this vlan).
 	 */
-		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-		q_params.cmd = BNX2X_Q_CMD_UPDATE;
-		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-		update_params = &q_params.params.update;
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
 			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
 			  &update_params->update_flags);
+		update_params->def_vlan = vlan;
+		update_params->silent_removal_value =
+			vlan & VLAN_VID_MASK;
+		update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
 
-	if (vlan == 0) {
-		/* if vlan is 0 then we want to leave the VF traffic
-		 * untagged, and leave the incoming traffic untouched
-		 * (i.e. do not remove any vlan tags).
-		 */
-		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			    &update_params->update_flags);
-		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			    &update_params->update_flags);
-	} else {
-		/* configure the new vlan to device */
-		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-		ramrod_param.vlan_mac_obj = vlan_obj;
-		ramrod_param.ramrod_flags = ramrod_flags;
-		ramrod_param.user_req.u.vlan.vlan = vlan;
-		ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-		rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-		if (rc) {
-			BNX2X_ERR("failed to configure vlan\n");
-			rc = -EINVAL;
-			goto out;
-		}
-
-		/* configure default vlan to vf queue and set silent
-		 * vlan removal (the vf remains unaware of this vlan).
-		 */
-		update_params = &q_params.params.update;
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			  &update_params->update_flags);
-		update_params->def_vlan = vlan;
-	}
+	/* Update the Queue state */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Failed to configure default VLAN\n");
+		goto out;
+	}
 
-	/* Update the Queue state */
-	rc = bnx2x_queue_state_change(bp, &q_params);
-	if (rc) {
-		BNX2X_ERR("Failed to configure default VLAN\n");
-		goto out;
-	}
 
 	/* clear the flag indicating that this VF needs its vlan
 	 * (will only be set if the HV configured the Vlan before vf was
 	 * up and we were called because the VF came up later
 	 */
 out:
 	vf->cfg_flags &= ~VF_CFG_VLAN;
 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
-	}
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
 	/* VLANs object */
 	struct bnx2x_vlan_mac_obj	vlan_obj;
 	atomic_t vlan_count;		/* 0 means vlan-0 is set ~ untagged */
+	unsigned long accept_flags;	/* last accept flags configured */
 
 	/* Queue Slow-path State object */
 	struct bnx2x_queue_sp_obj	sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
 		return -EINVAL;
 	}
 
-	BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
 
 	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
 
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
 		unsigned long accept = 0;
+		struct pf_vf_bulletin_content *bulletin =
+			BP_VF_BULLETIN(bp, vf->index);
 
 		/* covert VF-PF if mask to bnx2x accept flags */
 		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 
 		/* A packet arriving the vf's mac should be accepted
-		 * with any vlan
+		 * with any vlan, unless a vlan has already been
+		 * configured.
 		 */
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
 		/* set rx-mode */
 		rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 			goto response;
 		}
 	}
+	/* if vlan was set by hypervisor we don't allow guest to config vlan */
+	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+		int i;
+
+		/* search for vlan filters */
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (filters->filters[i].flags &
+			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
+				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+					  vf->abs_vfid);
+				vf->op_rc = -EPERM;
+				goto response;
+			}
+		}
+	}
 
 	/* verify vf_qid */
 	if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
 
 	/* flags handled individually for backward/forward compatability */
+	vf_op_params->rss_flags = 0;
+	vf_op_params->ramrod_flags = 0;
+
 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 369b736dde05..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 {
 	u32 base = (u32) mapping & 0xffffffff;
 
-	return (base > 0xffffdcc0) && (base + len + 8 < base);
+	return base + len + 8 < base;
 }
 
 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp)
 	void (*write_op)(struct tg3 *, u32, u32);
 	int i, err;
 
+	if (!pci_device_is_present(tp->pdev))
+		return -ENODEV;
+
 	tg3_nvram_lock(tp);
 
 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -11581,10 +11584,11 @@ static int tg3_close(struct net_device *dev)
 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
-	tg3_power_down_prepare(tp);
-
-	tg3_carrier_off(tp);
+	if (pci_device_is_present(tp->pdev)) {
+		tg3_power_down_prepare(tp);
 
+		tg3_carrier_off(tp);
+	}
 	return 0;
 }
 
@@ -16499,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	/* Clear this out for sanity. */
 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 
+	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
+	tw32(TG3PCI_REG_BASE_ADDR, 0);
+
 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
 			      &pci_state_reg);
 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
@@ -17726,10 +17733,12 @@ static int tg3_suspend(struct device *device)
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct tg3 *tp = netdev_priv(dev);
-	int err;
+	int err = 0;
+
+	rtnl_lock();
 
 	if (!netif_running(dev))
-		return 0;
+		goto unlock;
 
 	tg3_reset_task_cancel(tp);
 	tg3_phy_stop(tp);
@@ -17771,6 +17780,8 @@ out:
 		tg3_phy_start(tp);
 	}
 
+unlock:
+	rtnl_unlock();
 	return err;
 }
 
@@ -17779,10 +17790,12 @@ static int tg3_resume(struct device *device)
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct tg3 *tp = netdev_priv(dev);
-	int err;
+	int err = 0;
+
+	rtnl_lock();
 
 	if (!netif_running(dev))
-		return 0;
+		goto unlock;
 
 	netif_device_attach(dev);
 
@@ -17806,6 +17819,8 @@ out:
 	if (!err)
 		tg3_phy_start(tp);
 
+unlock:
+	rtnl_unlock();
 	return err;
 }
 #endif /* CONFIG_PM_SLEEP */