Diffstat (limited to 'drivers/net/ethernet')
90 files changed, 1607 insertions, 938 deletions
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a79d77..46dfb1378c17 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
 	if (netif_msg_ifup(db))
 		dev_dbg(db->dev, "enabling %s\n", dev->name);
 
-	if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
-			     0, dev->name, dev))
+	if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
 		return -EAGAIN;
 
 	/* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
 
 	emac_shutdown(ndev);
 
+	free_irq(ndev->irq, ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..248baf6273fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Make sure pointer to data buffer is set */
 	wmb();
 
+	skb_tx_timestamp(skb);
+
 	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
 	/* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
-	skb_tx_timestamp(skb);
-
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 	 * Mask some pcie error bits
 	 */
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
-	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
-	data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
-	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	if (pos) {
+		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+		data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+		pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	}
 	/* clear error status */
 	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
 				   PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE		      0
 #define BNX2X_FP_STATE_NAPI		(1 << 0) /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL		(1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 3) /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED		(1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = true;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	if (fp->state & BNX2X_FP_LOCKED) {
 		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 		/* we don't care if someone yielded */
 		fp->state = BNX2X_FP_STATE_NAPI;
 	}
-	spin_unlock(&fp->lock);
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = false;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	WARN_ON(fp->state &
 		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
-	spin_unlock(&fp->lock);
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
 	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 	return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	int rc = true;
+
+	spin_lock_bh(&fp->lock);
+	if (fp->state & BNX2X_FP_OWNED)
+		rc = false;
+	fp->state |= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
+
+	return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
 	 * Therefore, if they would have been defined in the same union,
 	 * data can get corrupted.
 	 */
-	struct afex_vif_list_ramrod_data func_afex_rdata;
+	union {
+		struct afex_vif_list_ramrod_data viflist_data;
+		struct function_update_data func_update;
+	} func_afex_rdata;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
 #endif /* bnx2x.h */
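The new BNX2X_FP_STATE_DISABLED bit above changes the busy-poll teardown handshake: bnx2x_fp_ll_disable() marks a fastpath ring as going away even while NAPI or a busy-polling socket still owns it, and the unlock paths now mask the state with the DISABLED bit instead of forcing it back to IDLE, so the mark survives until the current owner drops out. Below is a minimal user-space model of that handshake, for illustration only: it uses a pthread mutex in place of the fastpath spinlock and omits the NAPI/poll yield bookkeeping the real driver keeps.

/* Simplified model of the owned/disabled fastpath state machine. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FP_STATE_IDLE      0
#define FP_STATE_NAPI      (1 << 0)
#define FP_STATE_POLL      (1 << 1)
#define FP_STATE_DISABLED  (1 << 2)
#define FP_OWNED           (FP_STATE_NAPI | FP_STATE_POLL)

struct fp {
	pthread_mutex_t lock;
	unsigned int state;
};

static bool fp_lock_napi(struct fp *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & (FP_OWNED | FP_STATE_DISABLED))
		rc = false;		/* busy or being torn down */
	else
		fp->state = FP_STATE_NAPI;
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

static void fp_unlock_napi(struct fp *fp)
{
	pthread_mutex_lock(&fp->lock);
	/* back to idle, but keep a DISABLED mark set by the teardown path */
	fp->state &= FP_STATE_DISABLED;
	pthread_mutex_unlock(&fp->lock);
}

static bool fp_disable(struct fp *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & FP_OWNED)
		rc = false;		/* caller must retry later */
	fp->state |= FP_STATE_DISABLED;	/* block any new owner right away */
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

int main(void)
{
	struct fp fp = { PTHREAD_MUTEX_INITIALIZER, FP_STATE_IDLE };

	printf("napi lock: %d\n", fp_lock_napi(&fp));	/* 1: idle -> owned */
	printf("disable:   %d\n", fp_disable(&fp));	/* 0: still owned */
	fp_unlock_napi(&fp);				/* DISABLED survives */
	printf("napi lock: %d\n", fp_lock_napi(&fp));	/* 0: ring disabled */
	return 0;
}

This is why bnx2x_napi_disable() and bnx2x_napi_disable_cnic() in the bnx2x_cmn.c hunk below can simply poll bnx2x_fp_ll_disable() with usleep_range() instead of grabbing the lock themselves under local_bh_disable().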
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	struct sk_buff *skb = tx_buf->skb;
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
+	u16 split_bd_len = 0;
 
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
 	   txdata->txq_index, idx, tx_buf, skb);
 
-	/* unmap first bd */
 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	--nbd;
 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-	/* ...and the TSO split header bd since they have no mapping */
+	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 		--nbd;
 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
 
+	/* unmap first bd */
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+			 DMA_TO_DEVICE);
+
 	/* now free frags */
 	while (nbd > 0) {
 
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_rx_queue_cnic(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_eth_queue(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 		bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
 	} else {
+		/* Enable Auto-Detect to support 1G over CL37 as well */
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+		/* Force cl48 sync_status LOW to avoid getting stuck in CL73
+		 * parallel-detect loop when CL73 and CL37 are enabled.
+		 */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+		bnx2x_set_aer_mmd(params, phy);
+
 		bnx2x_disable_kr2(params, vars, phy);
 	}
 
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 				*edc_mode = EDC_MODE_ACTIVE_DAC;
 			else
 				check_limiting_mode = 1;
-		} else if (copper_module_type &
-			SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+		} else {
+			*edc_mode = EDC_MODE_PASSIVE_DAC;
+			/* Even in case PASSIVE_DAC indication is not set,
+			 * treat it as a passive DAC cable, since some cables
+			 * don't have this indication.
+			 */
+			if (copper_module_type &
+			    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
 				DP(NETIF_MSG_LINK,
 				   "Passive Copper cable detected\n");
-				*edc_mode =
-				      EDC_MODE_PASSIVE_DAC;
-			} else {
-				DP(NETIF_MSG_LINK,
-					"Unknown copper-cable-type 0x%x !!!\n",
-					copper_module_type);
-				return -EINVAL;
+			} else {
+				DP(NETIF_MSG_LINK,
+				   "Unknown copper-cable-type\n");
+			}
 			}
 			break;
 		}
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 			(1<<11));
 
 	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-			(phy->speed_cap_mask &
-			PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
-			(phy->req_line_speed == SPEED_1000)) {
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
 		an_1000_val |= (1<<8);
 		autoneg_val |= (1<<9 | 1<<12);
 		if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 			0x09,
 			&an_1000_val);
 
-	/* Set 100 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-			(phy->speed_cap_mask &
-			(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
-		an_10_100_val |= (1<<7);
-		/* Enable autoneg and restart autoneg for legacy speeds */
-		autoneg_val |= (1<<9 | 1<<12);
-
-		if (phy->req_duplex == DUPLEX_FULL)
-			an_10_100_val |= (1<<8);
-		DP(NETIF_MSG_LINK, "Advertising 100M\n");
-	}
-
-	/* Set 10 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-			(phy->speed_cap_mask &
-			(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
-		an_10_100_val |= (1<<5);
-		autoneg_val |= (1<<9 | 1<<12);
-		if (phy->req_duplex == DUPLEX_FULL)
+	/* Advertise 10/100 link speed */
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+			an_10_100_val |= (1<<5);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
 			an_10_100_val |= (1<<6);
-		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			an_10_100_val |= (1<<7);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			an_10_100_val |= (1<<8);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+		}
 	}
 
 	/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
 
+	/* Do not touch the link in case physical link down */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return 1;
+
 	/* a. Update shmem->link_status accordingly
 	 * b. Update link_vars->link_up
 	 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 	 */
 	not_kr2_device = (((base_page & 0x8000) == 0) ||
 			  (((base_page & 0x8000) &&
-			    ((next_page & 0xe0) == 0x2))));
+			    ((next_page & 0xe0) == 0x20))));
 
 	/* In case KR2 is already disabled, check if we need to re-enable it */
 	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..8b3107b2fcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 		}
 	}
 
-	/* adjust igu_sb_cnt to MF for E1x */
-	if (CHIP_IS_E1x(bp) && IS_MF(bp))
-		bp->igu_sb_cnt /= E1HVN_MAX;
+	/* adjust igu_sb_cnt to MF for E1H */
+	if (CHIP_IS_E1H(bp) && IS_MF(bp))
+		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
 
 	/* port info */
 	bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
 #define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
 #define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa
 #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
 #define MDIO_WC_REG_XGXS_STATUS3			0x8129
 #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	struct bnx2x_vlan_mac_ramrod_params p;
 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+	unsigned long flags;
 	int read_lock;
 	int rc = 0;
 
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	spin_lock_bh(&exeq->lock);
 
 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
-		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-		    *vlan_mac_flags) {
+		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
 			if (rc) {
 				BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 		return read_lock;
 
 	list_for_each_entry(pos, &o->head, link) {
-		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+		flags = pos->vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
 			rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
 	struct bnx2x_raw_obj *r = &o->raw;
 
 	/* Do nothing if only driver cleanup was requested */
-	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+		   p->ramrod_flags);
 		return 0;
+	}
 
 	r->set_pending(r);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
 	BNX2X_DONT_CONSUME_CAM_CREDIT,
 	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
+				 1 << BNX2X_ETH_MAC | \
+				 1 << BNX2X_ISCSI_ETH_MAC | \
+				 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
 struct bnx2x_vlan_mac_ramrod_params {
 	/* Object to run the command from */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		/* next state */
 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
 
+		/* record the accept flags in vfdb so hypervisor can modify them
+		 * if necessary
+		 */
+		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+			ramrod->rx_accept_flags;
 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 op_err:
@@ -1224,39 +1229,43 @@ op_pending:
 	return;
 }
 
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+				  struct bnx2x_virtf *vf,
+				  unsigned long accept_flags)
+{
+	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+	memset(ramrod, 0, sizeof(*ramrod));
+	ramrod->cid = vfq->cid;
+	ramrod->cl_id = vfq_cl_id(vf, vfq);
+	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+	ramrod->rx_accept_flags = accept_flags;
+	ramrod->tx_accept_flags = accept_flags;
+	ramrod->pstate = &vf->filter_state;
+	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
 			  struct bnx2x_virtf *vf,
 			  struct bnx2x_vfop_cmd *cmd,
 			  int qid, unsigned long accept_flags)
 {
-	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
 	if (vfop) {
 		struct bnx2x_rx_mode_ramrod_params *ramrod =
 			&vf->op_params.rx_mode;
 
-		memset(ramrod, 0, sizeof(*ramrod));
-
-		/* Prepare ramrod parameters */
-		ramrod->cid = vfq->cid;
-		ramrod->cl_id = vfq_cl_id(vf, vfq);
-		ramrod->rx_mode_obj = &bp->rx_mode_obj;
-		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
-		ramrod->rx_accept_flags = accept_flags;
-		ramrod->tx_accept_flags = accept_flags;
-		ramrod->pstate = &vf->filter_state;
-		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
-		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
-		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
-		ramrod->rdata =
-			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
-		ramrod->rdata_mapping =
-			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
 
 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
 				 bnx2x_vfop_rxmode, cmd->done);
@@ -3114,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
 
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+		return -EINVAL;
+	}
+
 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
 
@@ -3197,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
 		bnx2x_iov_static_resc(bp, vf);
 	}
 
-	/* prepare msix vectors in VF configuration space */
+	/* prepare msix vectors in VF configuration space - the value in the
+	 * PCI configuration space should be the index of the last entry,
+	 * namely one less than the actual size of the table
+	 */
 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
-		       num_vf_queues);
+		       num_vf_queues - 1);
 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
-		   vf_idx, num_vf_queues);
+		   vf_idx, num_vf_queues - 1);
 	}
 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
@@ -3431,10 +3448,18 @@ out:
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
+	struct bnx2x_queue_state_params q_params = {NULL};
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	struct bnx2x_queue_update_params *update_params;
+	struct pf_vf_bulletin_content *bulletin = NULL;
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
 	struct bnx2x *bp = netdev_priv(dev);
-	int rc, q_logical_state;
+	struct bnx2x_vlan_mac_obj *vlan_obj;
+	unsigned long vlan_mac_flags = 0;
+	unsigned long ramrod_flags = 0;
 	struct bnx2x_virtf *vf = NULL;
-	struct pf_vf_bulletin_content *bulletin = NULL;
+	unsigned long accept_flags;
+	int rc;
 
 	/* sanity and init */
 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3452,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 	/* update PF's copy of the VF's bulletin. No point in posting the vlan
 	 * to the VF since it doesn't have anything to do with it. But it useful
 	 * to store it here in case the VF is not up yet and we can only
-	 * configure the vlan later when it does.
+	 * configure the vlan later when it does. Treat vlan id 0 as remove the
+	 * Host tag.
 	 */
-	bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	if (vlan > 0)
+		bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	else
+		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
 	/* is vf initialized and queue set up? */
-	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
-	if (vf->state == VF_ENABLED &&
-	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-		/* configure the vlan in device on this vf's queue */
-		unsigned long ramrod_flags = 0;
-		unsigned long vlan_mac_flags = 0;
-		struct bnx2x_vlan_mac_obj *vlan_obj =
-			&bnx2x_leading_vfq(vf, vlan_obj);
-		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-		struct bnx2x_queue_state_params q_params = {NULL};
-		struct bnx2x_queue_update_params *update_params;
+	if (vf->state != VF_ENABLED ||
+	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+		return rc;
 
-		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-		if (rc)
-			return rc;
-		memset(&ramrod_param, 0, sizeof(ramrod_param));
+	/* configure the vlan in device on this vf's queue */
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+	if (rc)
+		return rc;
 
 	/* must lock vfpf channel to protect against vf flows */
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
 	/* remove existing vlans */
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
 				  &ramrod_flags);
 	if (rc) {
 		BNX2X_ERR("failed to delete vlans\n");
 		rc = -EINVAL;
 		goto out;
 	}
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (vlan)
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = vlan_obj;
+	ramrod_param.ramrod_flags = ramrod_flags;
+	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		&ramrod_param.user_req.vlan_mac_flags);
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		rc = -EINVAL;
+		goto out;
+	}
 
 	/* send queue update ramrod to configure default vlan and silent
 	 * vlan removal
+	 */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+	update_params = &q_params.params.update;
+	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		  &update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &update_params->update_flags);
+	if (vlan == 0) {
+		/* if vlan is 0 then we want to leave the VF traffic
+		 * untagged, and leave the incoming traffic untouched
+		 * (i.e. do not remove any vlan tags).
+		 */
+		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+			    &update_params->update_flags);
+		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+			    &update_params->update_flags);
+	} else {
+		/* configure default vlan to vf queue and set silent
+		 * vlan removal (the vf remains unaware of this vlan).
 		 */
-		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-		q_params.cmd = BNX2X_Q_CMD_UPDATE;
-		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-		update_params = &q_params.params.update;
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
 			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
 			  &update_params->update_flags);
+		update_params->def_vlan = vlan;
+		update_params->silent_removal_value =
+			vlan & VLAN_VID_MASK;
+		update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
 
-		if (vlan == 0) {
-			/* if vlan is 0 then we want to leave the VF traffic
-			 * untagged, and leave the incoming traffic untouched
-			 * (i.e. do not remove any vlan tags).
-			 */
-			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-				    &update_params->update_flags);
-			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-				    &update_params->update_flags);
-		} else {
-			/* configure the new vlan to device */
-			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-			ramrod_param.vlan_mac_obj = vlan_obj;
-			ramrod_param.ramrod_flags = ramrod_flags;
-			ramrod_param.user_req.u.vlan.vlan = vlan;
-			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-			if (rc) {
-				BNX2X_ERR("failed to configure vlan\n");
-				rc = -EINVAL;
-				goto out;
-			}
-
-			/* configure default vlan to vf queue and set silent
-			 * vlan removal (the vf remains unaware of this vlan).
-			 */
-			update_params = &q_params.params.update;
-			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-				  &update_params->update_flags);
-			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-				  &update_params->update_flags);
-			update_params->def_vlan = vlan;
-		}
+	/* Update the Queue state */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Failed to configure default VLAN\n");
+		goto out;
+	}
 
-		/* Update the Queue state */
-		rc = bnx2x_queue_state_change(bp, &q_params);
-		if (rc) {
-			BNX2X_ERR("Failed to configure default VLAN\n");
-			goto out;
-		}
-
+
 	/* clear the flag indicating that this VF needs its vlan
 	 * (will only be set if the HV configured the Vlan before vf was
 	 * up and we were called because the VF came up later
 	 */
 out:
 	vf->cfg_flags &= ~VF_CFG_VLAN;
 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
-	}
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
 	/* VLANs object */
 	struct bnx2x_vlan_mac_obj	vlan_obj;
 	atomic_t vlan_count;		/* 0 means vlan-0 is set ~ untagged */
+	unsigned long accept_flags;	/* last accept flags configured */
 
 	/* Queue Slow-path State object */
 	struct bnx2x_queue_sp_obj	sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
 		return -EINVAL;
 	}
 
-	BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
 
 	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
 
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
 		unsigned long accept = 0;
+		struct pf_vf_bulletin_content *bulletin =
+			BP_VF_BULLETIN(bp, vf->index);
 
 		/* covert VF-PF if mask to bnx2x accept flags */
 		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 
 		/* A packet arriving the vf's mac should be accepted
-		 * with any vlan
+		 * with any vlan, unless a vlan has already been
+		 * configured.
 		 */
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
 		/* set rx-mode */
 		rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 			goto response;
 		}
 	}
+	/* if vlan was set by hypervisor we don't allow guest to config vlan */
+	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+		int i;
+
+		/* search for vlan filters */
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (filters->filters[i].flags &
+			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
+				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+					  vf->abs_vfid);
+				vf->op_rc = -EPERM;
+				goto response;
+			}
+		}
+	}
 
 	/* verify vf_qid */
 	if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
 
 	/* flags handled individually for backward/forward compatability */
+	vf_op_params->rss_flags = 0;
+	vf_op_params->ramrod_flags = 0;
+
 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a9e068423ba0..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 {
 	u32 base = (u32) mapping & 0xffffffff;
 
-	return (base > 0xffffdcc0) && (base + len + 8 < base);
+	return base + len + 8 < base;
 }
 
 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp)
 	void (*write_op)(struct tg3 *, u32, u32);
 	int i, err;
 
+	if (!pci_device_is_present(tp->pdev))
+		return -ENODEV;
+
 	tg3_nvram_lock(tp);
 
 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -10629,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 static ssize_t tg3_show_temp(struct device *dev,
 			     struct device_attribute *devattr, char *buf)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct tg3 *tp = netdev_priv(netdev);
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct tg3 *tp = dev_get_drvdata(dev);
 	u32 temperature;
 
 	spin_lock_bh(&tp->lock);
@@ -10650,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
 			  TG3_TEMP_MAX_OFFSET);
 
-static struct attribute *tg3_attributes[] = {
+static struct attribute *tg3_attrs[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
 	NULL
 };
-
-static const struct attribute_group tg3_group = {
-	.attrs = tg3_attributes,
-};
+ATTRIBUTE_GROUPS(tg3);
 
 static void tg3_hwmon_close(struct tg3 *tp)
 {
 	if (tp->hwmon_dev) {
 		hwmon_device_unregister(tp->hwmon_dev);
 		tp->hwmon_dev = NULL;
-		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
 	}
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-	int i, err;
+	int i;
 	u32 size = 0;
 	struct pci_dev *pdev = tp->pdev;
 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
 	if (!size)
 		return;
 
-	/* Register hwmon sysfs hooks */
-	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
-		return;
-	}
-
-	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+							  tp, tg3_groups);
 	if (IS_ERR(tp->hwmon_dev)) {
 		tp->hwmon_dev = NULL;
 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
-		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
 	}
 }
 
@@ -11594,10 +11584,11 @@ static int tg3_close(struct net_device *dev) | |||
11594 | memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); | 11584 | memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); |
11595 | memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); | 11585 | memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); |
11596 | 11586 | ||
11597 | tg3_power_down_prepare(tp); | 11587 | if (pci_device_is_present(tp->pdev)) { |
11598 | 11588 | tg3_power_down_prepare(tp); | |
11599 | tg3_carrier_off(tp); | ||
11600 | 11589 | ||
11590 | tg3_carrier_off(tp); | ||
11591 | } | ||
11601 | return 0; | 11592 | return 0; |
11602 | } | 11593 | } |
11603 | 11594 | ||
@@ -16512,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) | |||
16512 | /* Clear this out for sanity. */ | 16503 | /* Clear this out for sanity. */ |
16513 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 16504 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
16514 | 16505 | ||
16506 | /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ | ||
16507 | tw32(TG3PCI_REG_BASE_ADDR, 0); | ||
16508 | |||
16515 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 16509 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
16516 | &pci_state_reg); | 16510 | &pci_state_reg); |
16517 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && | 16511 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && |
@@ -17739,10 +17733,12 @@ static int tg3_suspend(struct device *device) | |||
17739 | struct pci_dev *pdev = to_pci_dev(device); | 17733 | struct pci_dev *pdev = to_pci_dev(device); |
17740 | struct net_device *dev = pci_get_drvdata(pdev); | 17734 | struct net_device *dev = pci_get_drvdata(pdev); |
17741 | struct tg3 *tp = netdev_priv(dev); | 17735 | struct tg3 *tp = netdev_priv(dev); |
17742 | int err; | 17736 | int err = 0; |
17737 | |||
17738 | rtnl_lock(); | ||
17743 | 17739 | ||
17744 | if (!netif_running(dev)) | 17740 | if (!netif_running(dev)) |
17745 | return 0; | 17741 | goto unlock; |
17746 | 17742 | ||
17747 | tg3_reset_task_cancel(tp); | 17743 | tg3_reset_task_cancel(tp); |
17748 | tg3_phy_stop(tp); | 17744 | tg3_phy_stop(tp); |
@@ -17784,6 +17780,8 @@ out: | |||
17784 | tg3_phy_start(tp); | 17780 | tg3_phy_start(tp); |
17785 | } | 17781 | } |
17786 | 17782 | ||
17783 | unlock: | ||
17784 | rtnl_unlock(); | ||
17787 | return err; | 17785 | return err; |
17788 | } | 17786 | } |
17789 | 17787 | ||
@@ -17792,10 +17790,12 @@ static int tg3_resume(struct device *device) | |||
17792 | struct pci_dev *pdev = to_pci_dev(device); | 17790 | struct pci_dev *pdev = to_pci_dev(device); |
17793 | struct net_device *dev = pci_get_drvdata(pdev); | 17791 | struct net_device *dev = pci_get_drvdata(pdev); |
17794 | struct tg3 *tp = netdev_priv(dev); | 17792 | struct tg3 *tp = netdev_priv(dev); |
17795 | int err; | 17793 | int err = 0; |
17794 | |||
17795 | rtnl_lock(); | ||
17796 | 17796 | ||
17797 | if (!netif_running(dev)) | 17797 | if (!netif_running(dev)) |
17798 | return 0; | 17798 | goto unlock; |
17799 | 17799 | ||
17800 | netif_device_attach(dev); | 17800 | netif_device_attach(dev); |
17801 | 17801 | ||
@@ -17819,6 +17819,8 @@ out: | |||
17819 | if (!err) | 17819 | if (!err) |
17820 | tg3_phy_start(tp); | 17820 | tg3_phy_start(tp); |
17821 | 17821 | ||
17822 | unlock: | ||
17823 | rtnl_unlock(); | ||
17822 | return err; | 17824 | return err; |
17823 | } | 17825 | } |
17824 | #endif /* CONFIG_PM_SLEEP */ | 17826 | #endif /* CONFIG_PM_SLEEP */ |
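The suspend/resume hunks wrap both handlers in rtnl_lock()/rtnl_unlock() and funnel every exit, including the !netif_running() case, through one unlock label. A minimal sketch of that control flow (foo_suspend() and foo_do_suspend() are illustrative):

static int foo_suspend(struct device *device)
{
        struct net_device *dev = pci_get_drvdata(to_pci_dev(device));
        int err = 0;

        rtnl_lock();
        if (!netif_running(dev))
                goto unlock;            /* still drops the lock */

        err = foo_do_suspend(dev);      /* hypothetical helper */
unlock:
        rtnl_unlock();
        return err;
}

Initialising err to 0 matters because the early-exit path now reaches the common return statement instead of returning 0 directly.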
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecd2fb3ef695..56e0415f8cdf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -49,13 +49,15 @@ | |||
49 | #include <asm/io.h> | 49 | #include <asm/io.h> |
50 | #include "cxgb4_uld.h" | 50 | #include "cxgb4_uld.h" |
51 | 51 | ||
52 | #define FW_VERSION_MAJOR 1 | 52 | #define T4FW_VERSION_MAJOR 0x01 |
53 | #define FW_VERSION_MINOR 4 | 53 | #define T4FW_VERSION_MINOR 0x06 |
54 | #define FW_VERSION_MICRO 0 | 54 | #define T4FW_VERSION_MICRO 0x18 |
55 | #define T4FW_VERSION_BUILD 0x00 | ||
55 | 56 | ||
56 | #define FW_VERSION_MAJOR_T5 0 | 57 | #define T5FW_VERSION_MAJOR 0x01 |
57 | #define FW_VERSION_MINOR_T5 0 | 58 | #define T5FW_VERSION_MINOR 0x08 |
58 | #define FW_VERSION_MICRO_T5 0 | 59 | #define T5FW_VERSION_MICRO 0x1C |
60 | #define T5FW_VERSION_BUILD 0x00 | ||
59 | 61 | ||
60 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) | 62 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) |
61 | 63 | ||
@@ -226,6 +228,25 @@ struct tp_params { | |||
226 | 228 | ||
227 | uint32_t dack_re; /* DACK timer resolution */ | 229 | uint32_t dack_re; /* DACK timer resolution */ |
228 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ | 230 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ |
231 | |||
232 | u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ | ||
233 | u32 ingress_config; /* cached TP_INGRESS_CONFIG */ | ||
234 | |||
235 | /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a | ||
236 | * subset of the set of fields which may be present in the Compressed | ||
237 | * Filter Tuple portion of filters and TCP TCB connections. The | ||
238 | * fields which are present are controlled by the TP_VLAN_PRI_MAP. | ||
239 | * Since a variable number of fields may or may not be present, their | ||
240 | * shifted field positions within the Compressed Filter Tuple may | ||
241 | * vary, or not even be present if the field isn't selected in | ||
242 | * TP_VLAN_PRI_MAP. Since some of these fields are needed in various | ||
243 | * places we store their offsets here, or a -1 if the field isn't | ||
244 | * present. | ||
245 | */ | ||
246 | int vlan_shift; | ||
247 | int vnic_shift; | ||
248 | int port_shift; | ||
249 | int protocol_shift; | ||
229 | }; | 250 | }; |
230 | 251 | ||
231 | struct vpd_params { | 252 | struct vpd_params { |
@@ -240,6 +261,26 @@ struct pci_params { | |||
240 | unsigned char width; | 261 | unsigned char width; |
241 | }; | 262 | }; |
242 | 263 | ||
264 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | ||
265 | #define CHELSIO_CHIP_FPGA 0x100 | ||
266 | #define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) | ||
267 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | ||
268 | |||
269 | #define CHELSIO_T4 0x4 | ||
270 | #define CHELSIO_T5 0x5 | ||
271 | |||
272 | enum chip_type { | ||
273 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | ||
274 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
275 | T4_FIRST_REV = T4_A1, | ||
276 | T4_LAST_REV = T4_A2, | ||
277 | |||
278 | T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | ||
279 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), | ||
280 | T5_FIRST_REV = T5_A0, | ||
281 | T5_LAST_REV = T5_A1, | ||
282 | }; | ||
283 | |||
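The relocated chip macros pack a 4-bit version and a 4-bit revision into a single code, so one value identifies both the silicon family and its stepping. Worked through with the definitions above:

/* T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1) = (0x5 << 4) | 0x1 = 0x51
 * CHELSIO_CHIP_VERSION(0x51) = (0x51 >> 4) & 0xf = 0x5  ->  CHELSIO_T5
 * CHELSIO_CHIP_RELEASE(0x51) = 0x51 & 0xf        = 0x1  ->  stepping A1
 */

This is why is_t4()/is_t5() further down can compare CHELSIO_CHIP_VERSION() against a constant instead of range-checking first/last revision values.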
243 | struct adapter_params { | 284 | struct adapter_params { |
244 | struct tp_params tp; | 285 | struct tp_params tp; |
245 | struct vpd_params vpd; | 286 | struct vpd_params vpd; |
@@ -259,7 +300,7 @@ struct adapter_params { | |||
259 | 300 | ||
260 | unsigned char nports; /* # of ethernet ports */ | 301 | unsigned char nports; /* # of ethernet ports */ |
261 | unsigned char portvec; | 302 | unsigned char portvec; |
262 | unsigned char rev; /* chip revision */ | 303 | enum chip_type chip; /* chip code */ |
263 | unsigned char offload; | 304 | unsigned char offload; |
264 | 305 | ||
265 | unsigned char bypass; | 306 | unsigned char bypass; |
@@ -267,6 +308,23 @@ struct adapter_params { | |||
267 | unsigned int ofldq_wr_cred; | 308 | unsigned int ofldq_wr_cred; |
268 | }; | 309 | }; |
269 | 310 | ||
311 | #include "t4fw_api.h" | ||
312 | |||
313 | #define FW_VERSION(chip) ( \ | ||
314 | FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ | ||
315 | FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ | ||
316 | FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ | ||
317 | FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) | ||
318 | #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) | ||
319 | |||
320 | struct fw_info { | ||
321 | u8 chip; | ||
322 | char *fs_name; | ||
323 | char *fw_mod_name; | ||
324 | struct fw_hdr fw_hdr; | ||
325 | }; | ||
326 | |||
327 | |||
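FW_VERSION() works by token pasting: the chip argument is glued onto the FW_VERSION_* suffixes, so FW_VERSION(T4) and FW_VERSION(T5) pick up the per-chip constants defined earlier without duplicating the macro body. Expanded by hand for T4 (the numeric versions follow directly from the hex constants above):

/* FW_VERSION(T4):
 *   chip##FW_VERSION_MAJOR  ->  T4FW_VERSION_MAJOR  ->  0x01
 *   chip##FW_VERSION_MINOR  ->  T4FW_VERSION_MINOR  ->  0x06
 *   chip##FW_VERSION_MICRO  ->  T4FW_VERSION_MICRO  ->  0x18
 *   chip##FW_VERSION_BUILD  ->  T4FW_VERSION_BUILD  ->  0x00
 * i.e. driver firmware 1.6.24.0 for T4 (and likewise 1.8.28.0 for T5),
 * which is what fw_info_array[] in cxgb4_main.c stores in .fw_hdr.fw_ver.
 */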
270 | struct trace_params { | 328 | struct trace_params { |
271 | u32 data[TRACE_LEN / 4]; | 329 | u32 data[TRACE_LEN / 4]; |
272 | u32 mask[TRACE_LEN / 4]; | 330 | u32 mask[TRACE_LEN / 4]; |
@@ -512,25 +570,6 @@ struct sge { | |||
512 | 570 | ||
513 | struct l2t_data; | 571 | struct l2t_data; |
514 | 572 | ||
515 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | ||
516 | #define CHELSIO_CHIP_VERSION(code) ((code) >> 4) | ||
517 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | ||
518 | |||
519 | #define CHELSIO_T4 0x4 | ||
520 | #define CHELSIO_T5 0x5 | ||
521 | |||
522 | enum chip_type { | ||
523 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), | ||
524 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | ||
525 | T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
526 | T4_FIRST_REV = T4_A1, | ||
527 | T4_LAST_REV = T4_A3, | ||
528 | |||
529 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | ||
530 | T5_FIRST_REV = T5_A1, | ||
531 | T5_LAST_REV = T5_A1, | ||
532 | }; | ||
533 | |||
534 | #ifdef CONFIG_PCI_IOV | 573 | #ifdef CONFIG_PCI_IOV |
535 | 574 | ||
536 | /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial | 575 | /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial |
@@ -715,12 +754,12 @@ enum { | |||
715 | 754 | ||
716 | static inline int is_t5(enum chip_type chip) | 755 | static inline int is_t5(enum chip_type chip) |
717 | { | 756 | { |
718 | return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); | 757 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; |
719 | } | 758 | } |
720 | 759 | ||
721 | static inline int is_t4(enum chip_type chip) | 760 | static inline int is_t4(enum chip_type chip) |
722 | { | 761 | { |
723 | return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); | 762 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; |
724 | } | 763 | } |
725 | 764 | ||
726 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) | 765 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) |
@@ -900,8 +939,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p); | |||
900 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); | 939 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); |
901 | unsigned int t4_flash_cfg_addr(struct adapter *adapter); | 940 | unsigned int t4_flash_cfg_addr(struct adapter *adapter); |
902 | int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); | 941 | int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); |
903 | int t4_check_fw_version(struct adapter *adapter); | 942 | int t4_get_fw_version(struct adapter *adapter, u32 *vers); |
943 | int t4_get_tp_version(struct adapter *adapter, u32 *vers); | ||
944 | int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | ||
945 | const u8 *fw_data, unsigned int fw_size, | ||
946 | struct fw_hdr *card_fw, enum dev_state state, int *reset); | ||
904 | int t4_prep_adapter(struct adapter *adapter); | 947 | int t4_prep_adapter(struct adapter *adapter); |
948 | int t4_init_tp_params(struct adapter *adap); | ||
949 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); | ||
905 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); | 950 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); |
906 | void t4_fatal_err(struct adapter *adapter); | 951 | void t4_fatal_err(struct adapter *adapter); |
907 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | 952 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, |
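The new tp_params offsets and the t4_filter_field_shift() prototype exist because the Compressed Filter Tuple has no fixed layout: every field enabled in TP_VLAN_PRI_MAP pushes the fields above it up by its own width, and a disabled field simply is not there (hence the -1 convention). A simplified sketch of how such a shift could be computed; the field list is abbreviated and the widths are illustrative, while the real implementation in t4_hw.c covers every TP_VLAN_PRI_MAP field:

/* Simplified illustration only -- not the full T4/T5 field set. */
static int example_field_shift(u32 vlan_pri_map, u32 field_sel)
{
        static const struct { u32 sel; int width; } fields[] = {
                { F_PORT,     3 },      /* widths are illustrative */
                { F_PROTOCOL, 8 },
                /* FCOE, VNIC_ID, VLAN, TOS, ... omitted */
        };
        int i, shift = 0;

        if (!(vlan_pri_map & field_sel))
                return -1;      /* field not present in the tuple */

        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                if (fields[i].sel == field_sel)
                        return shift;
                if (vlan_pri_map & fields[i].sel)
                        shift += fields[i].width;
        }
        return -1;
}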
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8b929eeecd2d..fff02ed1295e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { | |||
276 | { 0, } | 276 | { 0, } |
277 | }; | 277 | }; |
278 | 278 | ||
279 | #define FW_FNAME "cxgb4/t4fw.bin" | 279 | #define FW4_FNAME "cxgb4/t4fw.bin" |
280 | #define FW5_FNAME "cxgb4/t5fw.bin" | 280 | #define FW5_FNAME "cxgb4/t5fw.bin" |
281 | #define FW_CFNAME "cxgb4/t4-config.txt" | 281 | #define FW4_CFNAME "cxgb4/t4-config.txt" |
282 | #define FW5_CFNAME "cxgb4/t5-config.txt" | 282 | #define FW5_CFNAME "cxgb4/t5-config.txt" |
283 | 283 | ||
284 | MODULE_DESCRIPTION(DRV_DESC); | 284 | MODULE_DESCRIPTION(DRV_DESC); |
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications"); | |||
286 | MODULE_LICENSE("Dual BSD/GPL"); | 286 | MODULE_LICENSE("Dual BSD/GPL"); |
287 | MODULE_VERSION(DRV_VERSION); | 287 | MODULE_VERSION(DRV_VERSION); |
288 | MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); | 288 | MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); |
289 | MODULE_FIRMWARE(FW_FNAME); | 289 | MODULE_FIRMWARE(FW4_FNAME); |
290 | MODULE_FIRMWARE(FW5_FNAME); | 290 | MODULE_FIRMWARE(FW5_FNAME); |
291 | 291 | ||
292 | /* | 292 | /* |
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap); | |||
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | /* | 1073 | /* |
1074 | * Returns 0 if new FW was successfully loaded, a positive errno if a load was | ||
1075 | * started but failed, and a negative errno if flash load couldn't start. | ||
1076 | */ | ||
1077 | static int upgrade_fw(struct adapter *adap) | ||
1078 | { | ||
1079 | int ret; | ||
1080 | u32 vers, exp_major; | ||
1081 | const struct fw_hdr *hdr; | ||
1082 | const struct firmware *fw; | ||
1083 | struct device *dev = adap->pdev_dev; | ||
1084 | char *fw_file_name; | ||
1085 | |||
1086 | switch (CHELSIO_CHIP_VERSION(adap->chip)) { | ||
1087 | case CHELSIO_T4: | ||
1088 | fw_file_name = FW_FNAME; | ||
1089 | exp_major = FW_VERSION_MAJOR; | ||
1090 | break; | ||
1091 | case CHELSIO_T5: | ||
1092 | fw_file_name = FW5_FNAME; | ||
1093 | exp_major = FW_VERSION_MAJOR_T5; | ||
1094 | break; | ||
1095 | default: | ||
1096 | dev_err(dev, "Unsupported chip type, %x\n", adap->chip); | ||
1097 | return -EINVAL; | ||
1098 | } | ||
1099 | |||
1100 | ret = request_firmware(&fw, fw_file_name, dev); | ||
1101 | if (ret < 0) { | ||
1102 | dev_err(dev, "unable to load firmware image %s, error %d\n", | ||
1103 | fw_file_name, ret); | ||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1107 | hdr = (const struct fw_hdr *)fw->data; | ||
1108 | vers = ntohl(hdr->fw_ver); | ||
1109 | if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { | ||
1110 | ret = -EINVAL; /* wrong major version, won't do */ | ||
1111 | goto out; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * If the flash FW is unusable or we found something newer, load it. | ||
1116 | */ | ||
1117 | if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || | ||
1118 | vers > adap->params.fw_vers) { | ||
1119 | dev_info(dev, "upgrading firmware ...\n"); | ||
1120 | ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, | ||
1121 | /*force=*/false); | ||
1122 | if (!ret) | ||
1123 | dev_info(dev, | ||
1124 | "firmware upgraded to version %pI4 from %s\n", | ||
1125 | &hdr->fw_ver, fw_file_name); | ||
1126 | else | ||
1127 | dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); | ||
1128 | } else { | ||
1129 | /* | ||
1130 | * Tell our caller that we didn't upgrade the firmware. | ||
1131 | */ | ||
1132 | ret = -EINVAL; | ||
1133 | } | ||
1134 | |||
1135 | out: release_firmware(fw); | ||
1136 | return ret; | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. | 1074 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. |
1141 | * The allocated memory is cleared. | 1075 | * The allocated memory is cleared. |
1142 | */ | 1076 | */ |
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset) | |||
1415 | static int get_regs_len(struct net_device *dev) | 1349 | static int get_regs_len(struct net_device *dev) |
1416 | { | 1350 | { |
1417 | struct adapter *adap = netdev2adap(dev); | 1351 | struct adapter *adap = netdev2adap(dev); |
1418 | if (is_t4(adap->chip)) | 1352 | if (is_t4(adap->params.chip)) |
1419 | return T4_REGMAP_SIZE; | 1353 | return T4_REGMAP_SIZE; |
1420 | else | 1354 | else |
1421 | return T5_REGMAP_SIZE; | 1355 | return T5_REGMAP_SIZE; |
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
1499 | data += sizeof(struct port_stats) / sizeof(u64); | 1433 | data += sizeof(struct port_stats) / sizeof(u64); |
1500 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); | 1434 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); |
1501 | data += sizeof(struct queue_port_stats) / sizeof(u64); | 1435 | data += sizeof(struct queue_port_stats) / sizeof(u64); |
1502 | if (!is_t4(adapter->chip)) { | 1436 | if (!is_t4(adapter->params.chip)) { |
1503 | t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); | 1437 | t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); |
1504 | val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); | 1438 | val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); |
1505 | val2 = t4_read_reg(adapter, SGE_STAT_MATCH); | 1439 | val2 = t4_read_reg(adapter, SGE_STAT_MATCH); |
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
1521 | */ | 1455 | */ |
1522 | static inline unsigned int mk_adap_vers(const struct adapter *ap) | 1456 | static inline unsigned int mk_adap_vers(const struct adapter *ap) |
1523 | { | 1457 | { |
1524 | return CHELSIO_CHIP_VERSION(ap->chip) | | 1458 | return CHELSIO_CHIP_VERSION(ap->params.chip) | |
1525 | (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); | 1459 | (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); |
1526 | } | 1460 | } |
1527 | 1461 | ||
1528 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, | 1462 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, |
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
2189 | static const unsigned int *reg_ranges; | 2123 | static const unsigned int *reg_ranges; |
2190 | int arr_size = 0, buf_size = 0; | 2124 | int arr_size = 0, buf_size = 0; |
2191 | 2125 | ||
2192 | if (is_t4(ap->chip)) { | 2126 | if (is_t4(ap->params.chip)) { |
2193 | reg_ranges = &t4_reg_ranges[0]; | 2127 | reg_ranges = &t4_reg_ranges[0]; |
2194 | arr_size = ARRAY_SIZE(t4_reg_ranges); | 2128 | arr_size = ARRAY_SIZE(t4_reg_ranges); |
2195 | buf_size = T4_REGMAP_SIZE; | 2129 | buf_size = T4_REGMAP_SIZE; |
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap) | |||
2967 | size = t4_read_reg(adap, MA_EDRAM1_BAR); | 2901 | size = t4_read_reg(adap, MA_EDRAM1_BAR); |
2968 | add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); | 2902 | add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); |
2969 | } | 2903 | } |
2970 | if (is_t4(adap->chip)) { | 2904 | if (is_t4(adap->params.chip)) { |
2971 | size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); | 2905 | size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); |
2972 | if (i & EXT_MEM_ENABLE) | 2906 | if (i & EXT_MEM_ENABLE) |
2973 | add_debugfs_mem(adap, "mc", MEM_MC, | 2907 | add_debugfs_mem(adap, "mc", MEM_MC, |
@@ -3052,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) | |||
3052 | if (stid >= 0) { | 2986 | if (stid >= 0) { |
3053 | t->stid_tab[stid].data = data; | 2987 | t->stid_tab[stid].data = data; |
3054 | stid += t->stid_base; | 2988 | stid += t->stid_base; |
3055 | t->stids_in_use++; | 2989 | /* IPv6 requires max of 520 bits or 16 cells in TCAM |
2990 | * This is equivalent to 4 TIDs. With CLIP enabled it | ||
2991 | * needs 2 TIDs. | ||
2992 | */ | ||
2993 | if (family == PF_INET) | ||
2994 | t->stids_in_use++; | ||
2995 | else | ||
2996 | t->stids_in_use += 4; | ||
3056 | } | 2997 | } |
3057 | spin_unlock_bh(&t->stid_lock); | 2998 | spin_unlock_bh(&t->stid_lock); |
3058 | return stid; | 2999 | return stid; |
@@ -3078,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) | |||
3078 | } | 3019 | } |
3079 | if (stid >= 0) { | 3020 | if (stid >= 0) { |
3080 | t->stid_tab[stid].data = data; | 3021 | t->stid_tab[stid].data = data; |
3081 | stid += t->stid_base; | 3022 | stid -= t->nstids; |
3023 | stid += t->sftid_base; | ||
3082 | t->stids_in_use++; | 3024 | t->stids_in_use++; |
3083 | } | 3025 | } |
3084 | spin_unlock_bh(&t->stid_lock); | 3026 | spin_unlock_bh(&t->stid_lock); |
@@ -3090,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid); | |||
3090 | */ | 3032 | */ |
3091 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) | 3033 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) |
3092 | { | 3034 | { |
3093 | stid -= t->stid_base; | 3035 | /* Is it a server filter TID? */ |
3036 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
3037 | stid -= t->sftid_base; | ||
3038 | stid += t->nstids; | ||
3039 | } else { | ||
3040 | stid -= t->stid_base; | ||
3041 | } | ||
3042 | |||
3094 | spin_lock_bh(&t->stid_lock); | 3043 | spin_lock_bh(&t->stid_lock); |
3095 | if (family == PF_INET) | 3044 | if (family == PF_INET) |
3096 | __clear_bit(stid, t->stid_bmap); | 3045 | __clear_bit(stid, t->stid_bmap); |
3097 | else | 3046 | else |
3098 | bitmap_release_region(t->stid_bmap, stid, 2); | 3047 | bitmap_release_region(t->stid_bmap, stid, 2); |
3099 | t->stid_tab[stid].data = NULL; | 3048 | t->stid_tab[stid].data = NULL; |
3100 | t->stids_in_use--; | 3049 | if (family == PF_INET) |
3050 | t->stids_in_use--; | ||
3051 | else | ||
3052 | t->stids_in_use -= 4; | ||
3101 | spin_unlock_bh(&t->stid_lock); | 3053 | spin_unlock_bh(&t->stid_lock); |
3102 | } | 3054 | } |
3103 | EXPORT_SYMBOL(cxgb4_free_stid); | 3055 | EXPORT_SYMBOL(cxgb4_free_stid); |
@@ -3200,6 +3152,7 @@ static int tid_init(struct tid_info *t) | |||
3200 | size_t size; | 3152 | size_t size; |
3201 | unsigned int stid_bmap_size; | 3153 | unsigned int stid_bmap_size; |
3202 | unsigned int natids = t->natids; | 3154 | unsigned int natids = t->natids; |
3155 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
3203 | 3156 | ||
3204 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); | 3157 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); |
3205 | size = t->ntids * sizeof(*t->tid_tab) + | 3158 | size = t->ntids * sizeof(*t->tid_tab) + |
@@ -3233,6 +3186,11 @@ static int tid_init(struct tid_info *t) | |||
3233 | t->afree = t->atid_tab; | 3186 | t->afree = t->atid_tab; |
3234 | } | 3187 | } |
3235 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); | 3188 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); |
3189 | /* Reserve stid 0 for T4/T5 adapters */ | ||
3190 | if (!t->stid_base && | ||
3191 | (is_t4(adap->params.chip) || is_t5(adap->params.chip))) | ||
3192 | __set_bit(0, t->stid_bmap); | ||
3193 | |||
3236 | return 0; | 3194 | return 0; |
3237 | } | 3195 | } |
3238 | 3196 | ||
@@ -3419,7 +3377,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) | |||
3419 | 3377 | ||
3420 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | 3378 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); |
3421 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); | 3379 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); |
3422 | if (is_t4(adap->chip)) { | 3380 | if (is_t4(adap->params.chip)) { |
3423 | lp_count = G_LP_COUNT(v1); | 3381 | lp_count = G_LP_COUNT(v1); |
3424 | hp_count = G_HP_COUNT(v1); | 3382 | hp_count = G_HP_COUNT(v1); |
3425 | } else { | 3383 | } else { |
@@ -3588,7 +3546,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs) | |||
3588 | do { | 3546 | do { |
3589 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | 3547 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); |
3590 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); | 3548 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); |
3591 | if (is_t4(adap->chip)) { | 3549 | if (is_t4(adap->params.chip)) { |
3592 | lp_count = G_LP_COUNT(v1); | 3550 | lp_count = G_LP_COUNT(v1); |
3593 | hp_count = G_HP_COUNT(v1); | 3551 | hp_count = G_HP_COUNT(v1); |
3594 | } else { | 3552 | } else { |
@@ -3708,7 +3666,7 @@ static void process_db_drop(struct work_struct *work) | |||
3708 | 3666 | ||
3709 | adap = container_of(work, struct adapter, db_drop_task); | 3667 | adap = container_of(work, struct adapter, db_drop_task); |
3710 | 3668 | ||
3711 | if (is_t4(adap->chip)) { | 3669 | if (is_t4(adap->params.chip)) { |
3712 | disable_dbs(adap); | 3670 | disable_dbs(adap); |
3713 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); | 3671 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); |
3714 | drain_db_fifo(adap, 1); | 3672 | drain_db_fifo(adap, 1); |
@@ -3753,7 +3711,7 @@ static void process_db_drop(struct work_struct *work) | |||
3753 | 3711 | ||
3754 | void t4_db_full(struct adapter *adap) | 3712 | void t4_db_full(struct adapter *adap) |
3755 | { | 3713 | { |
3756 | if (is_t4(adap->chip)) { | 3714 | if (is_t4(adap->params.chip)) { |
3757 | t4_set_reg_field(adap, SGE_INT_ENABLE3, | 3715 | t4_set_reg_field(adap, SGE_INT_ENABLE3, |
3758 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); | 3716 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); |
3759 | queue_work(workq, &adap->db_full_task); | 3717 | queue_work(workq, &adap->db_full_task); |
@@ -3762,7 +3720,7 @@ void t4_db_full(struct adapter *adap) | |||
3762 | 3720 | ||
3763 | void t4_db_dropped(struct adapter *adap) | 3721 | void t4_db_dropped(struct adapter *adap) |
3764 | { | 3722 | { |
3765 | if (is_t4(adap->chip)) | 3723 | if (is_t4(adap->params.chip)) |
3766 | queue_work(workq, &adap->db_drop_task); | 3724 | queue_work(workq, &adap->db_drop_task); |
3767 | } | 3725 | } |
3768 | 3726 | ||
@@ -3789,7 +3747,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
3789 | lli.nchan = adap->params.nports; | 3747 | lli.nchan = adap->params.nports; |
3790 | lli.nports = adap->params.nports; | 3748 | lli.nports = adap->params.nports; |
3791 | lli.wr_cred = adap->params.ofldq_wr_cred; | 3749 | lli.wr_cred = adap->params.ofldq_wr_cred; |
3792 | lli.adapter_type = adap->params.rev; | 3750 | lli.adapter_type = adap->params.chip; |
3793 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); | 3751 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); |
3794 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( | 3752 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( |
3795 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> | 3753 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> |
@@ -3797,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
3797 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( | 3755 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( |
3798 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> | 3756 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> |
3799 | (adap->fn * 4)); | 3757 | (adap->fn * 4)); |
3800 | lli.filt_mode = adap->filter_mode; | 3758 | lli.filt_mode = adap->params.tp.vlan_pri_map; |
3801 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ | 3759 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ |
3802 | for (i = 0; i < NCHAN; i++) | 3760 | for (i = 0; i < NCHAN; i++) |
3803 | lli.tx_modq[i] = i; | 3761 | lli.tx_modq[i] = i; |
@@ -4245,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
4245 | adap = netdev2adap(dev); | 4203 | adap = netdev2adap(dev); |
4246 | 4204 | ||
4247 | /* Adjust stid to correct filter index */ | 4205 | /* Adjust stid to correct filter index */ |
4248 | stid -= adap->tids.nstids; | 4206 | stid -= adap->tids.sftid_base; |
4249 | stid += adap->tids.nftids; | 4207 | stid += adap->tids.nftids; |
4250 | 4208 | ||
4251 | /* Check to make sure the filter requested is writable ... | 4209 | /* Check to make sure the filter requested is writable ... |
@@ -4271,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
4271 | f->fs.val.lip[i] = val[i]; | 4229 | f->fs.val.lip[i] = val[i]; |
4272 | f->fs.mask.lip[i] = ~0; | 4230 | f->fs.mask.lip[i] = ~0; |
4273 | } | 4231 | } |
4274 | if (adap->filter_mode & F_PORT) { | 4232 | if (adap->params.tp.vlan_pri_map & F_PORT) { |
4275 | f->fs.val.iport = port; | 4233 | f->fs.val.iport = port; |
4276 | f->fs.mask.iport = mask; | 4234 | f->fs.mask.iport = mask; |
4277 | } | 4235 | } |
4278 | } | 4236 | } |
4279 | 4237 | ||
4238 | if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { | ||
4239 | f->fs.val.proto = IPPROTO_TCP; | ||
4240 | f->fs.mask.proto = ~0; | ||
4241 | } | ||
4242 | |||
4280 | f->fs.dirsteer = 1; | 4243 | f->fs.dirsteer = 1; |
4281 | f->fs.iq = queue; | 4244 | f->fs.iq = queue; |
4282 | /* Mark filter as locked */ | 4245 | /* Mark filter as locked */ |
@@ -4303,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, | |||
4303 | adap = netdev2adap(dev); | 4266 | adap = netdev2adap(dev); |
4304 | 4267 | ||
4305 | /* Adjust stid to correct filter index */ | 4268 | /* Adjust stid to correct filter index */ |
4306 | stid -= adap->tids.nstids; | 4269 | stid -= adap->tids.sftid_base; |
4307 | stid += adap->tids.nftids; | 4270 | stid += adap->tids.nftids; |
4308 | 4271 | ||
4309 | f = &adap->tids.ftid_tab[stid]; | 4272 | f = &adap->tids.ftid_tab[stid]; |
@@ -4483,7 +4446,7 @@ static void setup_memwin(struct adapter *adap) | |||
4483 | u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; | 4446 | u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; |
4484 | 4447 | ||
4485 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ | 4448 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ |
4486 | if (is_t4(adap->chip)) { | 4449 | if (is_t4(adap->params.chip)) { |
4487 | mem_win0_base = bar0 + MEMWIN0_BASE; | 4450 | mem_win0_base = bar0 + MEMWIN0_BASE; |
4488 | mem_win1_base = bar0 + MEMWIN1_BASE; | 4451 | mem_win1_base = bar0 + MEMWIN1_BASE; |
4489 | mem_win2_base = bar0 + MEMWIN2_BASE; | 4452 | mem_win2_base = bar0 + MEMWIN2_BASE; |
@@ -4668,8 +4631,10 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4668 | const struct firmware *cf; | 4631 | const struct firmware *cf; |
4669 | unsigned long mtype = 0, maddr = 0; | 4632 | unsigned long mtype = 0, maddr = 0; |
4670 | u32 finiver, finicsum, cfcsum; | 4633 | u32 finiver, finicsum, cfcsum; |
4671 | int ret, using_flash; | 4634 | int ret; |
4635 | int config_issued = 0; | ||
4672 | char *fw_config_file, fw_config_file_path[256]; | 4636 | char *fw_config_file, fw_config_file_path[256]; |
4637 | char *config_name = NULL; | ||
4673 | 4638 | ||
4674 | /* | 4639 | /* |
4675 | * Reset device if necessary. | 4640 | * Reset device if necessary. |
@@ -4686,9 +4651,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4686 | * then use that. Otherwise, use the configuration file stored | 4651 | * then use that. Otherwise, use the configuration file stored |
4687 | * in the adapter flash ... | 4652 | * in the adapter flash ... |
4688 | */ | 4653 | */ |
4689 | switch (CHELSIO_CHIP_VERSION(adapter->chip)) { | 4654 | switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { |
4690 | case CHELSIO_T4: | 4655 | case CHELSIO_T4: |
4691 | fw_config_file = FW_CFNAME; | 4656 | fw_config_file = FW4_CFNAME; |
4692 | break; | 4657 | break; |
4693 | case CHELSIO_T5: | 4658 | case CHELSIO_T5: |
4694 | fw_config_file = FW5_CFNAME; | 4659 | fw_config_file = FW5_CFNAME; |
@@ -4702,13 +4667,16 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4702 | 4667 | ||
4703 | ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); | 4668 | ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); |
4704 | if (ret < 0) { | 4669 | if (ret < 0) { |
4705 | using_flash = 1; | 4670 | config_name = "On FLASH"; |
4706 | mtype = FW_MEMTYPE_CF_FLASH; | 4671 | mtype = FW_MEMTYPE_CF_FLASH; |
4707 | maddr = t4_flash_cfg_addr(adapter); | 4672 | maddr = t4_flash_cfg_addr(adapter); |
4708 | } else { | 4673 | } else { |
4709 | u32 params[7], val[7]; | 4674 | u32 params[7], val[7]; |
4710 | 4675 | ||
4711 | using_flash = 0; | 4676 | sprintf(fw_config_file_path, |
4677 | "/lib/firmware/%s", fw_config_file); | ||
4678 | config_name = fw_config_file_path; | ||
4679 | |||
4712 | if (cf->size >= FLASH_CFG_MAX_SIZE) | 4680 | if (cf->size >= FLASH_CFG_MAX_SIZE) |
4713 | ret = -ENOMEM; | 4681 | ret = -ENOMEM; |
4714 | else { | 4682 | else { |
@@ -4776,6 +4744,26 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4776 | FW_LEN16(caps_cmd)); | 4744 | FW_LEN16(caps_cmd)); |
4777 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), | 4745 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), |
4778 | &caps_cmd); | 4746 | &caps_cmd); |
4747 | |||
4748 | /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware | ||
4749 | * Configuration File in FLASH), our last gasp effort is to use the | ||
4750 | * Firmware Configuration File which is embedded in the firmware. A | ||
4751 | * very few early versions of the firmware didn't have one embedded | ||
4752 | * but we can ignore those. | ||
4753 | */ | ||
4754 | if (ret == -ENOENT) { | ||
4755 | memset(&caps_cmd, 0, sizeof(caps_cmd)); | ||
4756 | caps_cmd.op_to_write = | ||
4757 | htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
4758 | FW_CMD_REQUEST | | ||
4759 | FW_CMD_READ); | ||
4760 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); | ||
4761 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, | ||
4762 | sizeof(caps_cmd), &caps_cmd); | ||
4763 | config_name = "Firmware Default"; | ||
4764 | } | ||
4765 | |||
4766 | config_issued = 1; | ||
4779 | if (ret < 0) | 4767 | if (ret < 0) |
4780 | goto bye; | 4768 | goto bye; |
4781 | 4769 | ||
@@ -4816,7 +4804,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4816 | if (ret < 0) | 4804 | if (ret < 0) |
4817 | goto bye; | 4805 | goto bye; |
4818 | 4806 | ||
4819 | sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); | ||
4820 | /* | 4807 | /* |
4821 | * Return successfully and note that we're operating with parameters | 4808 | * Return successfully and note that we're operating with parameters |
4822 | * not supplied by the driver, rather than from hard-wired | 4809 | * not supplied by the driver, rather than from hard-wired |
@@ -4824,11 +4811,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4824 | */ | 4811 | */ |
4825 | adapter->flags |= USING_SOFT_PARAMS; | 4812 | adapter->flags |= USING_SOFT_PARAMS; |
4826 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ | 4813 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ |
4827 | "Configuration File %s, version %#x, computed checksum %#x\n", | 4814 | "Configuration File \"%s\", version %#x, computed checksum %#x\n", |
4828 | (using_flash | 4815 | config_name, finiver, cfcsum); |
4829 | ? "in device FLASH" | ||
4830 | : fw_config_file_path), | ||
4831 | finiver, cfcsum); | ||
4832 | return 0; | 4816 | return 0; |
4833 | 4817 | ||
4834 | /* | 4818 | /* |
@@ -4837,9 +4821,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4837 | * want to issue a warning since this is fairly common.) | 4821 | * want to issue a warning since this is fairly common.) |
4838 | */ | 4822 | */ |
4839 | bye: | 4823 | bye: |
4840 | if (ret != -ENOENT) | 4824 | if (config_issued && ret != -ENOENT) |
4841 | dev_warn(adapter->pdev_dev, "Configuration file error %d\n", | 4825 | dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", |
4842 | -ret); | 4826 | config_name, -ret); |
4843 | return ret; | 4827 | return ret; |
4844 | } | 4828 | } |
4845 | 4829 | ||
@@ -5086,6 +5070,47 @@ bye: | |||
5086 | return ret; | 5070 | return ret; |
5087 | } | 5071 | } |
5088 | 5072 | ||
5073 | static struct fw_info fw_info_array[] = { | ||
5074 | { | ||
5075 | .chip = CHELSIO_T4, | ||
5076 | .fs_name = FW4_CFNAME, | ||
5077 | .fw_mod_name = FW4_FNAME, | ||
5078 | .fw_hdr = { | ||
5079 | .chip = FW_HDR_CHIP_T4, | ||
5080 | .fw_ver = __cpu_to_be32(FW_VERSION(T4)), | ||
5081 | .intfver_nic = FW_INTFVER(T4, NIC), | ||
5082 | .intfver_vnic = FW_INTFVER(T4, VNIC), | ||
5083 | .intfver_ri = FW_INTFVER(T4, RI), | ||
5084 | .intfver_iscsi = FW_INTFVER(T4, ISCSI), | ||
5085 | .intfver_fcoe = FW_INTFVER(T4, FCOE), | ||
5086 | }, | ||
5087 | }, { | ||
5088 | .chip = CHELSIO_T5, | ||
5089 | .fs_name = FW5_CFNAME, | ||
5090 | .fw_mod_name = FW5_FNAME, | ||
5091 | .fw_hdr = { | ||
5092 | .chip = FW_HDR_CHIP_T5, | ||
5093 | .fw_ver = __cpu_to_be32(FW_VERSION(T5)), | ||
5094 | .intfver_nic = FW_INTFVER(T5, NIC), | ||
5095 | .intfver_vnic = FW_INTFVER(T5, VNIC), | ||
5096 | .intfver_ri = FW_INTFVER(T5, RI), | ||
5097 | .intfver_iscsi = FW_INTFVER(T5, ISCSI), | ||
5098 | .intfver_fcoe = FW_INTFVER(T5, FCOE), | ||
5099 | }, | ||
5100 | } | ||
5101 | }; | ||
5102 | |||
5103 | static struct fw_info *find_fw_info(int chip) | ||
5104 | { | ||
5105 | int i; | ||
5106 | |||
5107 | for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { | ||
5108 | if (fw_info_array[i].chip == chip) | ||
5109 | return &fw_info_array[i]; | ||
5110 | } | ||
5111 | return NULL; | ||
5112 | } | ||
5113 | |||
5089 | /* | 5114 | /* |
5090 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | 5115 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. |
5091 | */ | 5116 | */ |
@@ -5096,7 +5121,7 @@ static int adap_init0(struct adapter *adap) | |||
5096 | enum dev_state state; | 5121 | enum dev_state state; |
5097 | u32 params[7], val[7]; | 5122 | u32 params[7], val[7]; |
5098 | struct fw_caps_config_cmd caps_cmd; | 5123 | struct fw_caps_config_cmd caps_cmd; |
5099 | int reset = 1, j; | 5124 | int reset = 1; |
5100 | 5125 | ||
5101 | /* | 5126 | /* |
5102 | * Contact FW, advertising Master capability (and potentially forcing | 5127 | * Contact FW, advertising Master capability (and potentially forcing |
@@ -5123,44 +5148,54 @@ static int adap_init0(struct adapter *adap) | |||
5123 | * later reporting and B. to warn if the currently loaded firmware | 5148 | * later reporting and B. to warn if the currently loaded firmware |
5124 | * is excessively mismatched relative to the driver.) | 5149 | * is excessively mismatched relative to the driver.) |
5125 | */ | 5150 | */ |
5126 | ret = t4_check_fw_version(adap); | 5151 | t4_get_fw_version(adap, &adap->params.fw_vers); |
5127 | 5152 | t4_get_tp_version(adap, &adap->params.tp_vers); | |
5128 | /* The error code -EFAULT is returned by t4_check_fw_version() if | ||
5129 | * firmware on adapter < supported firmware. If firmware on adapter | ||
5130 | * is too old (not supported by driver) and we're the MASTER_PF set | ||
5131 | * adapter state to DEV_STATE_UNINIT to force firmware upgrade | ||
5132 | * and reinitialization. | ||
5133 | */ | ||
5134 | if ((adap->flags & MASTER_PF) && ret == -EFAULT) | ||
5135 | state = DEV_STATE_UNINIT; | ||
5136 | if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { | 5153 | if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { |
5137 | if (ret == -EINVAL || ret == -EFAULT || ret > 0) { | 5154 | struct fw_info *fw_info; |
5138 | if (upgrade_fw(adap) >= 0) { | 5155 | struct fw_hdr *card_fw; |
5139 | /* | 5156 | const struct firmware *fw; |
5140 | * Note that the chip was reset as part of the | 5157 | const u8 *fw_data = NULL; |
5141 | * firmware upgrade so we don't reset it again | 5158 | unsigned int fw_size = 0; |
5142 | * below and grab the new firmware version. | 5159 | |
5143 | */ | 5160 | /* This is the firmware whose headers the driver was compiled |
5144 | reset = 0; | 5161 | * against |
5145 | ret = t4_check_fw_version(adap); | 5162 | */ |
5146 | } else | 5163 | fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); |
5147 | if (ret == -EFAULT) { | 5164 | if (fw_info == NULL) { |
5148 | /* | 5165 | dev_err(adap->pdev_dev, |
5149 | * Firmware is old but still might | 5166 | "unable to get firmware info for chip %d.\n", |
5150 | * work if we force reinitialization | 5167 | CHELSIO_CHIP_VERSION(adap->params.chip)); |
5151 | * of the adapter. Ignoring FW upgrade | 5168 | return -EINVAL; |
5152 | * failure. | ||
5153 | */ | ||
5154 | dev_warn(adap->pdev_dev, | ||
5155 | "Ignoring firmware upgrade " | ||
5156 | "failure, and forcing driver " | ||
5157 | "to reinitialize the " | ||
5158 | "adapter.\n"); | ||
5159 | ret = 0; | ||
5160 | } | ||
5161 | } | 5169 | } |
5170 | |||
5171 | /* allocate memory to read the header of the firmware on the | ||
5172 | * card | ||
5173 | */ | ||
5174 | card_fw = t4_alloc_mem(sizeof(*card_fw)); | ||
5175 | |||
5176 | /* Get FW from /lib/firmware/ */ | ||
5177 | ret = request_firmware(&fw, fw_info->fw_mod_name, | ||
5178 | adap->pdev_dev); | ||
5179 | if (ret < 0) { | ||
5180 | dev_err(adap->pdev_dev, | ||
5181 | "unable to load firmware image %s, error %d\n", | ||
5182 | fw_info->fw_mod_name, ret); | ||
5183 | } else { | ||
5184 | fw_data = fw->data; | ||
5185 | fw_size = fw->size; | ||
5186 | } | ||
5187 | |||
5188 | /* upgrade FW logic */ | ||
5189 | ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, | ||
5190 | state, &reset); | ||
5191 | |||
5192 | /* Cleaning up */ | ||
5193 | if (fw != NULL) | ||
5194 | release_firmware(fw); | ||
5195 | t4_free_mem(card_fw); | ||
5196 | |||
5162 | if (ret < 0) | 5197 | if (ret < 0) |
5163 | return ret; | 5198 | goto bye; |
5164 | } | 5199 | } |
5165 | 5200 | ||
5166 | /* | 5201 | /* |
@@ -5245,7 +5280,7 @@ static int adap_init0(struct adapter *adap) | |||
5245 | if (ret == -ENOENT) { | 5280 | if (ret == -ENOENT) { |
5246 | dev_info(adap->pdev_dev, | 5281 | dev_info(adap->pdev_dev, |
5247 | "No Configuration File present " | 5282 | "No Configuration File present " |
5248 | "on adapter. Using hard-wired " | 5283 | "on adapter. Using hard-wired " |
5249 | "configuration parameters.\n"); | 5284 | "configuration parameters.\n"); |
5250 | ret = adap_init0_no_config(adap, reset); | 5285 | ret = adap_init0_no_config(adap, reset); |
5251 | } | 5286 | } |
@@ -5428,21 +5463,11 @@ static int adap_init0(struct adapter *adap) | |||
5428 | /* | 5463 | /* |
5429 | * These are finalized by FW initialization, load their values now. | 5464 | * These are finalized by FW initialization, load their values now. |
5430 | */ | 5465 | */ |
5431 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
5432 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
5433 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
5434 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); | 5466 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); |
5435 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | 5467 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, |
5436 | adap->params.b_wnd); | 5468 | adap->params.b_wnd); |
5437 | 5469 | ||
5438 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | 5470 | t4_init_tp_params(adap); |
5439 | for (j = 0; j < NCHAN; j++) | ||
5440 | adap->params.tp.tx_modq[j] = j; | ||
5441 | |||
5442 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
5443 | &adap->filter_mode, 1, | ||
5444 | TP_VLAN_PRI_MAP); | ||
5445 | |||
5446 | adap->flags |= FW_OK; | 5471 | adap->flags |= FW_OK; |
5447 | return 0; | 5472 | return 0; |
5448 | 5473 | ||
@@ -5787,7 +5812,7 @@ static void print_port_info(const struct net_device *dev) | |||
5787 | 5812 | ||
5788 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", | 5813 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", |
5789 | adap->params.vpd.id, | 5814 | adap->params.vpd.id, |
5790 | CHELSIO_CHIP_RELEASE(adap->params.rev), buf, | 5815 | CHELSIO_CHIP_RELEASE(adap->params.chip), buf, |
5791 | is_offload(adap) ? "R" : "", adap->params.pci.width, spd, | 5816 | is_offload(adap) ? "R" : "", adap->params.pci.width, spd, |
5792 | (adap->flags & USING_MSIX) ? " MSI-X" : | 5817 | (adap->flags & USING_MSIX) ? " MSI-X" : |
5793 | (adap->flags & USING_MSI) ? " MSI" : ""); | 5818 | (adap->flags & USING_MSI) ? " MSI" : ""); |
@@ -5910,7 +5935,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5910 | if (err) | 5935 | if (err) |
5911 | goto out_unmap_bar0; | 5936 | goto out_unmap_bar0; |
5912 | 5937 | ||
5913 | if (!is_t4(adapter->chip)) { | 5938 | if (!is_t4(adapter->params.chip)) { |
5914 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; | 5939 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; |
5915 | qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, | 5940 | qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, |
5916 | SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); | 5941 | SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); |
@@ -6064,7 +6089,7 @@ sriov: | |||
6064 | out_free_dev: | 6089 | out_free_dev: |
6065 | free_some_resources(adapter); | 6090 | free_some_resources(adapter); |
6066 | out_unmap_bar: | 6091 | out_unmap_bar: |
6067 | if (!is_t4(adapter->chip)) | 6092 | if (!is_t4(adapter->params.chip)) |
6068 | iounmap(adapter->bar2); | 6093 | iounmap(adapter->bar2); |
6069 | out_unmap_bar0: | 6094 | out_unmap_bar0: |
6070 | iounmap(adapter->regs); | 6095 | iounmap(adapter->regs); |
@@ -6116,7 +6141,7 @@ static void remove_one(struct pci_dev *pdev) | |||
6116 | 6141 | ||
6117 | free_some_resources(adapter); | 6142 | free_some_resources(adapter); |
6118 | iounmap(adapter->regs); | 6143 | iounmap(adapter->regs); |
6119 | if (!is_t4(adapter->chip)) | 6144 | if (!is_t4(adapter->params.chip)) |
6120 | iounmap(adapter->bar2); | 6145 | iounmap(adapter->bar2); |
6121 | kfree(adapter); | 6146 | kfree(adapter); |
6122 | pci_disable_pcie_error_reporting(pdev); | 6147 | pci_disable_pcie_error_reporting(pdev); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6f21f2451c30..4dd0a82533e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) | |||
131 | 131 | ||
132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) | 132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) |
133 | { | 133 | { |
134 | stid -= t->stid_base; | 134 | /* Is it a server filter TID? */ |
135 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
136 | stid -= t->sftid_base; | ||
137 | stid += t->nstids; | ||
138 | } else { | ||
139 | stid -= t->stid_base; | ||
140 | } | ||
141 | |||
135 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; | 142 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; |
136 | } | 143 | } |
137 | 144 | ||
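lookup_stid() here and cxgb4_free_stid() in cxgb4_main.c now share one index translation: regular server TIDs are relative to stid_base and occupy stid_tab[0 .. nstids), while server-filter TIDs are relative to sftid_base and occupy stid_tab[nstids .. nstids + nsftids). The same mapping, written as a stand-alone helper for clarity (hypothetical; the drivers open-code it as above):

static inline unsigned int stid_to_idx(const struct tid_info *t,
                                       unsigned int stid)
{
        /* server filter TIDs live in their own range above the regular ones */
        if (t->nsftids && stid >= t->sftid_base)
                return stid - t->sftid_base + t->nstids;
        return stid - t->stid_base;
}

With illustrative numbers stid_base = 128, nstids = 2048 and sftid_base = 2176, server-filter stid 2180 maps to index 2048 + 4 = 2052, while regular stid 130 maps to index 2.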
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 29878098101e..cb05be905def 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "l2t.h" | 45 | #include "l2t.h" |
46 | #include "t4_msg.h" | 46 | #include "t4_msg.h" |
47 | #include "t4fw_api.h" | 47 | #include "t4fw_api.h" |
48 | #include "t4_regs.h" | ||
48 | 49 | ||
49 | #define VLAN_NONE 0xfff | 50 | #define VLAN_NONE 0xfff |
50 | 51 | ||
@@ -411,6 +412,40 @@ done: | |||
411 | } | 412 | } |
412 | EXPORT_SYMBOL(cxgb4_l2t_get); | 413 | EXPORT_SYMBOL(cxgb4_l2t_get); |
413 | 414 | ||
415 | u64 cxgb4_select_ntuple(struct net_device *dev, | ||
416 | const struct l2t_entry *l2t) | ||
417 | { | ||
418 | struct adapter *adap = netdev2adap(dev); | ||
419 | struct tp_params *tp = &adap->params.tp; | ||
420 | u64 ntuple = 0; | ||
421 | |||
422 | /* Initialize each of the fields which we care about which are present | ||
423 | * in the Compressed Filter Tuple. | ||
424 | */ | ||
425 | if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) | ||
426 | ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; | ||
427 | |||
428 | if (tp->port_shift >= 0) | ||
429 | ntuple |= (u64)l2t->lport << tp->port_shift; | ||
430 | |||
431 | if (tp->protocol_shift >= 0) | ||
432 | ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; | ||
433 | |||
434 | if (tp->vnic_shift >= 0) { | ||
435 | u32 viid = cxgb4_port_viid(dev); | ||
436 | u32 vf = FW_VIID_VIN_GET(viid); | ||
437 | u32 pf = FW_VIID_PFN_GET(viid); | ||
438 | u32 vld = FW_VIID_VIVLD_GET(viid); | ||
439 | |||
440 | ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | | ||
441 | V_FT_VNID_ID_PF(pf) | | ||
442 | V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; | ||
443 | } | ||
444 | |||
445 | return ntuple; | ||
446 | } | ||
447 | EXPORT_SYMBOL(cxgb4_select_ntuple); | ||
448 | |||
414 | /* | 449 | /* |
415 | * Called when address resolution fails for an L2T entry to handle packets | 450 | * Called when address resolution fails for an L2T entry to handle packets |
416 | * on the arpq head. If a packet specifies a failure handler it is invoked, | 451 | * on the arpq head. If a packet specifies a failure handler it is invoked, |
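cxgb4_select_ntuple() packages the variable-layout Compressed Filter Tuple on behalf of upper-layer drivers, so they no longer need to know which fields TP_VLAN_PRI_MAP selected or where each one landed. A hedged usage sketch, with req, filter_tuple, egress_dev and l2t_entry all standing in as illustrative names:

/* in an offload driver building a connection/work request */
req->filter_tuple = cpu_to_be64(cxgb4_select_ntuple(egress_dev, l2t_entry));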
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 108c0f1fce1c..85eb5c71358d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h | |||
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | |||
98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | 98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, |
99 | const struct net_device *physdev, | 99 | const struct net_device *physdev, |
100 | unsigned int priority); | 100 | unsigned int priority); |
101 | 101 | u64 cxgb4_select_ntuple(struct net_device *dev, | |
102 | const struct l2t_entry *l2t); | ||
102 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); | 103 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); |
103 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); | 104 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); |
104 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | 105 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ac311f5f3eb9..cc3511a5cd0c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) | |||
509 | u32 val; | 509 | u32 val; |
510 | if (q->pend_cred >= 8) { | 510 | if (q->pend_cred >= 8) { |
511 | val = PIDX(q->pend_cred / 8); | 511 | val = PIDX(q->pend_cred / 8); |
512 | if (!is_t4(adap->chip)) | 512 | if (!is_t4(adap->params.chip)) |
513 | val |= DBTYPE(1); | 513 | val |= DBTYPE(1); |
514 | wmb(); | 514 | wmb(); |
515 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | | 515 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | |||
847 | wmb(); /* write descriptors before telling HW */ | 847 | wmb(); /* write descriptors before telling HW */ |
848 | spin_lock(&q->db_lock); | 848 | spin_lock(&q->db_lock); |
849 | if (!q->db_disabled) { | 849 | if (!q->db_disabled) { |
850 | if (is_t4(adap->chip)) { | 850 | if (is_t4(adap->params.chip)) { |
851 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | 851 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), |
852 | QID(q->cntxt_id) | PIDX(n)); | 852 | QID(q->cntxt_id) | PIDX(n)); |
853 | } else { | 853 | } else { |
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap, | |||
1596 | return 0; | 1596 | return 0; |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | if (is_t4(adap->chip)) | 1599 | if (is_t4(adap->params.chip)) |
1600 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); | 1600 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); |
1601 | else | 1601 | else |
1602 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); | 1602 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); |
@@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | |||
1661 | const struct cpl_rx_pkt *pkt; | 1661 | const struct cpl_rx_pkt *pkt; |
1662 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | 1662 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); |
1663 | struct sge *s = &q->adap->sge; | 1663 | struct sge *s = &q->adap->sge; |
1664 | int cpl_trace_pkt = is_t4(q->adap->chip) ? | 1664 | int cpl_trace_pkt = is_t4(q->adap->params.chip) ? |
1665 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; | 1665 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; |
1666 | 1666 | ||
1667 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) | 1667 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) |
@@ -2182,7 +2182,7 @@ err: | |||
2182 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | 2182 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) |
2183 | { | 2183 | { |
2184 | q->cntxt_id = id; | 2184 | q->cntxt_id = id; |
2185 | if (!is_t4(adap->chip)) { | 2185 | if (!is_t4(adap->params.chip)) { |
2186 | unsigned int s_qpp; | 2186 | unsigned int s_qpp; |
2187 | unsigned short udb_density; | 2187 | unsigned short udb_density; |
2188 | unsigned long qpshift; | 2188 | unsigned long qpshift; |
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap) | |||
2581 | #undef READ_FL_BUF | 2581 | #undef READ_FL_BUF |
2582 | 2582 | ||
2583 | if (fl_small_pg != PAGE_SIZE || | 2583 | if (fl_small_pg != PAGE_SIZE || |
2584 | (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || | 2584 | (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || |
2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { | 2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { |
2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", | 2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", |
2587 | fl_small_pg, fl_large_pg); | 2587 | fl_small_pg, fl_large_pg); |
@@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap) | |||
2641 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | 2641 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows |
2642 | * and generate an interrupt when this occurs so we can recover. | 2642 | * and generate an interrupt when this occurs so we can recover. |
2643 | */ | 2643 | */ |
2644 | if (is_t4(adap->chip)) { | 2644 | if (is_t4(adap->params.chip)) { |
2645 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, | 2645 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, |
2646 | V_HP_INT_THRESH(M_HP_INT_THRESH) | | 2646 | V_HP_INT_THRESH(M_HP_INT_THRESH) | |
2647 | V_LP_INT_THRESH(M_LP_INT_THRESH), | 2647 | V_LP_INT_THRESH(M_LP_INT_THRESH), |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4cbb2f9850be..e1413eacdbd2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
296 | u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; | 296 | u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; |
297 | u32 mc_bist_status_rdata, mc_bist_data_pattern; | 297 | u32 mc_bist_status_rdata, mc_bist_data_pattern; |
298 | 298 | ||
299 | if (is_t4(adap->chip)) { | 299 | if (is_t4(adap->params.chip)) { |
300 | mc_bist_cmd = MC_BIST_CMD; | 300 | mc_bist_cmd = MC_BIST_CMD; |
301 | mc_bist_cmd_addr = MC_BIST_CMD_ADDR; | 301 | mc_bist_cmd_addr = MC_BIST_CMD_ADDR; |
302 | mc_bist_cmd_len = MC_BIST_CMD_LEN; | 302 | mc_bist_cmd_len = MC_BIST_CMD_LEN; |
@@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
349 | u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; | 349 | u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; |
350 | u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; | 350 | u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; |
351 | 351 | ||
352 | if (is_t4(adap->chip)) { | 352 | if (is_t4(adap->params.chip)) { |
353 | edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); | 353 | edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); |
354 | edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); | 354 | edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); |
355 | edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); | 355 | edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); |
@@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
402 | static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) | 402 | static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) |
403 | { | 403 | { |
404 | int i; | 404 | int i; |
405 | u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); | 405 | u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); |
406 | 406 | ||
407 | /* | 407 | /* |
408 | * Setup offset into PCIE memory window. Address must be a | 408 | * Setup offset into PCIE memory window. Address must be a |
@@ -863,104 +863,169 @@ unlock: | |||
863 | } | 863 | } |
864 | 864 | ||
865 | /** | 865 | /** |
866 | * get_fw_version - read the firmware version | 866 | * t4_get_fw_version - read the firmware version |
867 | * @adapter: the adapter | 867 | * @adapter: the adapter |
868 | * @vers: where to place the version | 868 | * @vers: where to place the version |
869 | * | 869 | * |
870 | * Reads the FW version from flash. | 870 | * Reads the FW version from flash. |
871 | */ | 871 | */ |
872 | static int get_fw_version(struct adapter *adapter, u32 *vers) | 872 | int t4_get_fw_version(struct adapter *adapter, u32 *vers) |
873 | { | 873 | { |
874 | return t4_read_flash(adapter, adapter->params.sf_fw_start + | 874 | return t4_read_flash(adapter, FLASH_FW_START + |
875 | offsetof(struct fw_hdr, fw_ver), 1, vers, 0); | 875 | offsetof(struct fw_hdr, fw_ver), 1, |
876 | vers, 0); | ||
876 | } | 877 | } |
877 | 878 | ||
878 | /** | 879 | /** |
879 | * get_tp_version - read the TP microcode version | 880 | * t4_get_tp_version - read the TP microcode version |
880 | * @adapter: the adapter | 881 | * @adapter: the adapter |
881 | * @vers: where to place the version | 882 | * @vers: where to place the version |
882 | * | 883 | * |
883 | * Reads the TP microcode version from flash. | 884 | * Reads the TP microcode version from flash. |
884 | */ | 885 | */ |
885 | static int get_tp_version(struct adapter *adapter, u32 *vers) | 886 | int t4_get_tp_version(struct adapter *adapter, u32 *vers) |
886 | { | 887 | { |
887 | return t4_read_flash(adapter, adapter->params.sf_fw_start + | 888 | return t4_read_flash(adapter, FLASH_FW_START + |
888 | offsetof(struct fw_hdr, tp_microcode_ver), | 889 | offsetof(struct fw_hdr, tp_microcode_ver), |
889 | 1, vers, 0); | 890 | 1, vers, 0); |
890 | } | 891 | } |
891 | 892 | ||
892 | /** | 893 | /* Is the given firmware API compatible with the one the driver was compiled |
893 | * t4_check_fw_version - check if the FW is compatible with this driver | 894 | * with? |
894 | * @adapter: the adapter | ||
895 | * | ||
896 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
897 | * if there's exact match, a negative error if the version could not be | ||
898 | * read or there's a major version mismatch, and a positive value if the | ||
899 | * expected major version is found but there's a minor version mismatch. | ||
900 | */ | 895 | */ |
901 | int t4_check_fw_version(struct adapter *adapter) | 896 | static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) |
902 | { | 897 | { |
903 | u32 api_vers[2]; | ||
904 | int ret, major, minor, micro; | ||
905 | int exp_major, exp_minor, exp_micro; | ||
906 | 898 | ||
907 | ret = get_fw_version(adapter, &adapter->params.fw_vers); | 899 | /* short circuit if it's the exact same firmware version */ |
908 | if (!ret) | 900 | if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) |
909 | ret = get_tp_version(adapter, &adapter->params.tp_vers); | 901 | return 1; |
910 | if (!ret) | ||
911 | ret = t4_read_flash(adapter, adapter->params.sf_fw_start + | ||
912 | offsetof(struct fw_hdr, intfver_nic), | ||
913 | 2, api_vers, 1); | ||
914 | if (ret) | ||
915 | return ret; | ||
916 | 902 | ||
917 | major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); | 903 | #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) |
918 | minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); | 904 | if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && |
919 | micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); | 905 | SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) |
906 | return 1; | ||
907 | #undef SAME_INTF | ||
920 | 908 | ||
921 | switch (CHELSIO_CHIP_VERSION(adapter->chip)) { | 909 | return 0; |
922 | case CHELSIO_T4: | 910 | } |
923 | exp_major = FW_VERSION_MAJOR; | ||
924 | exp_minor = FW_VERSION_MINOR; | ||
925 | exp_micro = FW_VERSION_MICRO; | ||
926 | break; | ||
927 | case CHELSIO_T5: | ||
928 | exp_major = FW_VERSION_MAJOR_T5; | ||
929 | exp_minor = FW_VERSION_MINOR_T5; | ||
930 | exp_micro = FW_VERSION_MICRO_T5; | ||
931 | break; | ||
932 | default: | ||
933 | dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", | ||
934 | adapter->chip); | ||
935 | return -EINVAL; | ||
936 | } | ||
937 | 911 | ||
938 | memcpy(adapter->params.api_vers, api_vers, | 912 | /* The firmware in the filesystem is usable, but should it be installed? |
939 | sizeof(adapter->params.api_vers)); | 913 | * This routine explains itself in detail if it indicates the filesystem |
914 | * firmware should be installed. | ||
915 | */ | ||
916 | static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, | ||
917 | int k, int c) | ||
918 | { | ||
919 | const char *reason; | ||
940 | 920 | ||
941 | if (major < exp_major || (major == exp_major && minor < exp_minor) || | 921 | if (!card_fw_usable) { |
942 | (major == exp_major && minor == exp_minor && micro < exp_micro)) { | 922 | reason = "incompatible or unusable"; |
943 | dev_err(adapter->pdev_dev, | 923 | goto install; |
944 | "Card has firmware version %u.%u.%u, minimum " | ||
945 | "supported firmware is %u.%u.%u.\n", major, minor, | ||
946 | micro, exp_major, exp_minor, exp_micro); | ||
947 | return -EFAULT; | ||
948 | } | 924 | } |
949 | 925 | ||
950 | if (major != exp_major) { /* major mismatch - fail */ | 926 | if (k > c) { |
951 | dev_err(adapter->pdev_dev, | 927 | reason = "older than the version supported with this driver"; |
952 | "card FW has major version %u, driver wants %u\n", | 928 | goto install; |
953 | major, exp_major); | ||
954 | return -EINVAL; | ||
955 | } | 929 | } |
956 | 930 | ||
957 | if (minor == exp_minor && micro == exp_micro) | 931 | return 0; |
958 | return 0; /* perfect match */ | 932 | |
933 | install: | ||
934 | dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " | ||
935 | "installing firmware %u.%u.%u.%u on card.\n", | ||
936 | FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), | ||
937 | FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, | ||
938 | FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), | ||
939 | FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); | ||
959 | 940 | ||
960 | /* Minor/micro version mismatch. Report it but often it's OK. */ | ||
961 | return 1; | 941 | return 1; |
962 | } | 942 | } |
963 | 943 | ||
944 | int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | ||
945 | const u8 *fw_data, unsigned int fw_size, | ||
946 | struct fw_hdr *card_fw, enum dev_state state, | ||
947 | int *reset) | ||
948 | { | ||
949 | int ret, card_fw_usable, fs_fw_usable; | ||
950 | const struct fw_hdr *fs_fw; | ||
951 | const struct fw_hdr *drv_fw; | ||
952 | |||
953 | drv_fw = &fw_info->fw_hdr; | ||
954 | |||
955 | /* Read the header of the firmware on the card */ | ||
956 | ret = -t4_read_flash(adap, FLASH_FW_START, | ||
957 | sizeof(*card_fw) / sizeof(uint32_t), | ||
958 | (uint32_t *)card_fw, 1); | ||
959 | if (ret == 0) { | ||
960 | card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); | ||
961 | } else { | ||
962 | dev_err(adap->pdev_dev, | ||
963 | "Unable to read card's firmware header: %d\n", ret); | ||
964 | card_fw_usable = 0; | ||
965 | } | ||
966 | |||
967 | if (fw_data != NULL) { | ||
968 | fs_fw = (const void *)fw_data; | ||
969 | fs_fw_usable = fw_compatible(drv_fw, fs_fw); | ||
970 | } else { | ||
971 | fs_fw = NULL; | ||
972 | fs_fw_usable = 0; | ||
973 | } | ||
974 | |||
975 | if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && | ||
976 | (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { | ||
977 | /* Common case: the firmware on the card is an exact match and | ||
978 | * the filesystem one is an exact match too, or the filesystem | ||
979 | * one is absent/incompatible. | ||
980 | */ | ||
981 | } else if (fs_fw_usable && state == DEV_STATE_UNINIT && | ||
982 | should_install_fs_fw(adap, card_fw_usable, | ||
983 | be32_to_cpu(fs_fw->fw_ver), | ||
984 | be32_to_cpu(card_fw->fw_ver))) { | ||
985 | ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, | ||
986 | fw_size, 0); | ||
987 | if (ret != 0) { | ||
988 | dev_err(adap->pdev_dev, | ||
989 | "failed to install firmware: %d\n", ret); | ||
990 | goto bye; | ||
991 | } | ||
992 | |||
993 | /* Installed successfully, update the cached header too. */ | ||
994 | memcpy(card_fw, fs_fw, sizeof(*card_fw)); | ||
995 | card_fw_usable = 1; | ||
996 | *reset = 0; /* already reset as part of load_fw */ | ||
997 | } | ||
998 | |||
999 | if (!card_fw_usable) { | ||
1000 | uint32_t d, c, k; | ||
1001 | |||
1002 | d = be32_to_cpu(drv_fw->fw_ver); | ||
1003 | c = be32_to_cpu(card_fw->fw_ver); | ||
1004 | k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; | ||
1005 | |||
1006 | dev_err(adap->pdev_dev, "Cannot find a usable firmware: " | ||
1007 | "chip state %d, " | ||
1008 | "driver compiled with %d.%d.%d.%d, " | ||
1009 | "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", | ||
1010 | state, | ||
1011 | FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), | ||
1012 | FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), | ||
1013 | FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), | ||
1014 | FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), | ||
1015 | FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), | ||
1016 | FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); | ||
1017 | ret = EINVAL; | ||
1018 | goto bye; | ||
1019 | } | ||
1020 | |||
1021 | /* We're using whatever's on the card and it's known to be good. */ | ||
1022 | adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); | ||
1023 | adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); | ||
1024 | |||
1025 | bye: | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
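The hunk above replaces the old fixed version check with a three-way choice in t4_prep_fw(): prefer the firmware already on the card when it exactly matches the driver's bundled header, fall back to flashing the filesystem image when the device is still uninitialised and that image is usable, and fail with a full version report otherwise. The sketch below is a hypothetical user-space restatement of that decision order; the struct layout, compat() test and pick_fw() name are illustrative stand-ins, not the driver's API.

#include <stdio.h>

/* Simplified stand-in for fw_compatible(): two images are considered
 * interchangeable when they target the same chip and either carry the
 * same firmware version or export the same interface version. */
struct fw_id { unsigned chip, fw_ver, intf; };

static int compat(const struct fw_id *a, const struct fw_id *b)
{
	return a->chip == b->chip &&
	       (a->fw_ver == b->fw_ver || a->intf == b->intf);
}

/* Decision order mirrored from t4_prep_fw():
 *  1. card firmware is an exact match for the driver -> keep it;
 *  2. else, if a usable filesystem image exists, the device is still
 *     uninitialised, and the card image is unusable or older -> flash it;
 *  3. else use the card image if usable, or give up. */
static const char *pick_fw(const struct fw_id *drv, const struct fw_id *card,
			   const struct fw_id *fs, int uninit)
{
	int card_ok = compat(drv, card);
	int fs_ok = fs && compat(drv, fs);

	if (card_ok && card->fw_ver == drv->fw_ver &&
	    (!fs_ok || fs->fw_ver == drv->fw_ver))
		return "card";
	if (fs_ok && uninit && (!card_ok || fs->fw_ver > card->fw_ver))
		return "filesystem (flash it first)";
	return card_ok ? "card" : "none usable";
}

int main(void)
{
	struct fw_id drv  = { 5, 0x01060000, 1 };
	struct fw_id card = { 5, 0x01050000, 1 };
	struct fw_id fs   = { 5, 0x01060000, 1 };

	printf("use: %s\n", pick_fw(&drv, &card, &fs, 1));	/* filesystem */
	return 0;
}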
964 | /** | 1029 | /** |
965 | * t4_flash_erase_sectors - erase a range of flash sectors | 1030 | * t4_flash_erase_sectors - erase a range of flash sectors |
966 | * @adapter: the adapter | 1031 | * @adapter: the adapter |
@@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter) | |||
1368 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | 1433 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, |
1369 | pcie_port_intr_info) + | 1434 | pcie_port_intr_info) + |
1370 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | 1435 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, |
1371 | is_t4(adapter->chip) ? | 1436 | is_t4(adapter->params.chip) ? |
1372 | pcie_intr_info : t5_pcie_intr_info); | 1437 | pcie_intr_info : t5_pcie_intr_info); |
1373 | 1438 | ||
1374 | if (fat) | 1439 | if (fat) |
@@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port) | |||
1782 | { | 1847 | { |
1783 | u32 v, int_cause_reg; | 1848 | u32 v, int_cause_reg; |
1784 | 1849 | ||
1785 | if (is_t4(adap->chip)) | 1850 | if (is_t4(adap->params.chip)) |
1786 | int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); | 1851 | int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); |
1787 | else | 1852 | else |
1788 | int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); | 1853 | int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); |
@@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) | |||
2250 | 2315 | ||
2251 | #define GET_STAT(name) \ | 2316 | #define GET_STAT(name) \ |
2252 | t4_read_reg64(adap, \ | 2317 | t4_read_reg64(adap, \ |
2253 | (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ | 2318 | (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ |
2254 | T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) | 2319 | T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) |
2255 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | 2320 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) |
2256 | 2321 | ||
@@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, | |||
2332 | { | 2397 | { |
2333 | u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; | 2398 | u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; |
2334 | 2399 | ||
2335 | if (is_t4(adap->chip)) { | 2400 | if (is_t4(adap->params.chip)) { |
2336 | mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); | 2401 | mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); |
2337 | mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); | 2402 | mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); |
2338 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); | 2403 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); |
@@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |||
2374 | int i; | 2439 | int i; |
2375 | u32 port_cfg_reg; | 2440 | u32 port_cfg_reg; |
2376 | 2441 | ||
2377 | if (is_t4(adap->chip)) | 2442 | if (is_t4(adap->params.chip)) |
2378 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); | 2443 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); |
2379 | else | 2444 | else |
2380 | port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); | 2445 | port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); |
@@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |||
2387 | return -EINVAL; | 2452 | return -EINVAL; |
2388 | 2453 | ||
2389 | #define EPIO_REG(name) \ | 2454 | #define EPIO_REG(name) \ |
2390 | (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ | 2455 | (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ |
2391 | T5_PORT_REG(port, MAC_PORT_EPIO_##name)) | 2456 | T5_PORT_REG(port, MAC_PORT_EPIO_##name)) |
2392 | 2457 | ||
2393 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); | 2458 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); |
@@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, | |||
2474 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) | 2539 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) |
2475 | { | 2540 | { |
2476 | int i, off; | 2541 | int i, off; |
2477 | u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); | 2542 | u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); |
2478 | 2543 | ||
2479 | /* Align on a 2KB boundary. | 2544 | /* Align on a 2KB boundary. |
2480 | */ | 2545 | */ |
@@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, | |||
3306 | int i, ret; | 3371 | int i, ret; |
3307 | struct fw_vi_mac_cmd c; | 3372 | struct fw_vi_mac_cmd c; |
3308 | struct fw_vi_mac_exact *p; | 3373 | struct fw_vi_mac_exact *p; |
3309 | unsigned int max_naddr = is_t4(adap->chip) ? | 3374 | unsigned int max_naddr = is_t4(adap->params.chip) ? |
3310 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 3375 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
3311 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 3376 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
3312 | 3377 | ||
@@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | |||
3368 | int ret, mode; | 3433 | int ret, mode; |
3369 | struct fw_vi_mac_cmd c; | 3434 | struct fw_vi_mac_cmd c; |
3370 | struct fw_vi_mac_exact *p = c.u.exact; | 3435 | struct fw_vi_mac_exact *p = c.u.exact; |
3371 | unsigned int max_mac_addr = is_t4(adap->chip) ? | 3436 | unsigned int max_mac_addr = is_t4(adap->params.chip) ? |
3372 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 3437 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
3373 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 3438 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
3374 | 3439 | ||
@@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3699 | { | 3764 | { |
3700 | int ret, ver; | 3765 | int ret, ver; |
3701 | uint16_t device_id; | 3766 | uint16_t device_id; |
3767 | u32 pl_rev; | ||
3702 | 3768 | ||
3703 | ret = t4_wait_dev_ready(adapter); | 3769 | ret = t4_wait_dev_ready(adapter); |
3704 | if (ret < 0) | 3770 | if (ret < 0) |
3705 | return ret; | 3771 | return ret; |
3706 | 3772 | ||
3707 | get_pci_mode(adapter, &adapter->params.pci); | 3773 | get_pci_mode(adapter, &adapter->params.pci); |
3708 | adapter->params.rev = t4_read_reg(adapter, PL_REV); | 3774 | pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); |
3709 | 3775 | ||
3710 | ret = get_flash_params(adapter); | 3776 | ret = get_flash_params(adapter); |
3711 | if (ret < 0) { | 3777 | if (ret < 0) { |
@@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3717 | */ | 3783 | */ |
3718 | pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); | 3784 | pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); |
3719 | ver = device_id >> 12; | 3785 | ver = device_id >> 12; |
3786 | adapter->params.chip = 0; | ||
3720 | switch (ver) { | 3787 | switch (ver) { |
3721 | case CHELSIO_T4: | 3788 | case CHELSIO_T4: |
3722 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, | 3789 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); |
3723 | adapter->params.rev); | ||
3724 | break; | 3790 | break; |
3725 | case CHELSIO_T5: | 3791 | case CHELSIO_T5: |
3726 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, | 3792 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); |
3727 | adapter->params.rev); | ||
3728 | break; | 3793 | break; |
3729 | default: | 3794 | default: |
3730 | dev_err(adapter->pdev_dev, "Device %d is not supported\n", | 3795 | dev_err(adapter->pdev_dev, "Device %d is not supported\n", |
@@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3732 | return -EINVAL; | 3797 | return -EINVAL; |
3733 | } | 3798 | } |
3734 | 3799 | ||
3735 | /* Reassign the updated revision field */ | ||
3736 | adapter->params.rev = adapter->chip; | ||
3737 | |||
3738 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); | 3800 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); |
3739 | 3801 | ||
3740 | /* | 3802 | /* |
@@ -3746,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3746 | return 0; | 3808 | return 0; |
3747 | } | 3809 | } |
3748 | 3810 | ||
3811 | /** | ||
3812 | * t4_init_tp_params - initialize adap->params.tp | ||
3813 | * @adap: the adapter | ||
3814 | * | ||
3815 | * Initialize various fields of the adapter's TP Parameters structure. | ||
3816 | */ | ||
3817 | int t4_init_tp_params(struct adapter *adap) | ||
3818 | { | ||
3819 | int chan; | ||
3820 | u32 v; | ||
3821 | |||
3822 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
3823 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
3824 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
3825 | |||
3826 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | ||
3827 | for (chan = 0; chan < NCHAN; chan++) | ||
3828 | adap->params.tp.tx_modq[chan] = chan; | ||
3829 | |||
3830 | /* Cache the adapter's Compressed Filter Mode and global Ingress | ||
3831 | * Configuration. | ||
3832 | */ | ||
3833 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
3834 | &adap->params.tp.vlan_pri_map, 1, | ||
3835 | TP_VLAN_PRI_MAP); | ||
3836 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
3837 | &adap->params.tp.ingress_config, 1, | ||
3838 | TP_INGRESS_CONFIG); | ||
3839 | |||
3840 | /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field | ||
3841 | * shift positions of several elements of the Compressed Filter Tuple | ||
3842 | * for this adapter which we need frequently ... | ||
3843 | */ | ||
3844 | adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); | ||
3845 | adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); | ||
3846 | adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); | ||
3847 | adap->params.tp.protocol_shift = t4_filter_field_shift(adap, | ||
3848 | F_PROTOCOL); | ||
3849 | |||
3850 | /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID | ||
3851 | * represents the presence of an Outer VLAN instead of a VNIC ID. | ||
3852 | */ | ||
3853 | if ((adap->params.tp.ingress_config & F_VNIC) == 0) | ||
3854 | adap->params.tp.vnic_shift = -1; | ||
3855 | |||
3856 | return 0; | ||
3857 | } | ||
3858 | |||
3859 | /** | ||
3860 | * t4_filter_field_shift - calculate filter field shift | ||
3861 | * @adap: the adapter | ||
3862 | * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) | ||
3863 | * | ||
3864 | * Return the shift position of a filter field within the Compressed | ||
3865 | * Filter Tuple. The filter field is specified via its selection bit | ||
3866 | * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN. | ||
3867 | */ | ||
3868 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel) | ||
3869 | { | ||
3870 | unsigned int filter_mode = adap->params.tp.vlan_pri_map; | ||
3871 | unsigned int sel; | ||
3872 | int field_shift; | ||
3873 | |||
3874 | if ((filter_mode & filter_sel) == 0) | ||
3875 | return -1; | ||
3876 | |||
3877 | for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { | ||
3878 | switch (filter_mode & sel) { | ||
3879 | case F_FCOE: | ||
3880 | field_shift += W_FT_FCOE; | ||
3881 | break; | ||
3882 | case F_PORT: | ||
3883 | field_shift += W_FT_PORT; | ||
3884 | break; | ||
3885 | case F_VNIC_ID: | ||
3886 | field_shift += W_FT_VNIC_ID; | ||
3887 | break; | ||
3888 | case F_VLAN: | ||
3889 | field_shift += W_FT_VLAN; | ||
3890 | break; | ||
3891 | case F_TOS: | ||
3892 | field_shift += W_FT_TOS; | ||
3893 | break; | ||
3894 | case F_PROTOCOL: | ||
3895 | field_shift += W_FT_PROTOCOL; | ||
3896 | break; | ||
3897 | case F_ETHERTYPE: | ||
3898 | field_shift += W_FT_ETHERTYPE; | ||
3899 | break; | ||
3900 | case F_MACMATCH: | ||
3901 | field_shift += W_FT_MACMATCH; | ||
3902 | break; | ||
3903 | case F_MPSHITTYPE: | ||
3904 | field_shift += W_FT_MPSHITTYPE; | ||
3905 | break; | ||
3906 | case F_FRAGMENTATION: | ||
3907 | field_shift += W_FT_FRAGMENTATION; | ||
3908 | break; | ||
3909 | } | ||
3910 | } | ||
3911 | return field_shift; | ||
3912 | } | ||
3913 | |||
3749 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) | 3914 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) |
3750 | { | 3915 | { |
3751 | u8 addr[6]; | 3916 | u8 addr[6]; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index ef146c0ba481..4082522d8140 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -1092,6 +1092,11 @@ | |||
1092 | 1092 | ||
1093 | #define PL_REV 0x1943c | 1093 | #define PL_REV 0x1943c |
1094 | 1094 | ||
1095 | #define S_REV 0 | ||
1096 | #define M_REV 0xfU | ||
1097 | #define V_REV(x) ((x) << S_REV) | ||
1098 | #define G_REV(x) (((x) >> S_REV) & M_REV) | ||
1099 | |||
1095 | #define LE_DB_CONFIG 0x19c04 | 1100 | #define LE_DB_CONFIG 0x19c04 |
1096 | #define HASHEN 0x00100000U | 1101 | #define HASHEN 0x00100000U |
1097 | 1102 | ||
@@ -1166,10 +1171,50 @@ | |||
1166 | 1171 | ||
1167 | #define A_TP_TX_SCHED_PCMD 0x25 | 1172 | #define A_TP_TX_SCHED_PCMD 0x25 |
1168 | 1173 | ||
1174 | #define S_VNIC 11 | ||
1175 | #define V_VNIC(x) ((x) << S_VNIC) | ||
1176 | #define F_VNIC V_VNIC(1U) | ||
1177 | |||
1178 | #define S_FRAGMENTATION 9 | ||
1179 | #define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) | ||
1180 | #define F_FRAGMENTATION V_FRAGMENTATION(1U) | ||
1181 | |||
1182 | #define S_MPSHITTYPE 8 | ||
1183 | #define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) | ||
1184 | #define F_MPSHITTYPE V_MPSHITTYPE(1U) | ||
1185 | |||
1186 | #define S_MACMATCH 7 | ||
1187 | #define V_MACMATCH(x) ((x) << S_MACMATCH) | ||
1188 | #define F_MACMATCH V_MACMATCH(1U) | ||
1189 | |||
1190 | #define S_ETHERTYPE 6 | ||
1191 | #define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) | ||
1192 | #define F_ETHERTYPE V_ETHERTYPE(1U) | ||
1193 | |||
1194 | #define S_PROTOCOL 5 | ||
1195 | #define V_PROTOCOL(x) ((x) << S_PROTOCOL) | ||
1196 | #define F_PROTOCOL V_PROTOCOL(1U) | ||
1197 | |||
1198 | #define S_TOS 4 | ||
1199 | #define V_TOS(x) ((x) << S_TOS) | ||
1200 | #define F_TOS V_TOS(1U) | ||
1201 | |||
1202 | #define S_VLAN 3 | ||
1203 | #define V_VLAN(x) ((x) << S_VLAN) | ||
1204 | #define F_VLAN V_VLAN(1U) | ||
1205 | |||
1206 | #define S_VNIC_ID 2 | ||
1207 | #define V_VNIC_ID(x) ((x) << S_VNIC_ID) | ||
1208 | #define F_VNIC_ID V_VNIC_ID(1U) | ||
1209 | |||
1169 | #define S_PORT 1 | 1210 | #define S_PORT 1 |
1170 | #define V_PORT(x) ((x) << S_PORT) | 1211 | #define V_PORT(x) ((x) << S_PORT) |
1171 | #define F_PORT V_PORT(1U) | 1212 | #define F_PORT V_PORT(1U) |
1172 | 1213 | ||
1214 | #define S_FCOE 0 | ||
1215 | #define V_FCOE(x) ((x) << S_FCOE) | ||
1216 | #define F_FCOE V_FCOE(1U) | ||
1217 | |||
1173 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 | 1218 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 |
1174 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 | 1219 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 |
1175 | 1220 | ||
@@ -1199,4 +1244,46 @@ | |||
1199 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) | 1244 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) |
1200 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) | 1245 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) |
1201 | 1246 | ||
1247 | #define A_PL_VF_REV 0x4 | ||
1248 | #define A_PL_VF_WHOAMI 0x0 | ||
1249 | #define A_PL_VF_REVISION 0x8 | ||
1250 | |||
1251 | #define S_CHIPID 4 | ||
1252 | #define M_CHIPID 0xfU | ||
1253 | #define V_CHIPID(x) ((x) << S_CHIPID) | ||
1254 | #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) | ||
1255 | |||
1256 | /* TP_VLAN_PRI_MAP controls which subset of fields will be present in the | ||
1257 | * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP | ||
1258 | * selects for a particular field being present. These fields, when present | ||
1259 | * in the Compressed Filter Tuple, have the following widths in bits. | ||
1260 | */ | ||
1261 | #define W_FT_FCOE 1 | ||
1262 | #define W_FT_PORT 3 | ||
1263 | #define W_FT_VNIC_ID 17 | ||
1264 | #define W_FT_VLAN 17 | ||
1265 | #define W_FT_TOS 8 | ||
1266 | #define W_FT_PROTOCOL 8 | ||
1267 | #define W_FT_ETHERTYPE 16 | ||
1268 | #define W_FT_MACMATCH 9 | ||
1269 | #define W_FT_MPSHITTYPE 3 | ||
1270 | #define W_FT_FRAGMENTATION 1 | ||
1271 | |||
1272 | /* Some of the Compressed Filter Tuple fields have internal structure. These | ||
1273 | * bit shifts/masks describe those structures. All shifts are relative to the | ||
1274 | * base position of the fields within the Compressed Filter Tuple. | ||
1275 | */ | ||
1276 | #define S_FT_VLAN_VLD 16 | ||
1277 | #define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) | ||
1278 | #define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) | ||
1279 | |||
1280 | #define S_FT_VNID_ID_VF 0 | ||
1281 | #define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) | ||
1282 | |||
1283 | #define S_FT_VNID_ID_PF 7 | ||
1284 | #define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) | ||
1285 | |||
1286 | #define S_FT_VNID_ID_VLD 16 | ||
1287 | #define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) | ||
1288 | |||
1202 | #endif /* __T4_REGS_H */ | 1289 | #endif /* __T4_REGS_H */ |
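The W_FT_* widths above are exactly what t4_filter_field_shift(), added to t4_hw.c earlier in this diff, accumulates: every selection bit set in TP_VLAN_PRI_MAP below the requested one contributes its field width to the shift. A small user-space sketch of that accumulation, using the bit positions and widths from the defines above (everything else is illustrative):

#include <stdio.h>

/* Selection bit positions from TP_VLAN_PRI_MAP (see t4_regs.h above). */
enum {
	FCOE = 1 << 0, PORT = 1 << 1, VNIC_ID = 1 << 2, VLAN = 1 << 3,
	TOS = 1 << 4, PROTOCOL = 1 << 5, ETHERTYPE = 1 << 6,
	MACMATCH = 1 << 7, MPSHITTYPE = 1 << 8, FRAGMENTATION = 1 << 9
};

/* Field widths in the Compressed Filter Tuple, indexed by bit position
 * (W_FT_FCOE .. W_FT_FRAGMENTATION from the header above). */
static const int width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

/* Same accumulation as t4_filter_field_shift(): sum the widths of all
 * selected fields that sit below the requested selector bit. */
static int field_shift(unsigned filter_mode, unsigned filter_sel)
{
	int shift = 0, bit;

	if (!(filter_mode & filter_sel))
		return -1;		/* field not in the filter tuple */
	for (bit = 0; (1u << bit) < filter_sel; bit++)
		if (filter_mode & (1u << bit))
			shift += width[bit];
	return shift;
}

int main(void)
{
	/* Example filter mode: PORT + VNIC_ID + VLAN + PROTOCOL selected. */
	unsigned mode = PORT | VNIC_ID | VLAN | PROTOCOL;

	/* VLAN sits above PORT (3 bits) and VNIC_ID (17 bits) -> 20. */
	printf("vlan shift     = %d\n", field_shift(mode, VLAN));
	printf("protocol shift = %d\n", field_shift(mode, PROTOCOL));
	return 0;
}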
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6f77ac487743..74fea74ce0aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd { | |||
2157 | 2157 | ||
2158 | struct fw_hdr { | 2158 | struct fw_hdr { |
2159 | u8 ver; | 2159 | u8 ver; |
2160 | u8 reserved1; | 2160 | u8 chip; /* terminator chip type */ |
2161 | __be16 len512; /* bin length in units of 512-bytes */ | 2161 | __be16 len512; /* bin length in units of 512-bytes */ |
2162 | __be32 fw_ver; /* firmware version */ | 2162 | __be32 fw_ver; /* firmware version */ |
2163 | __be32 tp_microcode_ver; | 2163 | __be32 tp_microcode_ver; |
@@ -2176,6 +2176,11 @@ struct fw_hdr { | |||
2176 | __be32 reserved6[23]; | 2176 | __be32 reserved6[23]; |
2177 | }; | 2177 | }; |
2178 | 2178 | ||
2179 | enum fw_hdr_chip { | ||
2180 | FW_HDR_CHIP_T4, | ||
2181 | FW_HDR_CHIP_T5 | ||
2182 | }; | ||
2183 | |||
2179 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) | 2184 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) |
2180 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | 2185 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) |
2181 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | 2186 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) |
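fw_hdr.fw_ver packs the firmware's major, minor, micro and build numbers into a single 32-bit word, which is what the FW_HDR_FW_VER_*_GET() accessors above (and the new fw_compatible()/should_install_fs_fw() logic in t4_hw.c) take apart. A quick illustrative decode; the build component is assumed to live in the low byte, since its accessor is not shown in this hunk:

#include <stdio.h>
#include <stdint.h>

#define FW_VER_MAJOR(x)	(((x) >> 24) & 0xff)
#define FW_VER_MINOR(x)	(((x) >> 16) & 0xff)
#define FW_VER_MICRO(x)	(((x) >> 8) & 0xff)
#define FW_VER_BUILD(x)	((x) & 0xff)		/* assumed: low byte */

int main(void)
{
	uint32_t fw_ver = (1u << 24) | (6u << 16) | (2u << 8) | 0u;

	/* Prints "firmware 1.6.2.0" */
	printf("firmware %u.%u.%u.%u\n",
	       FW_VER_MAJOR(fw_ver), FW_VER_MINOR(fw_ver),
	       FW_VER_MICRO(fw_ver), FW_VER_BUILD(fw_ver));
	return 0;
}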
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index be5c7ef6ca93..68eaa9c88c7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | |||
@@ -344,7 +344,6 @@ struct adapter { | |||
344 | unsigned long registered_device_map; | 344 | unsigned long registered_device_map; |
345 | unsigned long open_device_map; | 345 | unsigned long open_device_map; |
346 | unsigned long flags; | 346 | unsigned long flags; |
347 | enum chip_type chip; | ||
348 | struct adapter_params params; | 347 | struct adapter_params params; |
349 | 348 | ||
350 | /* queue and interrupt resources */ | 349 | /* queue and interrupt resources */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5f90ec5f7519..0899c0983594 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) | |||
1064 | /* | 1064 | /* |
1065 | * Chip version 4, revision 0x3f (cxgb4vf). | 1065 | * Chip version 4, revision 0x3f (cxgb4vf). |
1066 | */ | 1066 | */ |
1067 | return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); | 1067 | return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | /* | 1070 | /* |
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev, | |||
1551 | reg_block_dump(adapter, regbuf, | 1551 | reg_block_dump(adapter, regbuf, |
1552 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, | 1552 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, |
1553 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); | 1553 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); |
1554 | |||
1555 | /* T5 adds new registers in the PL Register map. | ||
1556 | */ | ||
1554 | reg_block_dump(adapter, regbuf, | 1557 | reg_block_dump(adapter, regbuf, |
1555 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, | 1558 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, |
1556 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); | 1559 | T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) |
1560 | ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); | ||
1557 | reg_block_dump(adapter, regbuf, | 1561 | reg_block_dump(adapter, regbuf, |
1558 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, | 1562 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, |
1559 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); | 1563 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); |
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter) | |||
2087 | unsigned int ethqsets; | 2091 | unsigned int ethqsets; |
2088 | int err; | 2092 | int err; |
2089 | u32 param, val = 0; | 2093 | u32 param, val = 0; |
2094 | unsigned int chipid; | ||
2090 | 2095 | ||
2091 | /* | 2096 | /* |
2092 | * Wait for the device to become ready before proceeding ... | 2097 | * Wait for the device to become ready before proceeding ... |
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter) | |||
2114 | return err; | 2119 | return err; |
2115 | } | 2120 | } |
2116 | 2121 | ||
2122 | adapter->params.chip = 0; | ||
2117 | switch (adapter->pdev->device >> 12) { | 2123 | switch (adapter->pdev->device >> 12) { |
2118 | case CHELSIO_T4: | 2124 | case CHELSIO_T4: |
2119 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); | 2125 | adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); |
2120 | break; | 2126 | break; |
2121 | case CHELSIO_T5: | 2127 | case CHELSIO_T5: |
2122 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); | 2128 | chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); |
2129 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); | ||
2123 | break; | 2130 | break; |
2124 | } | 2131 | } |
2125 | 2132 | ||
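For a T5 VF the code above no longer hard-codes revision 0: it reads the new A_PL_VF_REV register and folds the revision into params.chip. A toy version of that extraction, using the S_REV/M_REV/G_REV and CHELSIO_CHIP_CODE definitions from this diff; the register value here is made up:

#include <stdio.h>

#define S_REV		0
#define M_REV		0xfU
#define G_REV(x)	(((x) >> S_REV) & M_REV)

#define CHELSIO_CHIP_CODE(version, revision)	(((version) << 4) | (revision))
#define CHELSIO_T5	0x5

int main(void)
{
	unsigned int pl_vf_rev = 0x1;	/* pretend read of A_PL_VF_REV */
	unsigned int chip = CHELSIO_CHIP_CODE(CHELSIO_T5, G_REV(pl_vf_rev));

	printf("params.chip = 0x%x\n", chip);	/* 0x51: T5, silicon rev 1 */
	return 0;
}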
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8475c4cda9e4..0a89963c48ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) | |||
537 | */ | 537 | */ |
538 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { | 538 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { |
539 | val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); | 539 | val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); |
540 | if (!is_t4(adapter->chip)) | 540 | if (!is_t4(adapter->params.chip)) |
541 | val |= DBTYPE(1); | 541 | val |= DBTYPE(1); |
542 | wmb(); | 542 | wmb(); |
543 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, | 543 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 53cbfed21d0b..61362450d05b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -39,21 +39,28 @@ | |||
39 | #include "../cxgb4/t4fw_api.h" | 39 | #include "../cxgb4/t4fw_api.h" |
40 | 40 | ||
41 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | 41 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) |
42 | #define CHELSIO_CHIP_VERSION(code) ((code) >> 4) | 42 | #define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) |
43 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | 43 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) |
44 | 44 | ||
45 | /* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: | ||
46 | * | ||
47 | * V = "4" for T4; "5" for T5, etc. or | ||
48 | * = "a" for T4 FPGA; "b" for T4 FPGA, etc. | ||
49 | * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs | ||
50 | * PP = adapter product designation | ||
51 | */ | ||
45 | #define CHELSIO_T4 0x4 | 52 | #define CHELSIO_T4 0x4 |
46 | #define CHELSIO_T5 0x5 | 53 | #define CHELSIO_T5 0x5 |
47 | 54 | ||
48 | enum chip_type { | 55 | enum chip_type { |
49 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), | 56 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), |
50 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | 57 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), |
51 | T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
52 | T4_FIRST_REV = T4_A1, | 58 | T4_FIRST_REV = T4_A1, |
53 | T4_LAST_REV = T4_A3, | 59 | T4_LAST_REV = T4_A2, |
54 | 60 | ||
55 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | 61 | T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), |
56 | T5_FIRST_REV = T5_A1, | 62 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), |
63 | T5_FIRST_REV = T5_A0, | ||
57 | T5_LAST_REV = T5_A1, | 64 | T5_LAST_REV = T5_A1, |
58 | }; | 65 | }; |
59 | 66 | ||
@@ -203,6 +210,7 @@ struct adapter_params { | |||
203 | struct vpd_params vpd; /* Vital Product Data */ | 210 | struct vpd_params vpd; /* Vital Product Data */ |
204 | struct rss_params rss; /* Receive Side Scaling */ | 211 | struct rss_params rss; /* Receive Side Scaling */ |
205 | struct vf_resources vfres; /* Virtual Function Resource limits */ | 212 | struct vf_resources vfres; /* Virtual Function Resource limits */ |
213 | enum chip_type chip; /* chip code */ | ||
206 | u8 nports; /* # of Ethernet "ports" */ | 214 | u8 nports; /* # of Ethernet "ports" */ |
207 | }; | 215 | }; |
208 | 216 | ||
@@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, | |||
253 | 261 | ||
254 | static inline int is_t4(enum chip_type chip) | 262 | static inline int is_t4(enum chip_type chip) |
255 | { | 263 | { |
256 | return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); | 264 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; |
257 | } | 265 | } |
258 | 266 | ||
259 | int t4vf_wait_dev_ready(struct adapter *); | 267 | int t4vf_wait_dev_ready(struct adapter *); |
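CHELSIO_CHIP_CODE() above packs the chip family into the high nibble and the silicon revision into the low nibble, and the reworked is_t4() now keys off the family nibble alone instead of a revision range, so new T5 steppings need no enum updates. A standalone illustration of that encoding (values copied from the header above):

#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision)	(((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code)		(((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code)		((code) & 0xf)

#define CHELSIO_T4	0x4
#define CHELSIO_T5	0x5

static int is_t4(int chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

int main(void)
{
	int t4_a2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2);	/* 0x42 */
	int t5_a1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1);	/* 0x51 */

	printf("T4 rev %d, is_t4 = %d\n", CHELSIO_CHIP_RELEASE(t4_a2), is_t4(t4_a2));
	printf("T5 rev %d, is_t4 = %d\n", CHELSIO_CHIP_RELEASE(t5_a1), is_t4(t5_a1));
	return 0;
}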
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9f96dc3bb112..d958c44341b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, | |||
1027 | unsigned nfilters = 0; | 1027 | unsigned nfilters = 0; |
1028 | unsigned int rem = naddr; | 1028 | unsigned int rem = naddr; |
1029 | struct fw_vi_mac_cmd cmd, rpl; | 1029 | struct fw_vi_mac_cmd cmd, rpl; |
1030 | unsigned int max_naddr = is_t4(adapter->chip) ? | 1030 | unsigned int max_naddr = is_t4(adapter->params.chip) ? |
1031 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 1031 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
1032 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 1032 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
1033 | 1033 | ||
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, | |||
1121 | struct fw_vi_mac_exact *p = &cmd.u.exact[0]; | 1121 | struct fw_vi_mac_exact *p = &cmd.u.exact[0]; |
1122 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | 1122 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, |
1123 | u.exact[1]), 16); | 1123 | u.exact[1]), 16); |
1124 | unsigned int max_naddr = is_t4(adapter->chip) ? | 1124 | unsigned int max_naddr = is_t4(adapter->params.chip) ? |
1125 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 1125 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
1126 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 1126 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
1127 | 1127 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f4825db5d179..4ccaf9af6fc9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
104 | #define BE3_MAX_RSS_QS 16 | 104 | #define BE3_MAX_RSS_QS 16 |
105 | #define BE3_MAX_TX_QS 16 | 105 | #define BE3_MAX_TX_QS 16 |
106 | #define BE3_MAX_EVT_QS 16 | 106 | #define BE3_MAX_EVT_QS 16 |
107 | #define BE3_SRIOV_MAX_EVT_QS 8 | ||
107 | 108 | ||
108 | #define MAX_RX_QS 32 | 109 | #define MAX_RX_QS 32 |
109 | #define MAX_EVT_QS 32 | 110 | #define MAX_EVT_QS 32 |
@@ -480,7 +481,7 @@ struct be_adapter { | |||
480 | struct list_head entry; | 481 | struct list_head entry; |
481 | 482 | ||
482 | u32 flash_status; | 483 | u32 flash_status; |
483 | struct completion flash_compl; | 484 | struct completion et_cmd_compl; |
484 | 485 | ||
485 | struct be_resources res; /* resources available for the func */ | 486 | struct be_resources res; /* resources available for the func */ |
486 | u16 num_vfs; /* Number of VFs provisioned by PF */ | 487 | u16 num_vfs; /* Number of VFs provisioned by PF */ |
@@ -503,6 +504,7 @@ struct be_adapter { | |||
503 | }; | 504 | }; |
504 | 505 | ||
505 | #define be_physfn(adapter) (!adapter->virtfn) | 506 | #define be_physfn(adapter) (!adapter->virtfn) |
507 | #define be_virtfn(adapter) (adapter->virtfn) | ||
506 | #define sriov_enabled(adapter) (adapter->num_vfs > 0) | 508 | #define sriov_enabled(adapter) (adapter->num_vfs > 0) |
507 | #define sriov_want(adapter) (be_physfn(adapter) && \ | 509 | #define sriov_want(adapter) (be_physfn(adapter) && \ |
508 | (num_vfs || pci_num_vf(adapter->pdev))) | 510 | (num_vfs || pci_num_vf(adapter->pdev))) |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index dbcd5262c016..94c35c8d799d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
141 | subsystem = resp_hdr->subsystem; | 141 | subsystem = resp_hdr->subsystem; |
142 | } | 142 | } |
143 | 143 | ||
144 | if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && | ||
145 | subsystem == CMD_SUBSYSTEM_LOWLEVEL) { | ||
146 | complete(&adapter->et_cmd_compl); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
144 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || | 150 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || |
145 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && | 151 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && |
146 | (subsystem == CMD_SUBSYSTEM_COMMON)) { | 152 | (subsystem == CMD_SUBSYSTEM_COMMON)) { |
147 | adapter->flash_status = compl_status; | 153 | adapter->flash_status = compl_status; |
148 | complete(&adapter->flash_compl); | 154 | complete(&adapter->et_cmd_compl); |
149 | } | 155 | } |
150 | 156 | ||
151 | if (compl_status == MCC_STATUS_SUCCESS) { | 157 | if (compl_status == MCC_STATUS_SUCCESS) { |
@@ -1032,6 +1038,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, | |||
1032 | } else { | 1038 | } else { |
1033 | req->hdr.version = 2; | 1039 | req->hdr.version = 2; |
1034 | req->page_size = 1; /* 1 for 4K */ | 1040 | req->page_size = 1; /* 1 for 4K */ |
1041 | |||
1042 | /* coalesce-wm field in this cmd is not relevant to Lancer. | ||
1043 | * Lancer uses COMMON_MODIFY_CQ to set this field | ||
1044 | */ | ||
1045 | if (!lancer_chip(adapter)) | ||
1046 | AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, | ||
1047 | ctxt, coalesce_wm); | ||
1035 | AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, | 1048 | AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, |
1036 | no_delay); | 1049 | no_delay); |
1037 | AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, | 1050 | AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, |
@@ -2010,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, | |||
2010 | 0x3ea83c02, 0x4a110304}; | 2023 | 0x3ea83c02, 0x4a110304}; |
2011 | int status; | 2024 | int status; |
2012 | 2025 | ||
2026 | if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) | ||
2027 | return 0; | ||
2028 | |||
2013 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | 2029 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
2014 | return -1; | 2030 | return -1; |
2015 | 2031 | ||
@@ -2153,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
2153 | be_mcc_notify(adapter); | 2169 | be_mcc_notify(adapter); |
2154 | spin_unlock_bh(&adapter->mcc_lock); | 2170 | spin_unlock_bh(&adapter->mcc_lock); |
2155 | 2171 | ||
2156 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2172 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
2157 | msecs_to_jiffies(60000))) | 2173 | msecs_to_jiffies(60000))) |
2158 | status = -1; | 2174 | status = -1; |
2159 | else | 2175 | else |
@@ -2248,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
2248 | be_mcc_notify(adapter); | 2264 | be_mcc_notify(adapter); |
2249 | spin_unlock_bh(&adapter->mcc_lock); | 2265 | spin_unlock_bh(&adapter->mcc_lock); |
2250 | 2266 | ||
2251 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2267 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
2252 | msecs_to_jiffies(40000))) | 2268 | msecs_to_jiffies(40000))) |
2253 | status = -1; | 2269 | status = -1; |
2254 | else | 2270 | else |
2255 | status = adapter->flash_status; | 2271 | status = adapter->flash_status; |
@@ -2360,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2360 | { | 2376 | { |
2361 | struct be_mcc_wrb *wrb; | 2377 | struct be_mcc_wrb *wrb; |
2362 | struct be_cmd_req_loopback_test *req; | 2378 | struct be_cmd_req_loopback_test *req; |
2379 | struct be_cmd_resp_loopback_test *resp; | ||
2363 | int status; | 2380 | int status; |
2364 | 2381 | ||
2365 | spin_lock_bh(&adapter->mcc_lock); | 2382 | spin_lock_bh(&adapter->mcc_lock); |
@@ -2374,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2374 | 2391 | ||
2375 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | 2392 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, |
2376 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); | 2393 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); |
2377 | req->hdr.timeout = cpu_to_le32(4); | ||
2378 | 2394 | ||
2395 | req->hdr.timeout = cpu_to_le32(15); | ||
2379 | req->pattern = cpu_to_le64(pattern); | 2396 | req->pattern = cpu_to_le64(pattern); |
2380 | req->src_port = cpu_to_le32(port_num); | 2397 | req->src_port = cpu_to_le32(port_num); |
2381 | req->dest_port = cpu_to_le32(port_num); | 2398 | req->dest_port = cpu_to_le32(port_num); |
@@ -2383,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2383 | req->num_pkts = cpu_to_le32(num_pkts); | 2400 | req->num_pkts = cpu_to_le32(num_pkts); |
2384 | req->loopback_type = cpu_to_le32(loopback_type); | 2401 | req->loopback_type = cpu_to_le32(loopback_type); |
2385 | 2402 | ||
2386 | status = be_mcc_notify_wait(adapter); | 2403 | be_mcc_notify(adapter); |
2387 | if (!status) { | 2404 | |
2388 | struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); | 2405 | spin_unlock_bh(&adapter->mcc_lock); |
2389 | status = le32_to_cpu(resp->status); | 2406 | |
2390 | } | 2407 | wait_for_completion(&adapter->et_cmd_compl); |
2408 | resp = embedded_payload(wrb); | ||
2409 | status = le32_to_cpu(resp->status); | ||
2391 | 2410 | ||
2411 | return status; | ||
2392 | err: | 2412 | err: |
2393 | spin_unlock_bh(&adapter->mcc_lock); | 2413 | spin_unlock_bh(&adapter->mcc_lock); |
2394 | return status; | 2414 | return status; |
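The loopback-test change above stops blocking inside be_mcc_notify_wait() and instead posts the command, drops mcc_lock, and sleeps on the adapter-wide et_cmd_compl completion that the MCC completion handler now signals for OPCODE_LOWLEVEL_LOOPBACK_TEST. A minimal kernel-style sketch of that producer/consumer pattern with the generic completion API; the et_* names are placeholders, not the driver's:

#include <linux/completion.h>
#include <linux/spinlock.h>

/* Placeholder context: one outstanding ethtool-test command at a time,
 * completed from the MCC completion-queue handler. */
struct et_ctx {
	spinlock_t lock;		/* stands in for mcc_lock */
	struct completion done;		/* stands in for et_cmd_compl */
	int status;
};

static void et_ctx_init(struct et_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	init_completion(&ctx->done);	/* done once, as in be_ctrl_init() */
}

/* Issuer: post the command under the lock, drop the lock, then sleep
 * until the completion handler reports the result. */
static int et_issue_and_wait(struct et_ctx *ctx)
{
	spin_lock_bh(&ctx->lock);
	/* ... build the WRB and ring the MCC doorbell here ... */
	spin_unlock_bh(&ctx->lock);

	wait_for_completion(&ctx->done);
	return ctx->status;
}

/* Completion-queue handler: record the result and wake the issuer. */
static void et_on_mcc_compl(struct et_ctx *ctx, int compl_status)
{
	ctx->status = compl_status;
	complete(&ctx->done);
}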
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e2162121601..dc88782185f2 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h | |||
@@ -64,6 +64,9 @@ | |||
64 | #define SLIPORT_ERROR_NO_RESOURCE1 0x2 | 64 | #define SLIPORT_ERROR_NO_RESOURCE1 0x2 |
65 | #define SLIPORT_ERROR_NO_RESOURCE2 0x9 | 65 | #define SLIPORT_ERROR_NO_RESOURCE2 0x9 |
66 | 66 | ||
67 | #define SLIPORT_ERROR_FW_RESET1 0x2 | ||
68 | #define SLIPORT_ERROR_FW_RESET2 0x0 | ||
69 | |||
67 | /********* Memory BAR register ************/ | 70 | /********* Memory BAR register ************/ |
68 | #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc | 71 | #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc |
69 | /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt | 72 | /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index abde97471636..bf40fdaecfa3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter) | |||
2464 | */ | 2464 | */ |
2465 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { | 2465 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { |
2466 | adapter->hw_error = true; | 2466 | adapter->hw_error = true; |
2467 | dev_err(&adapter->pdev->dev, | 2467 | /* Do not log error messages if it's a FW reset */ |
2468 | "Error detected in the card\n"); | 2468 | if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && |
2469 | sliport_err2 == SLIPORT_ERROR_FW_RESET2) { | ||
2470 | dev_info(&adapter->pdev->dev, | ||
2471 | "Firmware update in progress\n"); | ||
2472 | return; | ||
2473 | } else { | ||
2474 | dev_err(&adapter->pdev->dev, | ||
2475 | "Error detected in the card\n"); | ||
2476 | } | ||
2469 | } | 2477 | } |
2470 | 2478 | ||
2471 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { | 2479 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { |
@@ -2658,8 +2666,8 @@ static int be_close(struct net_device *netdev) | |||
2658 | 2666 | ||
2659 | be_roce_dev_close(adapter); | 2667 | be_roce_dev_close(adapter); |
2660 | 2668 | ||
2661 | for_all_evt_queues(adapter, eqo, i) { | 2669 | if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { |
2662 | if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { | 2670 | for_all_evt_queues(adapter, eqo, i) { |
2663 | napi_disable(&eqo->napi); | 2671 | napi_disable(&eqo->napi); |
2664 | be_disable_busy_poll(eqo); | 2672 | be_disable_busy_poll(eqo); |
2665 | } | 2673 | } |
@@ -2736,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter) | |||
2736 | if (!BEx_chip(adapter)) | 2744 | if (!BEx_chip(adapter)) |
2737 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | | 2745 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | |
2738 | RSS_ENABLE_UDP_IPV6; | 2746 | RSS_ENABLE_UDP_IPV6; |
2747 | } else { | ||
2748 | /* Disable RSS, if only default RX Q is created */ | ||
2749 | adapter->rss_flags = RSS_ENABLE_NONE; | ||
2750 | } | ||
2739 | 2751 | ||
2740 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, | 2752 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, |
2741 | 128); | 2753 | 128); |
2742 | if (rc) { | 2754 | if (rc) { |
2743 | adapter->rss_flags = 0; | 2755 | adapter->rss_flags = RSS_ENABLE_NONE; |
2744 | return rc; | 2756 | return rc; |
2745 | } | ||
2746 | } | 2757 | } |
2747 | 2758 | ||
2748 | /* First time posting */ | 2759 | /* First time posting */ |
@@ -2932,28 +2943,35 @@ static void be_cancel_worker(struct be_adapter *adapter) | |||
2932 | } | 2943 | } |
2933 | } | 2944 | } |
2934 | 2945 | ||
2935 | static int be_clear(struct be_adapter *adapter) | 2946 | static void be_mac_clear(struct be_adapter *adapter) |
2936 | { | 2947 | { |
2937 | int i; | 2948 | int i; |
2938 | 2949 | ||
2950 | if (adapter->pmac_id) { | ||
2951 | for (i = 0; i < (adapter->uc_macs + 1); i++) | ||
2952 | be_cmd_pmac_del(adapter, adapter->if_handle, | ||
2953 | adapter->pmac_id[i], 0); | ||
2954 | adapter->uc_macs = 0; | ||
2955 | |||
2956 | kfree(adapter->pmac_id); | ||
2957 | adapter->pmac_id = NULL; | ||
2958 | } | ||
2959 | } | ||
2960 | |||
2961 | static int be_clear(struct be_adapter *adapter) | ||
2962 | { | ||
2939 | be_cancel_worker(adapter); | 2963 | be_cancel_worker(adapter); |
2940 | 2964 | ||
2941 | if (sriov_enabled(adapter)) | 2965 | if (sriov_enabled(adapter)) |
2942 | be_vf_clear(adapter); | 2966 | be_vf_clear(adapter); |
2943 | 2967 | ||
2944 | /* delete the primary mac along with the uc-mac list */ | 2968 | /* delete the primary mac along with the uc-mac list */ |
2945 | for (i = 0; i < (adapter->uc_macs + 1); i++) | 2969 | be_mac_clear(adapter); |
2946 | be_cmd_pmac_del(adapter, adapter->if_handle, | ||
2947 | adapter->pmac_id[i], 0); | ||
2948 | adapter->uc_macs = 0; | ||
2949 | 2970 | ||
2950 | be_cmd_if_destroy(adapter, adapter->if_handle, 0); | 2971 | be_cmd_if_destroy(adapter, adapter->if_handle, 0); |
2951 | 2972 | ||
2952 | be_clear_queues(adapter); | 2973 | be_clear_queues(adapter); |
2953 | 2974 | ||
2954 | kfree(adapter->pmac_id); | ||
2955 | adapter->pmac_id = NULL; | ||
2956 | |||
2957 | be_msix_disable(adapter); | 2975 | be_msix_disable(adapter); |
2958 | return 0; | 2976 | return 0; |
2959 | } | 2977 | } |
@@ -3109,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
3109 | { | 3127 | { |
3110 | struct pci_dev *pdev = adapter->pdev; | 3128 | struct pci_dev *pdev = adapter->pdev; |
3111 | bool use_sriov = false; | 3129 | bool use_sriov = false; |
3130 | int max_vfs; | ||
3112 | 3131 | ||
3113 | if (BE3_chip(adapter) && sriov_want(adapter)) { | 3132 | max_vfs = pci_sriov_get_totalvfs(pdev); |
3114 | int max_vfs; | ||
3115 | 3133 | ||
3116 | max_vfs = pci_sriov_get_totalvfs(pdev); | 3134 | if (BE3_chip(adapter) && sriov_want(adapter)) { |
3117 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; | 3135 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; |
3118 | use_sriov = res->max_vfs; | 3136 | use_sriov = res->max_vfs; |
3119 | } | 3137 | } |
@@ -3144,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
3144 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; | 3162 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; |
3145 | res->max_rx_qs = res->max_rss_qs + 1; | 3163 | res->max_rx_qs = res->max_rss_qs + 1; |
3146 | 3164 | ||
3147 | res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; | 3165 | if (be_physfn(adapter)) |
3166 | res->max_evt_qs = (max_vfs > 0) ? | ||
3167 | BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; | ||
3168 | else | ||
3169 | res->max_evt_qs = 1; | ||
3148 | 3170 | ||
3149 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; | 3171 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; |
3150 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) | 3172 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) |
@@ -3253,12 +3275,10 @@ static int be_mac_setup(struct be_adapter *adapter) | |||
3253 | memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); | 3275 | memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); |
3254 | } | 3276 | } |
3255 | 3277 | ||
3256 | /* On BE3 VFs this cmd may fail due to lack of privilege. | 3278 | /* For BE3-R VFs, the PF programs the initial MAC address */ |
3257 | * Ignore the failure as in this case pmac_id is fetched | 3279 | if (!(BEx_chip(adapter) && be_virtfn(adapter))) |
3258 | * in the IFACE_CREATE cmd. | 3280 | be_cmd_pmac_add(adapter, mac, adapter->if_handle, |
3259 | */ | 3281 | &adapter->pmac_id[0], 0); |
3260 | be_cmd_pmac_add(adapter, mac, adapter->if_handle, | ||
3261 | &adapter->pmac_id[0], 0); | ||
3262 | return 0; | 3282 | return 0; |
3263 | } | 3283 | } |
3264 | 3284 | ||
@@ -3814,6 +3834,8 @@ static int lancer_fw_download(struct be_adapter *adapter, | |||
3814 | } | 3834 | } |
3815 | 3835 | ||
3816 | if (change_status == LANCER_FW_RESET_NEEDED) { | 3836 | if (change_status == LANCER_FW_RESET_NEEDED) { |
3837 | dev_info(&adapter->pdev->dev, | ||
3838 | "Resetting adapter to activate new FW\n"); | ||
3817 | status = lancer_physdev_ctrl(adapter, | 3839 | status = lancer_physdev_ctrl(adapter, |
3818 | PHYSDEV_CONTROL_FW_RESET_MASK); | 3840 | PHYSDEV_CONTROL_FW_RESET_MASK); |
3819 | if (status) { | 3841 | if (status) { |
@@ -4190,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter) | |||
4190 | spin_lock_init(&adapter->mcc_lock); | 4212 | spin_lock_init(&adapter->mcc_lock); |
4191 | spin_lock_init(&adapter->mcc_cq_lock); | 4213 | spin_lock_init(&adapter->mcc_cq_lock); |
4192 | 4214 | ||
4193 | init_completion(&adapter->flash_compl); | 4215 | init_completion(&adapter->et_cmd_compl); |
4194 | pci_save_state(adapter->pdev); | 4216 | pci_save_state(adapter->pdev); |
4195 | return 0; | 4217 | return 0; |
4196 | 4218 | ||
@@ -4365,13 +4387,13 @@ static int lancer_recover_func(struct be_adapter *adapter) | |||
4365 | goto err; | 4387 | goto err; |
4366 | } | 4388 | } |
4367 | 4389 | ||
4368 | dev_err(dev, "Error recovery successful\n"); | 4390 | dev_err(dev, "Adapter recovery successful\n"); |
4369 | return 0; | 4391 | return 0; |
4370 | err: | 4392 | err: |
4371 | if (status == -EAGAIN) | 4393 | if (status == -EAGAIN) |
4372 | dev_err(dev, "Waiting for resource provisioning\n"); | 4394 | dev_err(dev, "Waiting for resource provisioning\n"); |
4373 | else | 4395 | else |
4374 | dev_err(dev, "Error recovery failed\n"); | 4396 | dev_err(dev, "Adapter recovery failed\n"); |
4375 | 4397 | ||
4376 | return status; | 4398 | return status; |
4377 | } | 4399 | } |
@@ -4599,6 +4621,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4599 | if (adapter->wol) | 4621 | if (adapter->wol) |
4600 | be_setup_wol(adapter, true); | 4622 | be_setup_wol(adapter, true); |
4601 | 4623 | ||
4624 | be_intr_set(adapter, false); | ||
4602 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 4625 | cancel_delayed_work_sync(&adapter->func_recovery_work); |
4603 | 4626 | ||
4604 | netif_device_detach(netdev); | 4627 | netif_device_detach(netdev); |
@@ -4634,6 +4657,7 @@ static int be_resume(struct pci_dev *pdev) | |||
4634 | if (status) | 4657 | if (status) |
4635 | return status; | 4658 | return status; |
4636 | 4659 | ||
4660 | be_intr_set(adapter, true); | ||
4637 | /* tell fw we're ready to fire cmds */ | 4661 | /* tell fw we're ready to fire cmds */ |
4638 | status = be_cmd_fw_init(adapter); | 4662 | status = be_cmd_fw_init(adapter); |
4639 | if (status) | 4663 | if (status) |
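be_detect_error() above now recognises a firmware-initiated reset by its SLIPORT error signature (the two constants added to be_hw.h in this diff) and logs it as an informational firmware update rather than a hardware error. A compact restatement of that test; the value of SLIPORT_STATUS_ERR_MASK is assumed here, only the two FW_RESET codes come from this diff:

#include <linux/types.h>

#define SLIPORT_STATUS_ERR_MASK		0x80000000	/* assumed bit */
#define SLIPORT_ERROR_FW_RESET1		0x2
#define SLIPORT_ERROR_FW_RESET2		0x0

/* True when the error registers carry the "firmware update in progress"
 * signature, i.e. the caller should not latch a hardware error. */
static bool sliport_err_is_fw_reset(u32 status, u32 err1, u32 err2)
{
	return (status & SLIPORT_STATUS_ERR_MASK) &&
	       err1 == SLIPORT_ERROR_FW_RESET1 &&
	       err2 == SLIPORT_ERROR_FW_RESET2;
}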
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4cbebf3d80eb..50bb71c663e2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev); | |||
98 | * detected as not set during a prior frame transmission, then the | 98 | * detected as not set during a prior frame transmission, then the |
99 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | 99 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs |
100 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | 100 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in |
101 | * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously | ||
102 | * detected as not set during a prior frame transmission, then the | ||
103 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | ||
104 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | ||
105 | * frames not being transmitted until there is a 0-to-1 transition on | 101 | * frames not being transmitted until there is a 0-to-1 transition on |
106 | * ENET_TDAR[TDAR]. | 102 | * ENET_TDAR[TDAR]. |
107 | */ | 103 | */ |
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
385 | * data. | 381 | * data. |
386 | */ | 382 | */ |
387 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, | 383 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, |
388 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | 384 | skb->len, DMA_TO_DEVICE); |
389 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { | 385 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { |
390 | bdp->cbd_bufaddr = 0; | 386 | bdp->cbd_bufaddr = 0; |
391 | fep->tx_skbuff[index] = NULL; | 387 | fep->tx_skbuff[index] = NULL; |
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
432 | /* If this was the last BD in the ring, start at the beginning again. */ | 428 | /* If this was the last BD in the ring, start at the beginning again. */ |
433 | bdp = fec_enet_get_nextdesc(bdp, fep); | 429 | bdp = fec_enet_get_nextdesc(bdp, fep); |
434 | 430 | ||
431 | skb_tx_timestamp(skb); | ||
432 | |||
435 | fep->cur_tx = bdp; | 433 | fep->cur_tx = bdp; |
436 | 434 | ||
437 | if (fep->cur_tx == fep->dirty_tx) | 435 | if (fep->cur_tx == fep->dirty_tx) |
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
440 | /* Trigger transmission start */ | 438 | /* Trigger transmission start */ |
441 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | 439 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
442 | 440 | ||
443 | skb_tx_timestamp(skb); | ||
444 | |||
445 | return NETDEV_TX_OK; | 441 | return NETDEV_TX_OK; |
446 | } | 442 | } |
447 | 443 | ||
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev) | |||
779 | else | 775 | else |
780 | index = bdp - fep->tx_bd_base; | 776 | index = bdp - fep->tx_bd_base; |
781 | 777 | ||
782 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
783 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | ||
784 | bdp->cbd_bufaddr = 0; | ||
785 | |||
786 | skb = fep->tx_skbuff[index]; | 778 | skb = fep->tx_skbuff[index]; |
779 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, | ||
780 | DMA_TO_DEVICE); | ||
781 | bdp->cbd_bufaddr = 0; | ||
787 | 782 | ||
788 | /* Check for errors. */ | 783 | /* Check for errors. */ |
789 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 784 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
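
Note on the fec_main.c hunks above: they pair two fixes. The TX buffer is now mapped and unmapped with the same length (skb->len on both sides), and skb_tx_timestamp() is called before the descriptor and TDAR write hand the frame to the MAC, so the timestamp is taken while the skb is still guaranteed to be live. A minimal kernel-style fragment of that ordering; fep, bdp and the surrounding TX context are stand-ins borrowed from the driver, not a self-contained function:

	/* Map exactly skb->len; the completion path must unmap the same length. */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
					  skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr))
		return NETDEV_TX_OK;		/* drop the frame on mapping failure */

	skb_tx_timestamp(skb);			/* before hardware owns the frame */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);	/* kick the transmitter */

	/* TX completion side: unmap with the length that was mapped. */
	dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
			 DMA_TO_DEVICE);
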
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 2d1c6bdd3618..7628e0fd8455 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3033 | 3033 | ||
3034 | dev->hw_features = NETIF_F_SG | NETIF_F_TSO | | 3034 | dev->hw_features = NETIF_F_SG | NETIF_F_TSO | |
3035 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; | 3035 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; |
3036 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | | 3036 | dev->features = NETIF_F_SG | NETIF_F_TSO | |
3037 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | | 3037 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | |
3038 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | 3038 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
3039 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; | 3039 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; |
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 58c147271a36..f9313b36c887 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h | |||
@@ -83,6 +83,11 @@ struct e1000_adapter; | |||
83 | 83 | ||
84 | #define E1000_MAX_INTR 10 | 84 | #define E1000_MAX_INTR 10 |
85 | 85 | ||
86 | /* | ||
87 | * Count for polling __E1000_RESET condition every 10-20msec. | ||
88 | */ | ||
89 | #define E1000_CHECK_RESET_COUNT 50 | ||
90 | |||
86 | /* TX/RX descriptor defines */ | 91 | /* TX/RX descriptor defines */ |
87 | #define E1000_DEFAULT_TXD 256 | 92 | #define E1000_DEFAULT_TXD 256 |
88 | #define E1000_MAX_TXD 256 | 93 | #define E1000_MAX_TXD 256 |
@@ -312,8 +317,6 @@ struct e1000_adapter { | |||
312 | struct delayed_work watchdog_task; | 317 | struct delayed_work watchdog_task; |
313 | struct delayed_work fifo_stall_task; | 318 | struct delayed_work fifo_stall_task; |
314 | struct delayed_work phy_info_task; | 319 | struct delayed_work phy_info_task; |
315 | |||
316 | struct mutex mutex; | ||
317 | }; | 320 | }; |
318 | 321 | ||
319 | enum e1000_state_t { | 322 | enum e1000_state_t { |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index e38622825fa7..46e6544ed1b7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) | |||
494 | { | 494 | { |
495 | set_bit(__E1000_DOWN, &adapter->flags); | 495 | set_bit(__E1000_DOWN, &adapter->flags); |
496 | 496 | ||
497 | /* Only kill reset task if adapter is not resetting */ | ||
498 | if (!test_bit(__E1000_RESETTING, &adapter->flags)) | ||
499 | cancel_work_sync(&adapter->reset_task); | ||
500 | |||
501 | cancel_delayed_work_sync(&adapter->watchdog_task); | 497 | cancel_delayed_work_sync(&adapter->watchdog_task); |
498 | |||
499 | /* | ||
500 | * Since the watchdog task can reschedule other tasks, we should cancel | ||
501 | * it first, otherwise we can run into the situation when a work is | ||
502 | * still running after the adapter has been turned down. | ||
503 | */ | ||
504 | |||
502 | cancel_delayed_work_sync(&adapter->phy_info_task); | 505 | cancel_delayed_work_sync(&adapter->phy_info_task); |
503 | cancel_delayed_work_sync(&adapter->fifo_stall_task); | 506 | cancel_delayed_work_sync(&adapter->fifo_stall_task); |
507 | |||
508 | /* Only kill reset task if adapter is not resetting */ | ||
509 | if (!test_bit(__E1000_RESETTING, &adapter->flags)) | ||
510 | cancel_work_sync(&adapter->reset_task); | ||
504 | } | 511 | } |
505 | 512 | ||
506 | void e1000_down(struct e1000_adapter *adapter) | 513 | void e1000_down(struct e1000_adapter *adapter) |
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter) | |||
544 | e1000_clean_all_rx_rings(adapter); | 551 | e1000_clean_all_rx_rings(adapter); |
545 | } | 552 | } |
546 | 553 | ||
547 | static void e1000_reinit_safe(struct e1000_adapter *adapter) | ||
548 | { | ||
549 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | ||
550 | msleep(1); | ||
551 | mutex_lock(&adapter->mutex); | ||
552 | e1000_down(adapter); | ||
553 | e1000_up(adapter); | ||
554 | mutex_unlock(&adapter->mutex); | ||
555 | clear_bit(__E1000_RESETTING, &adapter->flags); | ||
556 | } | ||
557 | |||
558 | void e1000_reinit_locked(struct e1000_adapter *adapter) | 554 | void e1000_reinit_locked(struct e1000_adapter *adapter) |
559 | { | 555 | { |
560 | /* if rtnl_lock is not held the call path is bogus */ | ||
561 | ASSERT_RTNL(); | ||
562 | WARN_ON(in_interrupt()); | 556 | WARN_ON(in_interrupt()); |
563 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | 557 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
564 | msleep(1); | 558 | msleep(1); |
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) | |||
1316 | e1000_irq_disable(adapter); | 1310 | e1000_irq_disable(adapter); |
1317 | 1311 | ||
1318 | spin_lock_init(&adapter->stats_lock); | 1312 | spin_lock_init(&adapter->stats_lock); |
1319 | mutex_init(&adapter->mutex); | ||
1320 | 1313 | ||
1321 | set_bit(__E1000_DOWN, &adapter->flags); | 1314 | set_bit(__E1000_DOWN, &adapter->flags); |
1322 | 1315 | ||
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev) | |||
1440 | { | 1433 | { |
1441 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1434 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1442 | struct e1000_hw *hw = &adapter->hw; | 1435 | struct e1000_hw *hw = &adapter->hw; |
1436 | int count = E1000_CHECK_RESET_COUNT; | ||
1437 | |||
1438 | while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) | ||
1439 | usleep_range(10000, 20000); | ||
1443 | 1440 | ||
1444 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 1441 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
1445 | e1000_down(adapter); | 1442 | e1000_down(adapter); |
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work) | |||
2325 | struct e1000_adapter *adapter = container_of(work, | 2322 | struct e1000_adapter *adapter = container_of(work, |
2326 | struct e1000_adapter, | 2323 | struct e1000_adapter, |
2327 | phy_info_task.work); | 2324 | phy_info_task.work); |
2328 | if (test_bit(__E1000_DOWN, &adapter->flags)) | 2325 | |
2329 | return; | ||
2330 | mutex_lock(&adapter->mutex); | ||
2331 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 2326 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); |
2332 | mutex_unlock(&adapter->mutex); | ||
2333 | } | 2327 | } |
2334 | 2328 | ||
2335 | /** | 2329 | /** |
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) | |||
2345 | struct net_device *netdev = adapter->netdev; | 2339 | struct net_device *netdev = adapter->netdev; |
2346 | u32 tctl; | 2340 | u32 tctl; |
2347 | 2341 | ||
2348 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
2349 | return; | ||
2350 | mutex_lock(&adapter->mutex); | ||
2351 | if (atomic_read(&adapter->tx_fifo_stall)) { | 2342 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2352 | if ((er32(TDT) == er32(TDH)) && | 2343 | if ((er32(TDT) == er32(TDH)) && |
2353 | (er32(TDFT) == er32(TDFH)) && | 2344 | (er32(TDFT) == er32(TDFH)) && |
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) | |||
2368 | schedule_delayed_work(&adapter->fifo_stall_task, 1); | 2359 | schedule_delayed_work(&adapter->fifo_stall_task, 1); |
2369 | } | 2360 | } |
2370 | } | 2361 | } |
2371 | mutex_unlock(&adapter->mutex); | ||
2372 | } | 2362 | } |
2373 | 2363 | ||
2374 | bool e1000_has_link(struct e1000_adapter *adapter) | 2364 | bool e1000_has_link(struct e1000_adapter *adapter) |
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work) | |||
2422 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2412 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2423 | u32 link, tctl; | 2413 | u32 link, tctl; |
2424 | 2414 | ||
2425 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
2426 | return; | ||
2427 | |||
2428 | mutex_lock(&adapter->mutex); | ||
2429 | link = e1000_has_link(adapter); | 2415 | link = e1000_has_link(adapter); |
2430 | if ((netif_carrier_ok(netdev)) && link) | 2416 | if ((netif_carrier_ok(netdev)) && link) |
2431 | goto link_up; | 2417 | goto link_up; |
@@ -2516,7 +2502,7 @@ link_up: | |||
2516 | adapter->tx_timeout_count++; | 2502 | adapter->tx_timeout_count++; |
2517 | schedule_work(&adapter->reset_task); | 2503 | schedule_work(&adapter->reset_task); |
2518 | /* exit immediately since reset is imminent */ | 2504 | /* exit immediately since reset is imminent */ |
2519 | goto unlock; | 2505 | return; |
2520 | } | 2506 | } |
2521 | } | 2507 | } |
2522 | 2508 | ||
@@ -2544,9 +2530,6 @@ link_up: | |||
2544 | /* Reschedule the task */ | 2530 | /* Reschedule the task */ |
2545 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 2531 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
2546 | schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); | 2532 | schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); |
2547 | |||
2548 | unlock: | ||
2549 | mutex_unlock(&adapter->mutex); | ||
2550 | } | 2533 | } |
2551 | 2534 | ||
2552 | enum latency_range { | 2535 | enum latency_range { |
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work) | |||
3495 | struct e1000_adapter *adapter = | 3478 | struct e1000_adapter *adapter = |
3496 | container_of(work, struct e1000_adapter, reset_task); | 3479 | container_of(work, struct e1000_adapter, reset_task); |
3497 | 3480 | ||
3498 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
3499 | return; | ||
3500 | e_err(drv, "Reset adapter\n"); | 3481 | e_err(drv, "Reset adapter\n"); |
3501 | e1000_reinit_safe(adapter); | 3482 | e1000_reinit_locked(adapter); |
3502 | } | 3483 | } |
3503 | 3484 | ||
3504 | /** | 3485 | /** |
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
4963 | netif_device_detach(netdev); | 4944 | netif_device_detach(netdev); |
4964 | 4945 | ||
4965 | if (netif_running(netdev)) { | 4946 | if (netif_running(netdev)) { |
4947 | int count = E1000_CHECK_RESET_COUNT; | ||
4948 | |||
4949 | while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) | ||
4950 | usleep_range(10000, 20000); | ||
4951 | |||
4966 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 4952 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
4967 | e1000_down(adapter); | 4953 | e1000_down(adapter); |
4968 | } | 4954 | } |
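
Note on the e1000 changes above: the driver-private mutex is dropped and close/shutdown instead wait for an in-flight reset by polling the __E1000_RESETTING flag up to E1000_CHECK_RESET_COUNT times, sleeping 10-20 ms per pass (roughly 0.5-1 s total). A sketch of that wait written as a helper; the helper name is illustrative, while the flag and the constant come from the hunks:

	static void e1000_wait_for_reset(struct e1000_adapter *adapter)
	{
		int count = E1000_CHECK_RESET_COUNT;	/* 50 passes of 10-20 ms each */

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		/* If the reset task is still running here, that is a driver bug. */
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	}
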
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 895450e9bb3c..ff2d806eaef7 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c | |||
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
718 | e1000_release_phy_80003es2lan(hw); | 718 | e1000_release_phy_80003es2lan(hw); |
719 | 719 | ||
720 | /* Disable IBIST slave mode (far-end loopback) */ | 720 | /* Disable IBIST slave mode (far-end loopback) */ |
721 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 721 | ret_val = |
722 | &kum_reg_data); | 722 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
723 | &kum_reg_data); | ||
724 | if (ret_val) | ||
725 | return ret_val; | ||
723 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; | 726 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; |
724 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 727 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
725 | kum_reg_data); | 728 | kum_reg_data); |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8d3945ab7334..c30d41d6e426 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6174 | return 0; | 6174 | return 0; |
6175 | } | 6175 | } |
6176 | 6176 | ||
6177 | #ifdef CONFIG_PM_SLEEP | 6177 | #ifdef CONFIG_PM |
6178 | static int e1000_suspend(struct device *dev) | 6178 | static int e1000_suspend(struct device *dev) |
6179 | { | 6179 | { |
6180 | struct pci_dev *pdev = to_pci_dev(dev); | 6180 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev) | |||
6193 | 6193 | ||
6194 | return __e1000_resume(pdev); | 6194 | return __e1000_resume(pdev); |
6195 | } | 6195 | } |
6196 | #endif /* CONFIG_PM_SLEEP */ | 6196 | #endif /* CONFIG_PM */ |
6197 | 6197 | ||
6198 | #ifdef CONFIG_PM_RUNTIME | 6198 | #ifdef CONFIG_PM_RUNTIME |
6199 | static int e1000_runtime_suspend(struct device *dev) | 6199 | static int e1000_runtime_suspend(struct device *dev) |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index da2be59505c0..20e71f4ca426 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |||
1757 | * it across the board. | 1757 | * it across the board. |
1758 | */ | 1758 | */ |
1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
1760 | if (ret_val) | 1760 | if (ret_val) { |
1761 | /* If the first read fails, another entity may have | 1761 | /* If the first read fails, another entity may have |
1762 | * ownership of the resources, wait and try again to | 1762 | * ownership of the resources, wait and try again to |
1763 | * see if they have relinquished the resources yet. | 1763 | * see if they have relinquished the resources yet. |
1764 | */ | 1764 | */ |
1765 | udelay(usec_interval); | 1765 | if (usec_interval >= 1000) |
1766 | msleep(usec_interval / 1000); | ||
1767 | else | ||
1768 | udelay(usec_interval); | ||
1769 | } | ||
1766 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1770 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
1767 | if (ret_val) | 1771 | if (ret_val) |
1768 | break; | 1772 | break; |
1769 | if (phy_status & BMSR_LSTATUS) | 1773 | if (phy_status & BMSR_LSTATUS) |
1770 | break; | 1774 | break; |
1771 | if (usec_interval >= 1000) | 1775 | if (usec_interval >= 1000) |
1772 | mdelay(usec_interval / 1000); | 1776 | msleep(usec_interval / 1000); |
1773 | else | 1777 | else |
1774 | udelay(usec_interval); | 1778 | udelay(usec_interval); |
1775 | } | 1779 | } |
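
Note on this e1000e hunk and the igb hunk that follows: both replace a long udelay() with a millisecond-scale delay (msleep() in e1000e, mdelay() in igb), since udelay() is only meant for short, sub-millisecond waits. A minimal helper expressing the e1000e variant of that choice; the function name is illustrative and belongs to neither driver:

	static void phy_poll_delay(u32 usec_interval)
	{
		if (usec_interval >= 1000)
			msleep(usec_interval / 1000);	/* may sleep and release the CPU */
		else
			udelay(usec_interval);		/* sub-millisecond busy-wait */
	}
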
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index be15938ba213..12b0932204ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( | |||
354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); | 354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); |
355 | int i; | 355 | int i; |
356 | 356 | ||
357 | if (!vsi->tx_rings) | ||
358 | return stats; | ||
359 | |||
357 | rcu_read_lock(); | 360 | rcu_read_lock(); |
358 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 361 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
359 | struct i40e_ring *tx_ring, *rx_ring; | 362 | struct i40e_ring *tx_ring, *rx_ring; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c4c4fe332c7e..ad2b74d95138 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c | |||
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, | |||
1728 | * ownership of the resources, wait and try again to | 1728 | * ownership of the resources, wait and try again to |
1729 | * see if they have relinquished the resources yet. | 1729 | * see if they have relinquished the resources yet. |
1730 | */ | 1730 | */ |
1731 | udelay(usec_interval); | 1731 | if (usec_interval >= 1000) |
1732 | mdelay(usec_interval/1000); | ||
1733 | else | ||
1734 | udelay(usec_interval); | ||
1732 | } | 1735 | } |
1733 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); | 1736 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); |
1734 | if (ret_val) | 1737 | if (ret_val) |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index b0f3666b1d7f..c3143da497c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
2062 | { | 2062 | { |
2063 | struct igb_adapter *adapter = netdev_priv(netdev); | 2063 | struct igb_adapter *adapter = netdev_priv(netdev); |
2064 | 2064 | ||
2065 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
2066 | WAKE_BCAST | WAKE_MAGIC | | ||
2067 | WAKE_PHY; | ||
2068 | wol->wolopts = 0; | 2065 | wol->wolopts = 0; |
2069 | 2066 | ||
2070 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) | 2067 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) |
2071 | return; | 2068 | return; |
2072 | 2069 | ||
2070 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
2071 | WAKE_BCAST | WAKE_MAGIC | | ||
2072 | WAKE_PHY; | ||
2073 | |||
2073 | /* apply any specific unsupported masks here */ | 2074 | /* apply any specific unsupported masks here */ |
2074 | switch (adapter->hw.device_id) { | 2075 | switch (adapter->hw.device_id) { |
2075 | default: | 2076 | default: |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0c55079ebee3..5bcc870f8367 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, | |||
4251 | rx_ring->l2_accel_priv = NULL; | 4251 | rx_ring->l2_accel_priv = NULL; |
4252 | } | 4252 | } |
4253 | 4253 | ||
4254 | int ixgbe_fwd_ring_down(struct net_device *vdev, | 4254 | static int ixgbe_fwd_ring_down(struct net_device *vdev, |
4255 | struct ixgbe_fwd_adapter *accel) | 4255 | struct ixgbe_fwd_adapter *accel) |
4256 | { | 4256 | { |
4257 | struct ixgbe_adapter *adapter = accel->real_adapter; | 4257 | struct ixgbe_adapter *adapter = accel->real_adapter; |
4258 | unsigned int rxbase = accel->rx_base_queue; | 4258 | unsigned int rxbase = accel->rx_base_queue; |
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) | |||
6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); | 6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
6828 | } | 6828 | } |
6829 | 6829 | ||
6830 | #ifdef IXGBE_FCOE | 6830 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, |
6831 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 6831 | void *accel_priv) |
6832 | { | 6832 | { |
6833 | struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; | ||
6834 | #ifdef IXGBE_FCOE | ||
6833 | struct ixgbe_adapter *adapter; | 6835 | struct ixgbe_adapter *adapter; |
6834 | struct ixgbe_ring_feature *f; | 6836 | struct ixgbe_ring_feature *f; |
6835 | int txq; | 6837 | int txq; |
6838 | #endif | ||
6839 | |||
6840 | if (fwd_adapter) | ||
6841 | return skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
6842 | |||
6843 | #ifdef IXGBE_FCOE | ||
6836 | 6844 | ||
6837 | /* | 6845 | /* |
6838 | * only execute the code below if protocol is FCoE | 6846 | * only execute the code below if protocol is FCoE |
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6858 | txq -= f->indices; | 6866 | txq -= f->indices; |
6859 | 6867 | ||
6860 | return txq + f->offset; | 6868 | return txq + f->offset; |
6869 | #else | ||
6870 | return __netdev_pick_tx(dev, skb); | ||
6871 | #endif | ||
6861 | } | 6872 | } |
6862 | 6873 | ||
6863 | #endif | ||
6864 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | 6874 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
6865 | struct ixgbe_adapter *adapter, | 6875 | struct ixgbe_adapter *adapter, |
6866 | struct ixgbe_ring *tx_ring) | 6876 | struct ixgbe_ring *tx_ring) |
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) | |||
7629 | kfree(fwd_adapter); | 7639 | kfree(fwd_adapter); |
7630 | } | 7640 | } |
7631 | 7641 | ||
7632 | static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, | ||
7633 | struct net_device *dev, | ||
7634 | void *priv) | ||
7635 | { | ||
7636 | struct ixgbe_fwd_adapter *fwd_adapter = priv; | ||
7637 | unsigned int queue; | ||
7638 | struct ixgbe_ring *tx_ring; | ||
7639 | |||
7640 | queue = skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
7641 | tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; | ||
7642 | |||
7643 | return __ixgbe_xmit_frame(skb, dev, tx_ring); | ||
7644 | } | ||
7645 | |||
7646 | static const struct net_device_ops ixgbe_netdev_ops = { | 7642 | static const struct net_device_ops ixgbe_netdev_ops = { |
7647 | .ndo_open = ixgbe_open, | 7643 | .ndo_open = ixgbe_open, |
7648 | .ndo_stop = ixgbe_close, | 7644 | .ndo_stop = ixgbe_close, |
7649 | .ndo_start_xmit = ixgbe_xmit_frame, | 7645 | .ndo_start_xmit = ixgbe_xmit_frame, |
7650 | #ifdef IXGBE_FCOE | ||
7651 | .ndo_select_queue = ixgbe_select_queue, | 7646 | .ndo_select_queue = ixgbe_select_queue, |
7652 | #endif | ||
7653 | .ndo_set_rx_mode = ixgbe_set_rx_mode, | 7647 | .ndo_set_rx_mode = ixgbe_set_rx_mode, |
7654 | .ndo_validate_addr = eth_validate_addr, | 7648 | .ndo_validate_addr = eth_validate_addr, |
7655 | .ndo_set_mac_address = ixgbe_set_mac, | 7649 | .ndo_set_mac_address = ixgbe_set_mac, |
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
7689 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, | 7683 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, |
7690 | .ndo_dfwd_add_station = ixgbe_fwd_add, | 7684 | .ndo_dfwd_add_station = ixgbe_fwd_add, |
7691 | .ndo_dfwd_del_station = ixgbe_fwd_del, | 7685 | .ndo_dfwd_del_station = ixgbe_fwd_del, |
7692 | .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, | ||
7693 | }; | 7686 | }; |
7694 | 7687 | ||
7695 | /** | 7688 | /** |
@@ -7986,10 +7979,9 @@ skip_sriov: | |||
7986 | NETIF_F_TSO | | 7979 | NETIF_F_TSO | |
7987 | NETIF_F_TSO6 | | 7980 | NETIF_F_TSO6 | |
7988 | NETIF_F_RXHASH | | 7981 | NETIF_F_RXHASH | |
7989 | NETIF_F_RXCSUM | | 7982 | NETIF_F_RXCSUM; |
7990 | NETIF_F_HW_L2FW_DOFFLOAD; | ||
7991 | 7983 | ||
7992 | netdev->hw_features = netdev->features; | 7984 | netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; |
7993 | 7985 | ||
7994 | switch (adapter->hw.mac.type) { | 7986 | switch (adapter->hw.mac.type) { |
7995 | case ixgbe_mac_82599EB: | 7987 | case ixgbe_mac_82599EB: |
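
Note on the ixgbe changes above: the separate ndo_dfwd_start_xmit path is folded into the regular TX path by giving ndo_select_queue an accel_priv argument. Skbs from an offloaded station are steered onto that station's reserved queue range; everything else falls back to the stack's default pick. The shape of such a callback, showing only the non-FCoE branch; the function name is illustrative, the struct field comes from the hunk:

	static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
					void *accel_priv)
	{
		struct ixgbe_fwd_adapter *fwd = accel_priv;

		/* Offloaded macvlan: use that station's base queue. */
		if (fwd)
			return skb->queue_mapping + fwd->tx_base_queue;

		/* Otherwise let the core pick as usual. */
		return __netdev_pick_tx(dev, skb);
	}
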
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e4c676006be9..39217e5ff7dc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl); | |||
46 | static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); | 46 | static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); |
47 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); | 47 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); |
48 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); | 48 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); |
49 | static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); | ||
49 | 50 | ||
50 | /** | 51 | /** |
51 | * ixgbe_identify_phy_generic - Get physical layer module | 52 | * ixgbe_identify_phy_generic - Get physical layer module |
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom: | |||
1164 | * | 1165 | * |
1165 | * Searches for and identifies the QSFP module and assigns appropriate PHY type | 1166 | * Searches for and identifies the QSFP module and assigns appropriate PHY type |
1166 | **/ | 1167 | **/ |
1167 | s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) | 1168 | static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) |
1168 | { | 1169 | { |
1169 | struct ixgbe_adapter *adapter = hw->back; | 1170 | struct ixgbe_adapter *adapter = hw->back; |
1170 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | 1171 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index aae900a256da..fffcbdd2bf0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | |||
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, | |||
145 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); | 145 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); |
146 | s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); | 146 | s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); |
147 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); | 147 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); |
148 | s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); | ||
149 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, | 148 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, |
150 | u16 *list_offset, | 149 | u16 *list_offset, |
151 | u16 *data_offset); | 150 | u16 *data_offset); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d6f0c0d8cf11..72084f70adbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) | |||
291 | { | 291 | { |
292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); | 292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
293 | int err; | 293 | int err; |
294 | #ifdef CONFIG_PCI_IOV | ||
294 | u32 current_flags = adapter->flags; | 295 | u32 current_flags = adapter->flags; |
296 | #endif | ||
295 | 297 | ||
296 | err = ixgbe_disable_sriov(adapter); | 298 | err = ixgbe_disable_sriov(adapter); |
297 | 299 | ||
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a6c1f76d8e0..ec94a20d7099 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | static u16 | 621 | static u16 |
622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) | 622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, |
623 | void *accel_priv) | ||
623 | { | 624 | { |
624 | /* we are currently only using the first queue */ | 625 | /* we are currently only using the first queue */ |
625 | return 0; | 626 | return 0; |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 7354960b583b..c4eeb69a5bee 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c | |||
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus) | |||
92 | if (time_is_before_jiffies(end)) | 92 | if (time_is_before_jiffies(end)) |
93 | ++timedout; | 93 | ++timedout; |
94 | } else { | 94 | } else { |
95 | /* wait_event_timeout does not guarantee a delay of at | ||
96 | * least one whole jiffie, so timeout must be no less | ||
97 | * than two. | ||
98 | */ | ||
99 | if (timeout < 2) | ||
100 | timeout = 2; | ||
95 | wait_event_timeout(dev->smi_busy_wait, | 101 | wait_event_timeout(dev->smi_busy_wait, |
96 | orion_mdio_smi_is_done(dev), | 102 | orion_mdio_smi_is_done(dev), |
97 | timeout); | 103 | timeout); |
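
Note on the mvmdio hunk above: the added comment is the whole point of the change. wait_event_timeout() with a timeout of one jiffy can return after less than a full tick, so the timeout is clamped to at least two jiffies. A minimal fragment of that clamp; the 1 ms budget is illustrative, while the waitqueue and the condition are the driver's own:

	unsigned long timeout = usecs_to_jiffies(1000);	/* illustrative 1 ms budget */

	if (timeout < 2)	/* guarantee at least one whole jiffy of waiting */
		timeout = 2;
	wait_event_timeout(dev->smi_busy_wait, orion_mdio_smi_is_done(dev),
			   timeout);
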
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b8e232b4ea2d..d5f0d72e5e33 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |||
1378 | 1378 | ||
1379 | dev_kfree_skb_any(skb); | 1379 | dev_kfree_skb_any(skb); |
1380 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1380 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
1381 | rx_desc->data_size, DMA_FROM_DEVICE); | 1381 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | if (rx_done) | 1384 | if (rx_done) |
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1426 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
1427 | rx_desc->data_size, DMA_FROM_DEVICE); | 1427 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1428 | 1428 | ||
1429 | rx_bytes = rx_desc->data_size - | 1429 | rx_bytes = rx_desc->data_size - |
1430 | (ETH_FCS_LEN + MVNETA_MH_SIZE); | 1430 | (ETH_FCS_LEN + MVNETA_MH_SIZE); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 40626690e8a8..c11d063473e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
140 | { | 140 | { |
141 | struct mlx4_en_priv *priv = netdev_priv(dev); | 141 | struct mlx4_en_priv *priv = netdev_priv(dev); |
142 | struct mlx4_en_dev *mdev = priv->mdev; | 142 | struct mlx4_en_dev *mdev = priv->mdev; |
143 | struct mlx4_en_tx_ring *tx_ring; | ||
144 | int i, carrier_ok; | 143 | int i, carrier_ok; |
145 | 144 | ||
146 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); | 145 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); |
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
150 | carrier_ok = netif_carrier_ok(dev); | 149 | carrier_ok = netif_carrier_ok(dev); |
151 | 150 | ||
152 | netif_carrier_off(dev); | 151 | netif_carrier_off(dev); |
153 | retry_tx: | ||
154 | /* Wait until all tx queues are empty. | 152 | /* Wait until all tx queues are empty. |
155 | * there should not be any additional incoming traffic | 153 | * there should not be any additional incoming traffic |
156 | * since we turned the carrier off */ | 154 | * since we turned the carrier off */ |
157 | msleep(200); | 155 | msleep(200); |
158 | for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { | ||
159 | tx_ring = priv->tx_ring[i]; | ||
160 | if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) | ||
161 | goto retry_tx; | ||
162 | } | ||
163 | 156 | ||
164 | if (priv->mdev->dev->caps.flags & | 157 | if (priv->mdev->dev->caps.flags & |
165 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { | 158 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f54ebd5a1702..a7fcd593b2db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk | |||
592 | } | 592 | } |
593 | } | 593 | } |
594 | 594 | ||
595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | 595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
596 | void *accel_priv) | ||
596 | { | 597 | { |
597 | struct mlx4_en_priv *priv = netdev_priv(dev); | 598 | struct mlx4_en_priv *priv = netdev_priv(dev); |
598 | u16 rings_p_up = priv->num_tx_rings_p_up; | 599 | u16 rings_p_up = priv->num_tx_rings_p_up; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5789ea2c934d..01fc6515384d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -2635,6 +2635,8 @@ static int __init mlx4_init(void) | |||
2635 | return -ENOMEM; | 2635 | return -ENOMEM; |
2636 | 2636 | ||
2637 | ret = pci_register_driver(&mlx4_driver); | 2637 | ret = pci_register_driver(&mlx4_driver); |
2638 | if (ret < 0) | ||
2639 | destroy_workqueue(mlx4_wq); | ||
2638 | return ret < 0 ? ret : 0; | 2640 | return ret < 0 ? ret : 0; |
2639 | } | 2641 | } |
2640 | 2642 | ||
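
Note on the one-line mlx4 fix above: it applies the usual module-init rule that anything set up before pci_register_driver() must be torn down on its failure path, otherwise a failed load leaks the workqueue. A simplified sketch of the resulting init; the real mlx4_init() performs additional parameter checks omitted here:

	static int __init mlx4_init(void)
	{
		int ret;

		mlx4_wq = create_singlethread_workqueue("mlx4");
		if (!mlx4_wq)
			return -ENOMEM;

		ret = pci_register_driver(&mlx4_driver);
		if (ret < 0)
			destroy_workqueue(mlx4_wq);	/* unwind on registration failure */

		return ret < 0 ? ret : 0;
	}
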
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f3758de59c05..d5758adceaa2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | |||
714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
715 | 715 | ||
716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | 716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); |
717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | 717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
718 | void *accel_priv); | ||
718 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 719 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
719 | 720 | ||
720 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 721 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 2d045be4b5cf..1e8b9514718b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c | |||
@@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
5150 | { | 5150 | { |
5151 | struct fe_priv *np = netdev_priv(dev); | 5151 | struct fe_priv *np = netdev_priv(dev); |
5152 | u8 __iomem *base = get_hwbase(dev); | 5152 | u8 __iomem *base = get_hwbase(dev); |
5153 | int result; | 5153 | int result, count; |
5154 | memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); | 5154 | |
5155 | count = nv_get_sset_count(dev, ETH_SS_TEST); | ||
5156 | memset(buffer, 0, count * sizeof(u64)); | ||
5155 | 5157 | ||
5156 | if (!nv_link_test(dev)) { | 5158 | if (!nv_link_test(dev)) { |
5157 | test->flags |= ETH_TEST_FL_FAILED; | 5159 | test->flags |= ETH_TEST_FL_FAILED; |
@@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
5195 | return; | 5197 | return; |
5196 | } | 5198 | } |
5197 | 5199 | ||
5198 | if (!nv_loopback_test(dev)) { | 5200 | if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { |
5199 | test->flags |= ETH_TEST_FL_FAILED; | 5201 | test->flags |= ETH_TEST_FL_FAILED; |
5200 | buffer[3] = 1; | 5202 | buffer[3] = 1; |
5201 | } | 5203 | } |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 7692dfd4f262..cc68657f0536 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter, | |||
1604 | u32 seq_number; | 1604 | u32 seq_number; |
1605 | u8 vhdr_len = 0; | 1605 | u8 vhdr_len = 0; |
1606 | 1606 | ||
1607 | if (unlikely(ring > adapter->max_rds_rings)) | 1607 | if (unlikely(ring >= adapter->max_rds_rings)) |
1608 | return NULL; | 1608 | return NULL; |
1609 | 1609 | ||
1610 | rds_ring = &recv_ctx->rds_rings[ring]; | 1610 | rds_ring = &recv_ctx->rds_rings[ring]; |
1611 | 1611 | ||
1612 | index = netxen_get_lro_sts_refhandle(sts_data0); | 1612 | index = netxen_get_lro_sts_refhandle(sts_data0); |
1613 | if (unlikely(index > rds_ring->num_desc)) | 1613 | if (unlikely(index >= rds_ring->num_desc)) |
1614 | return NULL; | 1614 | return NULL; |
1615 | 1615 | ||
1616 | buffer = &rds_ring->rx_buf_arr[index]; | 1616 | buffer = &rds_ring->rx_buf_arr[index]; |
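
Note on the two netxen changes above: both fix the same off-by-one. An index equal to the ring size is already out of bounds, so the guard must use ">=" rather than ">". A standalone illustration of the corrected check:

	/* Build with any C compiler; prints 1 for the last valid index, 0 past it. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool index_ok(unsigned int index, unsigned int num_desc)
	{
		return index < num_desc;	/* same as rejecting index >= num_desc */
	}

	int main(void)
	{
		unsigned int num_desc = 1024;

		printf("1023 ok: %d\n", index_ok(1023, num_desc));	/* 1 */
		printf("1024 ok: %d\n", index_ok(1024, num_desc));	/* 0 */
		return 0;
	}
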
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 631ea0ac1cd8..f2a7c7166e24 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context { | |||
487 | struct qlcnic_mailbox *mailbox; | 487 | struct qlcnic_mailbox *mailbox; |
488 | u8 extend_lb_time; | 488 | u8 extend_lb_time; |
489 | u8 phys_port_id[ETH_ALEN]; | 489 | u8 phys_port_id[ETH_ALEN]; |
490 | u8 lb_mode; | ||
490 | }; | 491 | }; |
491 | 492 | ||
492 | struct qlcnic_adapter_stats { | 493 | struct qlcnic_adapter_stats { |
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring { | |||
578 | dma_addr_t phys_addr; | 579 | dma_addr_t phys_addr; |
579 | dma_addr_t hw_cons_phys_addr; | 580 | dma_addr_t hw_cons_phys_addr; |
580 | struct netdev_queue *txq; | 581 | struct netdev_queue *txq; |
582 | /* Lock to protect Tx descriptors cleanup */ | ||
583 | spinlock_t tx_clean_lock; | ||
581 | } ____cacheline_internodealigned_in_smp; | 584 | } ____cacheline_internodealigned_in_smp; |
582 | 585 | ||
583 | /* | 586 | /* |
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s { | |||
808 | 811 | ||
809 | #define QLCNIC_ILB_MODE 0x1 | 812 | #define QLCNIC_ILB_MODE 0x1 |
810 | #define QLCNIC_ELB_MODE 0x2 | 813 | #define QLCNIC_ELB_MODE 0x2 |
814 | #define QLCNIC_LB_MODE_MASK 0x3 | ||
811 | 815 | ||
812 | #define QLCNIC_LINKEVENT 0x1 | 816 | #define QLCNIC_LINKEVENT 0x1 |
813 | #define QLCNIC_LB_RESPONSE 0x2 | 817 | #define QLCNIC_LB_RESPONSE 0x2 |
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter { | |||
1093 | struct qlcnic_filter_hash rx_fhash; | 1097 | struct qlcnic_filter_hash rx_fhash; |
1094 | struct list_head vf_mc_list; | 1098 | struct list_head vf_mc_list; |
1095 | 1099 | ||
1096 | spinlock_t tx_clean_lock; | ||
1097 | spinlock_t mac_learn_lock; | 1100 | spinlock_t mac_learn_lock; |
1098 | /* spinlock for catching rcv filters for eswitch traffic */ | 1101 | /* spinlock for catching rcv filters for eswitch traffic */ |
1099 | spinlock_t rx_mac_learn_lock; | 1102 | spinlock_t rx_mac_learn_lock; |
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); | |||
1708 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); | 1711 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); |
1709 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); | 1712 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); |
1710 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); | 1713 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); |
1714 | void qlcnic_update_stats(struct qlcnic_adapter *); | ||
1711 | 1715 | ||
1712 | /* Adapter hardware abstraction */ | 1716 | /* Adapter hardware abstraction */ |
1713 | struct qlcnic_hardware_ops { | 1717 | struct qlcnic_hardware_ops { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b1cb0ffb15c7..f776f99f7915 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data) | |||
447 | 447 | ||
448 | qlcnic_83xx_poll_process_aen(adapter); | 448 | qlcnic_83xx_poll_process_aen(adapter); |
449 | 449 | ||
450 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 450 | if (ahw->diag_test) { |
451 | ahw->diag_cnt++; | 451 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) |
452 | ahw->diag_cnt++; | ||
452 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); | 453 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); |
453 | return IRQ_HANDLED; | 454 | return IRQ_HANDLED; |
454 | } | 455 | } |
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, | |||
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | 1348 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { |
1348 | /* disable and free mailbox interrupt */ | ||
1349 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
1350 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
1351 | qlcnic_83xx_free_mbx_intr(adapter); | ||
1352 | } | ||
1353 | adapter->ahw->loopback_state = 0; | 1349 | adapter->ahw->loopback_state = 0; |
1354 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); | 1350 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); |
1355 | } | 1351 | } |
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
1363 | { | 1359 | { |
1364 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1360 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1365 | struct qlcnic_host_sds_ring *sds_ring; | 1361 | struct qlcnic_host_sds_ring *sds_ring; |
1366 | int ring, err; | 1362 | int ring; |
1367 | 1363 | ||
1368 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); | 1364 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); |
1369 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 1365 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { |
1370 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { | 1366 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { |
1371 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; | 1367 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; |
1372 | qlcnic_83xx_disable_intr(adapter, sds_ring); | 1368 | if (adapter->flags & QLCNIC_MSIX_ENABLED) |
1373 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) | 1369 | qlcnic_83xx_disable_intr(adapter, sds_ring); |
1374 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
1375 | } | 1370 | } |
1376 | } | 1371 | } |
1377 | 1372 | ||
1378 | qlcnic_fw_destroy_ctx(adapter); | 1373 | qlcnic_fw_destroy_ctx(adapter); |
1379 | qlcnic_detach(adapter); | 1374 | qlcnic_detach(adapter); |
1380 | 1375 | ||
1381 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | ||
1382 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
1383 | err = qlcnic_83xx_setup_mbx_intr(adapter); | ||
1384 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
1385 | if (err) { | ||
1386 | dev_err(&adapter->pdev->dev, | ||
1387 | "%s: failed to setup mbx interrupt\n", | ||
1388 | __func__); | ||
1389 | goto out; | ||
1390 | } | ||
1391 | } | ||
1392 | } | ||
1393 | adapter->ahw->diag_test = 0; | 1376 | adapter->ahw->diag_test = 0; |
1394 | adapter->drv_sds_rings = drv_sds_rings; | 1377 | adapter->drv_sds_rings = drv_sds_rings; |
1395 | 1378 | ||
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
1399 | if (netif_running(netdev)) | 1382 | if (netif_running(netdev)) |
1400 | __qlcnic_up(adapter, netdev); | 1383 | __qlcnic_up(adapter, netdev); |
1401 | 1384 | ||
1402 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && | ||
1403 | !(adapter->flags & QLCNIC_MSIX_ENABLED)) | ||
1404 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
1405 | out: | 1385 | out: |
1406 | netif_device_attach(netdev); | 1386 | netif_device_attach(netdev); |
1407 | } | 1387 | } |
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
1704 | } | 1684 | } |
1705 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1685 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
1706 | 1686 | ||
1707 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
1708 | if (netif_running(netdev)) { | ||
1709 | netif_carrier_off(netdev); | ||
1710 | netif_tx_stop_all_queues(netdev); | ||
1711 | } | ||
1712 | |||
1713 | ret = qlcnic_do_lb_test(adapter, mode); | 1687 | ret = qlcnic_do_lb_test(adapter, mode); |
1714 | 1688 | ||
1715 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1689 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, | |||
2141 | ahw->link_autoneg = MSB(MSW(data[3])); | 2115 | ahw->link_autoneg = MSB(MSW(data[3])); |
2142 | ahw->module_type = MSB(LSW(data[3])); | 2116 | ahw->module_type = MSB(LSW(data[3])); |
2143 | ahw->has_link_events = 1; | 2117 | ahw->has_link_events = 1; |
2118 | ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; | ||
2144 | qlcnic_advert_link_change(adapter, link_status); | 2119 | qlcnic_advert_link_change(adapter, link_status); |
2145 | } | 2120 | } |
2146 | 2121 | ||
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, | |||
3754 | return; | 3729 | return; |
3755 | } | 3730 | } |
3756 | 3731 | ||
3732 | static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) | ||
3733 | { | ||
3734 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
3735 | u32 offset; | ||
3736 | |||
3737 | offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); | ||
3738 | dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", | ||
3739 | readl(ahw->pci_base0 + offset), | ||
3740 | QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), | ||
3741 | QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), | ||
3742 | QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); | ||
3743 | } | ||
3744 | |||
3757 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | 3745 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) |
3758 | { | 3746 | { |
3759 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, | 3747 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, |
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | |||
3798 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, | 3786 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, |
3799 | ahw->op_mode); | 3787 | ahw->op_mode); |
3800 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); | 3788 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); |
3789 | qlcnic_dump_mailbox_registers(adapter); | ||
3790 | qlcnic_83xx_get_mbx_data(adapter, cmd); | ||
3801 | qlcnic_dump_mbx(adapter, cmd); | 3791 | qlcnic_dump_mbx(adapter, cmd); |
3802 | qlcnic_83xx_idc_request_reset(adapter, | 3792 | qlcnic_83xx_idc_request_reset(adapter, |
3803 | QLCNIC_FORCE_FW_DUMP_KEY); | 3793 | QLCNIC_FORCE_FW_DUMP_KEY); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 4cae6caa6bfa..a6a33508e401 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, | |||
662 | pci_channel_state_t); | 662 | pci_channel_state_t); |
663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); | 663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); |
664 | void qlcnic_83xx_io_resume(struct pci_dev *); | 664 | void qlcnic_83xx_io_resume(struct pci_dev *); |
665 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); | ||
665 | #endif | 666 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 89208e5b25d6..918e18ddf038 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) | |||
740 | adapter->ahw->idc.err_code = -EIO; | 740 | adapter->ahw->idc.err_code = -EIO; |
741 | dev_err(&adapter->pdev->dev, | 741 | dev_err(&adapter->pdev->dev, |
742 | "%s: Device in unknown state\n", __func__); | 742 | "%s: Device in unknown state\n", __func__); |
743 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
743 | return 0; | 744 | return 0; |
744 | } | 745 | } |
745 | 746 | ||
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
818 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 819 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
819 | struct qlcnic_mailbox *mbx = ahw->mailbox; | 820 | struct qlcnic_mailbox *mbx = ahw->mailbox; |
820 | int ret = 0; | 821 | int ret = 0; |
821 | u32 owner; | ||
822 | u32 val; | 822 | u32 val; |
823 | 823 | ||
824 | /* Perform NIC configuration based ready state entry actions */ | 824 | /* Perform NIC configuration based ready state entry actions */ |
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
848 | set_bit(__QLCNIC_RESETTING, &adapter->state); | 848 | set_bit(__QLCNIC_RESETTING, &adapter->state); |
849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); | 849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); |
850 | } else { | 850 | } else { |
851 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | 851 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
852 | if (ahw->pci_func == owner) | 852 | __func__); |
853 | qlcnic_dump_fw(adapter); | 853 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); |
854 | } | 854 | } |
855 | return -EIO; | 855 | return -EIO; |
856 | } | 856 | } |
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) | |||
948 | return 0; | 948 | return 0; |
949 | } | 949 | } |
950 | 950 | ||
951 | static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) | 951 | static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) |
952 | { | 952 | { |
953 | dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); | 953 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
954 | u32 val, owner; | ||
955 | |||
956 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | ||
957 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
958 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | ||
959 | if (ahw->pci_func == owner) { | ||
960 | qlcnic_83xx_stop_hw(adapter); | ||
961 | qlcnic_dump_fw(adapter); | ||
962 | } | ||
963 | } | ||
964 | |||
965 | netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", | ||
966 | __func__); | ||
954 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 967 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
955 | adapter->ahw->idc.err_code = -EIO; | 968 | ahw->idc.err_code = -EIO; |
956 | 969 | ||
957 | return 0; | 970 | return; |
958 | } | 971 | } |
959 | 972 | ||
960 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) | 973 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) |
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) | |||
1063 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; | 1076 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; |
1064 | qlcnic_83xx_periodic_tasks(adapter); | 1077 | qlcnic_83xx_periodic_tasks(adapter); |
1065 | 1078 | ||
1066 | /* Do not reschedule if firmaware is in hanged state and auto | ||
1067 | * recovery is disabled | ||
1068 | */ | ||
1069 | if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) | ||
1070 | return; | ||
1071 | |||
1072 | /* Re-schedule the function */ | 1079 | /* Re-schedule the function */ |
1073 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) | 1080 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) |
1074 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, | 1081 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, |
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) | |||
1219 | } | 1226 | } |
1220 | 1227 | ||
1221 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 1228 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
1222 | if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || | 1229 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { |
1223 | !qlcnic_auto_fw_reset) { | 1230 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
1224 | dev_err(&adapter->pdev->dev, | 1231 | __func__); |
1225 | "%s:failed, device in non reset mode\n", __func__); | 1232 | qlcnic_83xx_idc_enter_failed_state(adapter, 0); |
1226 | qlcnic_83xx_unlock_driver(adapter); | 1233 | qlcnic_83xx_unlock_driver(adapter); |
1227 | return; | 1234 | return; |
1228 | } | 1235 | } |
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) | |||
1254 | if (size & 0xF) | 1261 | if (size & 0xF) |
1255 | size = (size + 16) & ~0xF; | 1262 | size = (size + 16) & ~0xF; |
1256 | 1263 | ||
1257 | p_cache = kzalloc(size, GFP_KERNEL); | 1264 | p_cache = vzalloc(size); |
1258 | if (p_cache == NULL) | 1265 | if (p_cache == NULL) |
1259 | return -ENOMEM; | 1266 | return -ENOMEM; |
1260 | 1267 | ||
1261 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, | 1268 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, |
1262 | size / sizeof(u32)); | 1269 | size / sizeof(u32)); |
1263 | if (ret) { | 1270 | if (ret) { |
1264 | kfree(p_cache); | 1271 | vfree(p_cache); |
1265 | return ret; | 1272 | return ret; |
1266 | } | 1273 | } |
1267 | /* 16 byte write to MS memory */ | 1274 | /* 16 byte write to MS memory */ |
1268 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, | 1275 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, |
1269 | size / 16); | 1276 | size / 16); |
1270 | if (ret) { | 1277 | if (ret) { |
1271 | kfree(p_cache); | 1278 | vfree(p_cache); |
1272 | return ret; | 1279 | return ret; |
1273 | } | 1280 | } |
1274 | kfree(p_cache); | 1281 | vfree(p_cache); |
1275 | 1282 | ||
1276 | return ret; | 1283 | return ret; |
1277 | } | 1284 | } |
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, | |||
1939 | p_dev->ahw->reset.seq_index = index; | 1946 | p_dev->ahw->reset.seq_index = index; |
1940 | } | 1947 | } |
1941 | 1948 | ||
1942 | static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) | 1949 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) |
1943 | { | 1950 | { |
1944 | p_dev->ahw->reset.seq_index = 0; | 1951 | p_dev->ahw->reset.seq_index = 0; |
1945 | 1952 | ||
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) | |||
1994 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 2001 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
1995 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) | 2002 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) |
1996 | qlcnic_dump_fw(adapter); | 2003 | qlcnic_dump_fw(adapter); |
2004 | |||
2005 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
2006 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", | ||
2007 | __func__); | ||
2008 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); | ||
2009 | return err; | ||
2010 | } | ||
2011 | |||
1997 | qlcnic_83xx_init_hw(adapter); | 2012 | qlcnic_83xx_init_hw(adapter); |
1998 | 2013 | ||
1999 | if (qlcnic_83xx_copy_bootloader(adapter)) | 2014 | if (qlcnic_83xx_copy_bootloader(adapter)) |
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) | |||
2073 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 2088 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
2074 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; | 2089 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; |
2075 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; | 2090 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; |
2076 | adapter->max_sds_rings = ahw->max_rx_ques; | 2091 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; |
2077 | adapter->max_tx_rings = ahw->max_tx_ques; | 2092 | adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; |
2078 | } else { | 2093 | } else { |
2079 | return -EIO; | 2094 | return -EIO; |
2080 | } | 2095 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index b36c02fafcfd..6b08194aa0d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { | |||
167 | 167 | ||
168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) | 168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) |
169 | 169 | ||
170 | static inline int qlcnic_82xx_statistics(void) | 170 | static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) |
171 | { | 171 | { |
172 | return ARRAY_SIZE(qlcnic_device_gstrings_stats) + | 172 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); | 173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
174 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
174 | } | 175 | } |
175 | 176 | ||
176 | static inline int qlcnic_83xx_statistics(void) | 177 | static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) |
177 | { | 178 | { |
178 | return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | 179 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
180 | ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | ||
179 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + | 181 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
180 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); | 182 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + |
183 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
181 | } | 184 | } |
182 | 185 | ||
183 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) | 186 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) |
184 | { | 187 | { |
185 | if (qlcnic_82xx_check(adapter)) | 188 | int len = -1; |
186 | return qlcnic_82xx_statistics(); | 189 | |
187 | else if (qlcnic_83xx_check(adapter)) | 190 | if (qlcnic_82xx_check(adapter)) { |
188 | return qlcnic_83xx_statistics(); | 191 | len = qlcnic_82xx_statistics(adapter); |
189 | else | 192 | if (adapter->flags & QLCNIC_ESWITCH_ENABLED) |
190 | return -1; | 193 | len += ARRAY_SIZE(qlcnic_device_gstrings_stats); |
194 | } else if (qlcnic_83xx_check(adapter)) { | ||
195 | len = qlcnic_83xx_statistics(adapter); | ||
196 | } | ||
197 | |||
198 | return len; | ||
191 | } | 199 | } |
192 | 200 | ||
193 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 | 201 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 |
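
The reworked qlcnic_82xx_statistics()/qlcnic_83xx_statistics() helpers above make the ethtool statistics count a function of the active Tx ring count instead of a fixed value. A worked illustration with made-up sizes:

/* Hypothetical sizes, only to show how the count scales with Tx rings */
enum { BASE_STATS = 90, TX_STATS_PER_RING = 10 };

static int example_stats_len(int drv_tx_rings)
{
	return BASE_STATS + TX_STATS_PER_RING * drv_tx_rings;
}

/* 1 Tx ring -> 100 ethtool entries, 8 Tx rings -> 170 entries */
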
@@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev, | |||
667 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, | 675 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, |
668 | u8 rx_ring, u8 tx_ring) | 676 | u8 rx_ring, u8 tx_ring) |
669 | { | 677 | { |
678 | if (rx_ring == 0 || tx_ring == 0) | ||
679 | return -EINVAL; | ||
680 | |||
670 | if (rx_ring != 0) { | 681 | if (rx_ring != 0) { |
671 | if (rx_ring > adapter->max_sds_rings) { | 682 | if (rx_ring > adapter->max_sds_rings) { |
672 | netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | 683 | netdev_err(adapter->netdev, |
684 | "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | ||
673 | rx_ring, adapter->max_sds_rings); | 685 | rx_ring, adapter->max_sds_rings); |
674 | return -EINVAL; | 686 | return -EINVAL; |
675 | } | 687 | } |
676 | } | 688 | } |
677 | 689 | ||
678 | if (tx_ring != 0) { | 690 | if (tx_ring != 0) { |
679 | if (qlcnic_82xx_check(adapter) && | 691 | if (tx_ring > adapter->max_tx_rings) { |
680 | (tx_ring > adapter->max_tx_rings)) { | ||
681 | netdev_err(adapter->netdev, | 692 | netdev_err(adapter->netdev, |
682 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", | 693 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", |
683 | tx_ring, adapter->max_tx_rings); | 694 | tx_ring, adapter->max_tx_rings); |
684 | return -EINVAL; | 695 | return -EINVAL; |
685 | } | 696 | } |
686 | |||
687 | if (qlcnic_83xx_check(adapter) && | ||
688 | (tx_ring > QLCNIC_SINGLE_RING)) { | ||
689 | netdev_err(adapter->netdev, | ||
690 | "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", | ||
691 | tx_ring, QLCNIC_SINGLE_RING); | ||
692 | return -EINVAL; | ||
693 | } | ||
694 | } | 697 | } |
695 | 698 | ||
696 | return 0; | 699 | return 0; |
@@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) | |||
925 | 928 | ||
926 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) | 929 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) |
927 | { | 930 | { |
928 | int len; | ||
929 | 931 | ||
930 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 932 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
931 | switch (sset) { | 933 | switch (sset) { |
932 | case ETH_SS_TEST: | 934 | case ETH_SS_TEST: |
933 | return QLCNIC_TEST_LEN; | 935 | return QLCNIC_TEST_LEN; |
934 | case ETH_SS_STATS: | 936 | case ETH_SS_STATS: |
935 | len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; | 937 | return qlcnic_dev_statistics_len(adapter); |
936 | if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || | ||
937 | qlcnic_83xx_check(adapter)) | ||
938 | return len; | ||
939 | return qlcnic_82xx_statistics(); | ||
940 | default: | 938 | default: |
941 | return -EOPNOTSUPP; | 939 | return -EOPNOTSUPP; |
942 | } | 940 | } |
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev) | |||
948 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 946 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
949 | struct qlcnic_cmd_args cmd; | 947 | struct qlcnic_cmd_args cmd; |
950 | int ret, drv_sds_rings = adapter->drv_sds_rings; | 948 | int ret, drv_sds_rings = adapter->drv_sds_rings; |
949 | int drv_tx_rings = adapter->drv_tx_rings; | ||
951 | 950 | ||
952 | if (qlcnic_83xx_check(adapter)) | 951 | if (qlcnic_83xx_check(adapter)) |
953 | return qlcnic_83xx_interrupt_test(netdev); | 952 | return qlcnic_83xx_interrupt_test(netdev); |
@@ -980,6 +979,7 @@ free_diag_res: | |||
980 | 979 | ||
981 | clear_diag_irq: | 980 | clear_diag_irq: |
982 | adapter->drv_sds_rings = drv_sds_rings; | 981 | adapter->drv_sds_rings = drv_sds_rings; |
982 | adapter->drv_tx_rings = drv_tx_rings; | ||
983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
984 | 984 | ||
985 | return ret; | 985 | return ret; |
@@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) | |||
1270 | return data; | 1270 | return data; |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | static void qlcnic_update_stats(struct qlcnic_adapter *adapter) | 1273 | void qlcnic_update_stats(struct qlcnic_adapter *adapter) |
1274 | { | 1274 | { |
1275 | struct qlcnic_host_tx_ring *tx_ring; | 1275 | struct qlcnic_host_tx_ring *tx_ring; |
1276 | int ring; | 1276 | int ring; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e9c21e5d0ca9..c4262c23ed7c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | |||
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
134 | struct qlcnic_skb_frag *buffrag; | 134 | struct qlcnic_skb_frag *buffrag; |
135 | int i, j; | 135 | int i, j; |
136 | 136 | ||
137 | spin_lock(&tx_ring->tx_clean_lock); | ||
138 | |||
137 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
138 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
139 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
157 | } | 159 | } |
158 | cmd_buf++; | 160 | cmd_buf++; |
159 | } | 161 | } |
162 | |||
163 | spin_unlock(&tx_ring->tx_clean_lock); | ||
160 | } | 164 | } |
161 | 165 | ||
162 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) | 166 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0149c9495347..ad1531ae3aa8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) | |||
687 | if (adapter->ahw->linkup && !linkup) { | 687 | if (adapter->ahw->linkup && !linkup) { |
688 | netdev_info(netdev, "NIC Link is down\n"); | 688 | netdev_info(netdev, "NIC Link is down\n"); |
689 | adapter->ahw->linkup = 0; | 689 | adapter->ahw->linkup = 0; |
690 | if (netif_running(netdev)) { | 690 | netif_carrier_off(netdev); |
691 | netif_carrier_off(netdev); | ||
692 | netif_tx_stop_all_queues(netdev); | ||
693 | } | ||
694 | } else if (!adapter->ahw->linkup && linkup) { | 691 | } else if (!adapter->ahw->linkup && linkup) { |
692 | /* Do not advertise Link up if the port is in loopback mode */ | ||
693 | if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) | ||
694 | return; | ||
695 | |||
695 | netdev_info(netdev, "NIC Link is up\n"); | 696 | netdev_info(netdev, "NIC Link is up\n"); |
696 | adapter->ahw->linkup = 1; | 697 | adapter->ahw->linkup = 1; |
697 | if (netif_running(netdev)) { | 698 | netif_carrier_on(netdev); |
698 | netif_carrier_on(netdev); | ||
699 | netif_wake_queue(netdev); | ||
700 | } | ||
701 | } | 699 | } |
702 | } | 700 | } |
703 | 701 | ||
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
784 | struct net_device *netdev = adapter->netdev; | 782 | struct net_device *netdev = adapter->netdev; |
785 | struct qlcnic_skb_frag *frag; | 783 | struct qlcnic_skb_frag *frag; |
786 | 784 | ||
787 | if (!spin_trylock(&adapter->tx_clean_lock)) | 785 | if (!spin_trylock(&tx_ring->tx_clean_lock)) |
788 | return 1; | 786 | return 1; |
789 | 787 | ||
790 | sw_consumer = tx_ring->sw_consumer; | 788 | sw_consumer = tx_ring->sw_consumer; |
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
813 | break; | 811 | break; |
814 | } | 812 | } |
815 | 813 | ||
814 | tx_ring->sw_consumer = sw_consumer; | ||
815 | |||
816 | if (count && netif_running(netdev)) { | 816 | if (count && netif_running(netdev)) { |
817 | tx_ring->sw_consumer = sw_consumer; | ||
818 | smp_mb(); | 817 | smp_mb(); |
819 | if (netif_tx_queue_stopped(tx_ring->txq) && | 818 | if (netif_tx_queue_stopped(tx_ring->txq) && |
820 | netif_carrier_ok(netdev)) { | 819 | netif_carrier_ok(netdev)) { |
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
840 | */ | 839 | */ |
841 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); | 840 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); |
842 | done = (sw_consumer == hw_consumer); | 841 | done = (sw_consumer == hw_consumer); |
843 | spin_unlock(&adapter->tx_clean_lock); | 842 | |
843 | spin_unlock(&tx_ring->tx_clean_lock); | ||
844 | 844 | ||
845 | return done; | 845 | return done; |
846 | } | 846 | } |
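
The qlcnic_init.c and qlcnic_io.c hunks above (together with the qlcnic_main.c changes below) move Tx-clean serialization from one adapter-wide spinlock to a per-Tx-ring lock, so completion processing on different rings no longer contends. A minimal sketch of the per-ring pattern, with hypothetical type and function names:

#include <linux/spinlock.h>

struct example_tx_ring {
	spinlock_t tx_clean_lock;
	/* ... descriptors, sw/hw consumer indices ... */
};

static void example_ring_init(struct example_tx_ring *ring)
{
	spin_lock_init(&ring->tx_clean_lock);	/* once per ring, at alloc */
}

static int example_clean_ring(struct example_tx_ring *ring)
{
	if (!spin_trylock(&ring->tx_clean_lock))
		return 1;	/* someone else is already cleaning this ring */

	/* ... reclaim completed Tx buffers for this ring only ... */

	spin_unlock(&ring->tx_clean_lock);
	return 0;
}
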
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 05c1eef8df13..550791b8fbae 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) | |||
1178 | } else { | 1178 | } else { |
1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; | 1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; |
1181 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; | ||
1181 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; | 1182 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; |
1182 | } | 1183 | } |
1183 | 1184 | ||
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
1755 | if (qlcnic_sriov_vf_check(adapter)) | 1756 | if (qlcnic_sriov_vf_check(adapter)) |
1756 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); | 1757 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); |
1757 | smp_mb(); | 1758 | smp_mb(); |
1758 | spin_lock(&adapter->tx_clean_lock); | ||
1759 | netif_carrier_off(netdev); | 1759 | netif_carrier_off(netdev); |
1760 | adapter->ahw->linkup = 0; | 1760 | adapter->ahw->linkup = 0; |
1761 | netif_tx_disable(netdev); | 1761 | netif_tx_disable(netdev); |
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
1776 | 1776 | ||
1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) | 1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) |
1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); | 1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); |
1779 | spin_unlock(&adapter->tx_clean_lock); | ||
1780 | } | 1779 | } |
1781 | 1780 | ||
1782 | /* Usage: During suspend and firmware recovery module */ | 1781 | /* Usage: During suspend and firmware recovery module */ |
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) | |||
1940 | qlcnic_detach(adapter); | 1939 | qlcnic_detach(adapter); |
1941 | 1940 | ||
1942 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | 1941 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; |
1943 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; | ||
1944 | adapter->ahw->diag_test = test; | 1942 | adapter->ahw->diag_test = test; |
1945 | adapter->ahw->linkup = 0; | 1943 | adapter->ahw->linkup = 0; |
1946 | 1944 | ||
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, | |||
2172 | } | 2170 | } |
2173 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); | 2171 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); |
2174 | tx_ring->cmd_buf_arr = cmd_buf_arr; | 2172 | tx_ring->cmd_buf_arr = cmd_buf_arr; |
2173 | spin_lock_init(&tx_ring->tx_clean_lock); | ||
2175 | } | 2174 | } |
2176 | 2175 | ||
2177 | if (qlcnic_83xx_check(adapter) || | 2176 | if (qlcnic_83xx_check(adapter) || |
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2299 | rwlock_init(&adapter->ahw->crb_lock); | 2298 | rwlock_init(&adapter->ahw->crb_lock); |
2300 | mutex_init(&adapter->ahw->mem_lock); | 2299 | mutex_init(&adapter->ahw->mem_lock); |
2301 | 2300 | ||
2302 | spin_lock_init(&adapter->tx_clean_lock); | ||
2303 | INIT_LIST_HEAD(&adapter->mac_list); | 2301 | INIT_LIST_HEAD(&adapter->mac_list); |
2304 | 2302 | ||
2305 | qlcnic_register_dcb(adapter); | 2303 | qlcnic_register_dcb(adapter); |
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) | |||
2782 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 2780 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
2783 | struct net_device_stats *stats = &netdev->stats; | 2781 | struct net_device_stats *stats = &netdev->stats; |
2784 | 2782 | ||
2783 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) | ||
2784 | qlcnic_update_stats(adapter); | ||
2785 | |||
2785 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; | 2786 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; |
2786 | stats->tx_packets = adapter->stats.xmitfinished; | 2787 | stats->tx_packets = adapter->stats.xmitfinished; |
2787 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; | 2788 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 686f460b1502..024f8161d2fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
75 | num_vfs = sriov->num_vfs; | 75 | num_vfs = sriov->num_vfs; |
76 | max = num_vfs + 1; | 76 | max = num_vfs + 1; |
77 | info->bit_offsets = 0xffff; | 77 | info->bit_offsets = 0xffff; |
78 | info->max_tx_ques = res->num_tx_queues / max; | ||
79 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; | 78 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; |
80 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; | 79 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; |
81 | 80 | ||
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
86 | info->max_tx_mac_filters = temp; | 85 | info->max_tx_mac_filters = temp; |
87 | info->min_tx_bw = 0; | 86 | info->min_tx_bw = 0; |
88 | info->max_tx_bw = MAX_BW; | 87 | info->max_tx_bw = MAX_BW; |
88 | info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; | ||
89 | } else { | 89 | } else { |
90 | id = qlcnic_sriov_func_to_index(adapter, func); | 90 | id = qlcnic_sriov_func_to_index(adapter, func); |
91 | if (id < 0) | 91 | if (id < 0) |
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
95 | info->max_tx_bw = vp->max_tx_bw; | 95 | info->max_tx_bw = vp->max_tx_bw; |
96 | info->max_rx_ucast_mac_filters = num_vf_macs; | 96 | info->max_rx_ucast_mac_filters = num_vf_macs; |
97 | info->max_tx_mac_filters = num_vf_macs; | 97 | info->max_tx_mac_filters = num_vf_macs; |
98 | info->max_tx_ques = QLCNIC_SINGLE_RING; | ||
98 | } | 99 | } |
99 | 100 | ||
100 | info->max_rx_ip_addr = res->num_destip / max; | 101 | info->max_rx_ip_addr = res->num_destip / max; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 0c9c4e895595..03517478e589 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "1.00.00.33" | 21 | #define DRV_VERSION "1.00.00.34" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 0780e039b271..8dee1beb9854 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | |||
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { | |||
181 | }; | 181 | }; |
182 | #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) | 182 | #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) |
183 | #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) | 183 | #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) |
184 | #define QLGE_RCV_MAC_ERR_STATS 7 | ||
184 | 185 | ||
185 | static int ql_update_ring_coalescing(struct ql_adapter *qdev) | 186 | static int ql_update_ring_coalescing(struct ql_adapter *qdev) |
186 | { | 187 | { |
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev) | |||
280 | iter++; | 281 | iter++; |
281 | } | 282 | } |
282 | 283 | ||
284 | /* Update receive mac error statistics */ | ||
285 | iter += QLGE_RCV_MAC_ERR_STATS; | ||
286 | |||
283 | /* | 287 | /* |
284 | * Get Per-priority TX pause frame counter statistics. | 288 | * Get Per-priority TX pause frame counter statistics. |
285 | */ | 289 | */ |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index a245dc18d769..449f506d2e8f 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, | |||
2376 | netdev_features_t features) | 2376 | netdev_features_t features) |
2377 | { | 2377 | { |
2378 | int err; | 2378 | int err; |
2379 | /* | ||
2380 | * Since there is no support for separate rx/tx vlan accel | ||
2381 | * enable/disable make sure tx flag is always in same state as rx. | ||
2382 | */ | ||
2383 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | ||
2384 | features |= NETIF_F_HW_VLAN_CTAG_TX; | ||
2385 | else | ||
2386 | features &= ~NETIF_F_HW_VLAN_CTAG_TX; | ||
2387 | 2379 | ||
2388 | /* Update the behavior of vlan accel in the adapter */ | 2380 | /* Update the behavior of vlan accel in the adapter */ |
2389 | err = qlge_update_hw_vlan_features(ndev, features); | 2381 | err = qlge_update_hw_vlan_features(ndev, features); |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index f2a2128165dd..737c1a881f78 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp) | |||
678 | le32_to_cpu(txd->opts1) & 0xffff, | 678 | le32_to_cpu(txd->opts1) & 0xffff, |
679 | PCI_DMA_TODEVICE); | 679 | PCI_DMA_TODEVICE); |
680 | 680 | ||
681 | bytes_compl += skb->len; | ||
682 | pkts_compl++; | ||
683 | |||
684 | if (status & LastFrag) { | 681 | if (status & LastFrag) { |
685 | if (status & (TxError | TxFIFOUnder)) { | 682 | if (status & (TxError | TxFIFOUnder)) { |
686 | netif_dbg(cp, tx_err, cp->dev, | 683 | netif_dbg(cp, tx_err, cp->dev, |
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp) | |||
702 | netif_dbg(cp, tx_done, cp->dev, | 699 | netif_dbg(cp, tx_done, cp->dev, |
703 | "tx done, slot %d\n", tx_tail); | 700 | "tx done, slot %d\n", tx_tail); |
704 | } | 701 | } |
702 | bytes_compl += skb->len; | ||
703 | pkts_compl++; | ||
705 | dev_kfree_skb_irq(skb); | 704 | dev_kfree_skb_irq(skb); |
706 | } | 705 | } |
707 | 706 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 799387570766..c737f0ea5de7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) | |||
3465 | rtl_writephy(tp, 0x14, 0x9065); | 3465 | rtl_writephy(tp, 0x14, 0x9065); |
3466 | rtl_writephy(tp, 0x14, 0x1065); | 3466 | rtl_writephy(tp, 0x14, 0x1065); |
3467 | 3467 | ||
3468 | /* Check ALDPS bit, disable it if enabled */ | ||
3469 | rtl_writephy(tp, 0x1f, 0x0a43); | ||
3470 | if (rtl_readphy(tp, 0x10) & 0x0004) | ||
3471 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); | ||
3472 | |||
3468 | rtl_writephy(tp, 0x1f, 0x0000); | 3473 | rtl_writephy(tp, 0x1f, 0x0000); |
3469 | } | 3474 | } |
3470 | 3475 | ||
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2e27837ce6a2..fd844b53e385 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx) | |||
585 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | 585 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + |
586 | efx->type->rx_buffer_padding); | 586 | efx->type->rx_buffer_padding); |
587 | rx_buf_len = (sizeof(struct efx_rx_page_state) + | 587 | rx_buf_len = (sizeof(struct efx_rx_page_state) + |
588 | NET_IP_ALIGN + efx->rx_dma_len); | 588 | efx->rx_ip_align + efx->rx_dma_len); |
589 | if (rx_buf_len <= PAGE_SIZE) { | 589 | if (rx_buf_len <= PAGE_SIZE) { |
590 | efx->rx_scatter = efx->type->always_rx_scatter; | 590 | efx->rx_scatter = efx->type->always_rx_scatter; |
591 | efx->rx_buffer_order = 0; | 591 | efx->rx_buffer_order = 0; |
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx) | |||
645 | WARN_ON(channel->rx_pkt_n_frags); | 645 | WARN_ON(channel->rx_pkt_n_frags); |
646 | } | 646 | } |
647 | 647 | ||
648 | efx_ptp_start_datapath(efx); | ||
649 | |||
648 | if (netif_device_present(efx->net_dev)) | 650 | if (netif_device_present(efx->net_dev)) |
649 | netif_tx_wake_all_queues(efx->net_dev); | 651 | netif_tx_wake_all_queues(efx->net_dev); |
650 | } | 652 | } |
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx) | |||
659 | EFX_ASSERT_RESET_SERIALISED(efx); | 661 | EFX_ASSERT_RESET_SERIALISED(efx); |
660 | BUG_ON(efx->port_enabled); | 662 | BUG_ON(efx->port_enabled); |
661 | 663 | ||
664 | efx_ptp_stop_datapath(efx); | ||
665 | |||
662 | /* Stop RX refill */ | 666 | /* Stop RX refill */ |
663 | efx_for_each_channel(channel, efx) { | 667 | efx_for_each_channel(channel, efx) { |
664 | efx_for_each_channel_rx_queue(rx_queue, channel) | 668 | efx_for_each_channel_rx_queue(rx_queue, channel) |
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx, | |||
2540 | 2544 | ||
2541 | efx->net_dev = net_dev; | 2545 | efx->net_dev = net_dev; |
2542 | efx->rx_prefix_size = efx->type->rx_prefix_size; | 2546 | efx->rx_prefix_size = efx->type->rx_prefix_size; |
2547 | efx->rx_ip_align = | ||
2548 | NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; | ||
2543 | efx->rx_packet_hash_offset = | 2549 | efx->rx_packet_hash_offset = |
2544 | efx->type->rx_hash_offset - efx->type->rx_prefix_size; | 2550 | efx->type->rx_hash_offset - efx->type->rx_prefix_size; |
2545 | spin_lock_init(&efx->stats_lock); | 2551 | spin_lock_init(&efx->stats_lock); |
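
The efx.c change above replaces the bare NET_IP_ALIGN offset with efx->rx_ip_align, derived from the hardware RX prefix size, so the alignment padding accounts for the prefix that precedes the packet. Purely as an illustration of the arithmetic (the prefix length is a made-up value):

#include <linux/skbuff.h>	/* NET_IP_ALIGN */

static unsigned int example_rx_ip_align(unsigned int rx_prefix_size)
{
	/* 0 when NET_IP_ALIGN is 0; otherwise fold the prefix length into
	 * the offset so the headers land on the intended alignment.
	 */
	return NET_IP_ALIGN ? (rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
}

/* e.g. a 14-byte prefix with NET_IP_ALIGN == 2 gives an offset of 0 */
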
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 366c8e3e3784..4b0bd8a1514d 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param { | |||
50 | static void efx_mcdi_timeout_async(unsigned long context); | 50 | static void efx_mcdi_timeout_async(unsigned long context); |
51 | static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | 51 | static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, |
52 | bool *was_attached_out); | 52 | bool *was_attached_out); |
53 | static bool efx_mcdi_poll_once(struct efx_nic *efx); | ||
53 | 54 | ||
54 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | 55 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) |
55 | { | 56 | { |
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) | |||
237 | } | 238 | } |
238 | } | 239 | } |
239 | 240 | ||
241 | static bool efx_mcdi_poll_once(struct efx_nic *efx) | ||
242 | { | ||
243 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
244 | |||
245 | rmb(); | ||
246 | if (!efx->type->mcdi_poll_response(efx)) | ||
247 | return false; | ||
248 | |||
249 | spin_lock_bh(&mcdi->iface_lock); | ||
250 | efx_mcdi_read_response_header(efx); | ||
251 | spin_unlock_bh(&mcdi->iface_lock); | ||
252 | |||
253 | return true; | ||
254 | } | ||
255 | |||
240 | static int efx_mcdi_poll(struct efx_nic *efx) | 256 | static int efx_mcdi_poll(struct efx_nic *efx) |
241 | { | 257 | { |
242 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 258 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
272 | 288 | ||
273 | time = jiffies; | 289 | time = jiffies; |
274 | 290 | ||
275 | rmb(); | 291 | if (efx_mcdi_poll_once(efx)) |
276 | if (efx->type->mcdi_poll_response(efx)) | ||
277 | break; | 292 | break; |
278 | 293 | ||
279 | if (time_after(time, finish)) | 294 | if (time_after(time, finish)) |
280 | return -ETIMEDOUT; | 295 | return -ETIMEDOUT; |
281 | } | 296 | } |
282 | 297 | ||
283 | spin_lock_bh(&mcdi->iface_lock); | ||
284 | efx_mcdi_read_response_header(efx); | ||
285 | spin_unlock_bh(&mcdi->iface_lock); | ||
286 | |||
287 | /* Return rc=0 like wait_event_timeout() */ | 298 | /* Return rc=0 like wait_event_timeout() */ |
288 | return 0; | 299 | return 0; |
289 | } | 300 | } |
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, | |||
619 | rc = efx_mcdi_await_completion(efx); | 630 | rc = efx_mcdi_await_completion(efx); |
620 | 631 | ||
621 | if (rc != 0) { | 632 | if (rc != 0) { |
633 | netif_err(efx, hw, efx->net_dev, | ||
634 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
635 | cmd, (int)inlen, mcdi->mode); | ||
636 | |||
637 | if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { | ||
638 | netif_err(efx, hw, efx->net_dev, | ||
639 | "MCDI request was completed without an event\n"); | ||
640 | rc = 0; | ||
641 | } | ||
642 | |||
622 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | 643 | /* Close the race with efx_mcdi_ev_cpl() executing just too late |
623 | * and completing a request we've just cancelled, by ensuring | 644 | * and completing a request we've just cancelled, by ensuring |
624 | * that the seqno check therein fails. | 645 | * that the seqno check therein fails. |
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, | |||
627 | ++mcdi->seqno; | 648 | ++mcdi->seqno; |
628 | ++mcdi->credits; | 649 | ++mcdi->credits; |
629 | spin_unlock_bh(&mcdi->iface_lock); | 650 | spin_unlock_bh(&mcdi->iface_lock); |
651 | } | ||
630 | 652 | ||
631 | netif_err(efx, hw, efx->net_dev, | 653 | if (rc == 0) { |
632 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
633 | cmd, (int)inlen, mcdi->mode); | ||
634 | } else { | ||
635 | size_t hdr_len, data_len; | 654 | size_t hdr_len, data_len; |
636 | 655 | ||
637 | /* At the very least we need a memory barrier here to ensure | 656 | /* At the very least we need a memory barrier here to ensure |
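
The mcdi.c hunks above factor the single response check into efx_mcdi_poll_once() and reuse it on the timeout path, so a command that completed without ever raising an event is still treated as successful. A condensed sketch of that control flow, with hypothetical types and helpers standing in for the driver's own:

#include <linux/types.h>

struct mc_iface {
	bool (*response_ready)(struct mc_iface *mc);
	void (*read_response_header)(struct mc_iface *mc);
	int  (*wait_for_event)(struct mc_iface *mc);	/* 0 or -ETIMEDOUT */
};

static bool mc_poll_once(struct mc_iface *mc)
{
	if (!mc->response_ready(mc))
		return false;
	mc->read_response_header(mc);
	return true;
}

static int mc_finish(struct mc_iface *mc)
{
	int rc = mc->wait_for_event(mc);

	/* Timed out waiting for the event: the command may still have
	 * completed, so check the response area one more time.
	 */
	if (rc != 0 && mc_poll_once(mc))
		rc = 0;

	return rc;
}
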
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 656a3277c2b2..15816cacb548 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h | |||
@@ -75,6 +75,8 @@ struct efx_mcdi_mon { | |||
75 | unsigned long last_update; | 75 | unsigned long last_update; |
76 | struct device *device; | 76 | struct device *device; |
77 | struct efx_mcdi_mon_attribute *attrs; | 77 | struct efx_mcdi_mon_attribute *attrs; |
78 | struct attribute_group group; | ||
79 | const struct attribute_group *groups[2]; | ||
78 | unsigned int n_attrs; | 80 | unsigned int n_attrs; |
79 | }; | 81 | }; |
80 | 82 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 4cc5d95b2a5a..d72ad4fc3617 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c | |||
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) | |||
139 | return rc; | 139 | return rc; |
140 | } | 140 | } |
141 | 141 | ||
142 | static ssize_t efx_mcdi_mon_show_name(struct device *dev, | ||
143 | struct device_attribute *attr, | ||
144 | char *buf) | ||
145 | { | ||
146 | return sprintf(buf, "%s\n", KBUILD_MODNAME); | ||
147 | } | ||
148 | |||
149 | static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, | 142 | static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, |
150 | efx_dword_t *entry) | 143 | efx_dword_t *entry) |
151 | { | 144 | { |
152 | struct efx_nic *efx = dev_get_drvdata(dev); | 145 | struct efx_nic *efx = dev_get_drvdata(dev->parent); |
153 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 146 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
154 | int rc; | 147 | int rc; |
155 | 148 | ||
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev, | |||
263 | efx_mcdi_sensor_type[mon_attr->type].label); | 256 | efx_mcdi_sensor_type[mon_attr->type].label); |
264 | } | 257 | } |
265 | 258 | ||
266 | static int | 259 | static void |
267 | efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | 260 | efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, |
268 | ssize_t (*reader)(struct device *, | 261 | ssize_t (*reader)(struct device *, |
269 | struct device_attribute *, char *), | 262 | struct device_attribute *, char *), |
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | |||
272 | { | 265 | { |
273 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 266 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
274 | struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; | 267 | struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; |
275 | int rc; | ||
276 | 268 | ||
277 | strlcpy(attr->name, name, sizeof(attr->name)); | 269 | strlcpy(attr->name, name, sizeof(attr->name)); |
278 | attr->index = index; | 270 | attr->index = index; |
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | |||
286 | attr->dev_attr.attr.name = attr->name; | 278 | attr->dev_attr.attr.name = attr->name; |
287 | attr->dev_attr.attr.mode = S_IRUGO; | 279 | attr->dev_attr.attr.mode = S_IRUGO; |
288 | attr->dev_attr.show = reader; | 280 | attr->dev_attr.show = reader; |
289 | rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); | 281 | hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; |
290 | if (rc == 0) | ||
291 | ++hwmon->n_attrs; | ||
292 | return rc; | ||
293 | } | 282 | } |
294 | 283 | ||
295 | int efx_mcdi_mon_probe(struct efx_nic *efx) | 284 | int efx_mcdi_mon_probe(struct efx_nic *efx) |
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
338 | efx_mcdi_mon_update(efx); | 327 | efx_mcdi_mon_update(efx); |
339 | 328 | ||
340 | /* Allocate space for the maximum possible number of | 329 | /* Allocate space for the maximum possible number of |
341 | * attributes for this set of sensors: name of the driver plus | 330 | * attributes for this set of sensors: |
342 | * value, min, max, crit, alarm and label for each sensor. | 331 | * value, min, max, crit, alarm and label for each sensor. |
343 | */ | 332 | */ |
344 | n_attrs = 1 + 6 * n_sensors; | 333 | n_attrs = 6 * n_sensors; |
345 | hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); | 334 | hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); |
346 | if (!hwmon->attrs) { | 335 | if (!hwmon->attrs) { |
347 | rc = -ENOMEM; | 336 | rc = -ENOMEM; |
348 | goto fail; | 337 | goto fail; |
349 | } | 338 | } |
350 | 339 | hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), | |
351 | hwmon->device = hwmon_device_register(&efx->pci_dev->dev); | 340 | GFP_KERNEL); |
352 | if (IS_ERR(hwmon->device)) { | 341 | if (!hwmon->group.attrs) { |
353 | rc = PTR_ERR(hwmon->device); | 342 | rc = -ENOMEM; |
354 | goto fail; | 343 | goto fail; |
355 | } | 344 | } |
356 | 345 | ||
357 | rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0); | ||
358 | if (rc) | ||
359 | goto fail; | ||
360 | |||
361 | for (i = 0, j = -1, type = -1; ; i++) { | 346 | for (i = 0, j = -1, type = -1; ; i++) { |
362 | enum efx_hwmon_type hwmon_type; | 347 | enum efx_hwmon_type hwmon_type; |
363 | const char *hwmon_prefix; | 348 | const char *hwmon_prefix; |
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
372 | page = type / 32; | 357 | page = type / 32; |
373 | j = -1; | 358 | j = -1; |
374 | if (page == n_pages) | 359 | if (page == n_pages) |
375 | return 0; | 360 | goto hwmon_register; |
376 | 361 | ||
377 | MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, | 362 | MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, |
378 | page); | 363 | page); |
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
453 | if (min1 != max1) { | 438 | if (min1 != max1) { |
454 | snprintf(name, sizeof(name), "%s%u_input", | 439 | snprintf(name, sizeof(name), "%s%u_input", |
455 | hwmon_prefix, hwmon_index); | 440 | hwmon_prefix, hwmon_index); |
456 | rc = efx_mcdi_mon_add_attr( | 441 | efx_mcdi_mon_add_attr( |
457 | efx, name, efx_mcdi_mon_show_value, i, type, 0); | 442 | efx, name, efx_mcdi_mon_show_value, i, type, 0); |
458 | if (rc) | ||
459 | goto fail; | ||
460 | 443 | ||
461 | if (hwmon_type != EFX_HWMON_POWER) { | 444 | if (hwmon_type != EFX_HWMON_POWER) { |
462 | snprintf(name, sizeof(name), "%s%u_min", | 445 | snprintf(name, sizeof(name), "%s%u_min", |
463 | hwmon_prefix, hwmon_index); | 446 | hwmon_prefix, hwmon_index); |
464 | rc = efx_mcdi_mon_add_attr( | 447 | efx_mcdi_mon_add_attr( |
465 | efx, name, efx_mcdi_mon_show_limit, | 448 | efx, name, efx_mcdi_mon_show_limit, |
466 | i, type, min1); | 449 | i, type, min1); |
467 | if (rc) | ||
468 | goto fail; | ||
469 | } | 450 | } |
470 | 451 | ||
471 | snprintf(name, sizeof(name), "%s%u_max", | 452 | snprintf(name, sizeof(name), "%s%u_max", |
472 | hwmon_prefix, hwmon_index); | 453 | hwmon_prefix, hwmon_index); |
473 | rc = efx_mcdi_mon_add_attr( | 454 | efx_mcdi_mon_add_attr( |
474 | efx, name, efx_mcdi_mon_show_limit, | 455 | efx, name, efx_mcdi_mon_show_limit, |
475 | i, type, max1); | 456 | i, type, max1); |
476 | if (rc) | ||
477 | goto fail; | ||
478 | 457 | ||
479 | if (min2 != max2) { | 458 | if (min2 != max2) { |
480 | /* Assume max2 is critical value. | 459 | /* Assume max2 is critical value. |
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
482 | */ | 461 | */ |
483 | snprintf(name, sizeof(name), "%s%u_crit", | 462 | snprintf(name, sizeof(name), "%s%u_crit", |
484 | hwmon_prefix, hwmon_index); | 463 | hwmon_prefix, hwmon_index); |
485 | rc = efx_mcdi_mon_add_attr( | 464 | efx_mcdi_mon_add_attr( |
486 | efx, name, efx_mcdi_mon_show_limit, | 465 | efx, name, efx_mcdi_mon_show_limit, |
487 | i, type, max2); | 466 | i, type, max2); |
488 | if (rc) | ||
489 | goto fail; | ||
490 | } | 467 | } |
491 | } | 468 | } |
492 | 469 | ||
493 | snprintf(name, sizeof(name), "%s%u_alarm", | 470 | snprintf(name, sizeof(name), "%s%u_alarm", |
494 | hwmon_prefix, hwmon_index); | 471 | hwmon_prefix, hwmon_index); |
495 | rc = efx_mcdi_mon_add_attr( | 472 | efx_mcdi_mon_add_attr( |
496 | efx, name, efx_mcdi_mon_show_alarm, i, type, 0); | 473 | efx, name, efx_mcdi_mon_show_alarm, i, type, 0); |
497 | if (rc) | ||
498 | goto fail; | ||
499 | 474 | ||
500 | if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && | 475 | if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && |
501 | efx_mcdi_sensor_type[type].label) { | 476 | efx_mcdi_sensor_type[type].label) { |
502 | snprintf(name, sizeof(name), "%s%u_label", | 477 | snprintf(name, sizeof(name), "%s%u_label", |
503 | hwmon_prefix, hwmon_index); | 478 | hwmon_prefix, hwmon_index); |
504 | rc = efx_mcdi_mon_add_attr( | 479 | efx_mcdi_mon_add_attr( |
505 | efx, name, efx_mcdi_mon_show_label, i, type, 0); | 480 | efx, name, efx_mcdi_mon_show_label, i, type, 0); |
506 | if (rc) | ||
507 | goto fail; | ||
508 | } | 481 | } |
509 | } | 482 | } |
510 | 483 | ||
484 | hwmon_register: | ||
485 | hwmon->groups[0] = &hwmon->group; | ||
486 | hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, | ||
487 | KBUILD_MODNAME, NULL, | ||
488 | hwmon->groups); | ||
489 | if (IS_ERR(hwmon->device)) { | ||
490 | rc = PTR_ERR(hwmon->device); | ||
491 | goto fail; | ||
492 | } | ||
493 | |||
494 | return 0; | ||
495 | |||
511 | fail: | 496 | fail: |
512 | efx_mcdi_mon_remove(efx); | 497 | efx_mcdi_mon_remove(efx); |
513 | return rc; | 498 | return rc; |
@@ -516,14 +501,11 @@ fail: | |||
516 | void efx_mcdi_mon_remove(struct efx_nic *efx) | 501 | void efx_mcdi_mon_remove(struct efx_nic *efx) |
517 | { | 502 | { |
518 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 503 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
519 | unsigned int i; | ||
520 | 504 | ||
521 | for (i = 0; i < hwmon->n_attrs; i++) | ||
522 | device_remove_file(&efx->pci_dev->dev, | ||
523 | &hwmon->attrs[i].dev_attr); | ||
524 | kfree(hwmon->attrs); | ||
525 | if (hwmon->device) | 505 | if (hwmon->device) |
526 | hwmon_device_unregister(hwmon->device); | 506 | hwmon_device_unregister(hwmon->device); |
507 | kfree(hwmon->attrs); | ||
508 | kfree(hwmon->group.attrs); | ||
527 | efx_nic_free_buffer(efx, &hwmon->dma_buf); | 509 | efx_nic_free_buffer(efx, &hwmon->dma_buf); |
528 | } | 510 | } |
529 | 511 | ||
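
The mcdi_mon.c rework above stops registering each sensor attribute individually with device_create_file() and instead collects them into an attribute group handed to hwmon_device_register_with_groups(), which also supplies the hwmon name attribute. A minimal sketch of that registration style; the sensor attribute and its value are hypothetical:

#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* placeholder: 42.0 degC */
}
static DEVICE_ATTR_RO(temp1_input);

static struct attribute *example_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static struct device *example_hwmon_register(struct device *parent)
{
	/* "example" becomes the hwmon name; no device_create_file() calls
	 * are needed for the members of example_groups.
	 */
	return hwmon_device_register_with_groups(parent, "example",
						 NULL, example_groups);
}
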
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b14a717ac3e8..542a0d252ae0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -683,6 +683,8 @@ struct vfdi_status; | |||
683 | * @n_channels: Number of channels in use | 683 | * @n_channels: Number of channels in use |
684 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) | 684 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) |
685 | * @n_tx_channels: Number of channels used for TX | 685 | * @n_tx_channels: Number of channels used for TX |
686 | * @rx_ip_align: RX DMA address offset to have IP header aligned in | ||
687 | * accordance with NET_IP_ALIGN ||
686 | * @rx_dma_len: Current maximum RX DMA length | 688 | * @rx_dma_len: Current maximum RX DMA length |
687 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | 689 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer |
688 | * @rx_buffer_truesize: Amortised allocation size of an RX buffer, | 690 | * @rx_buffer_truesize: Amortised allocation size of an RX buffer, |
@@ -816,6 +818,7 @@ struct efx_nic { | |||
816 | unsigned rss_spread; | 818 | unsigned rss_spread; |
817 | unsigned tx_channel_offset; | 819 | unsigned tx_channel_offset; |
818 | unsigned n_tx_channels; | 820 | unsigned n_tx_channels; |
821 | unsigned int rx_ip_align; | ||
819 | unsigned int rx_dma_len; | 822 | unsigned int rx_dma_len; |
820 | unsigned int rx_buffer_order; | 823 | unsigned int rx_buffer_order; |
821 | unsigned int rx_buffer_truesize; | 824 | unsigned int rx_buffer_truesize; |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 11b6112d9249..91c63ec79c5f 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); | |||
560 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); | 560 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); |
561 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); | 561 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); |
562 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); | 562 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); |
563 | void efx_ptp_start_datapath(struct efx_nic *efx); | ||
564 | void efx_ptp_stop_datapath(struct efx_nic *efx); | ||
563 | 565 | ||
564 | extern const struct efx_nic_type falcon_a1_nic_type; | 566 | extern const struct efx_nic_type falcon_a1_nic_type; |
565 | extern const struct efx_nic_type falcon_b0_nic_type; | 567 | extern const struct efx_nic_type falcon_b0_nic_type; |
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 03acf57df045..3dd39dcfe36b 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -220,6 +220,7 @@ struct efx_ptp_timeset { | |||
220 | * @evt_list: List of MC receive events awaiting packets | 220 | * @evt_list: List of MC receive events awaiting packets |
221 | * @evt_free_list: List of free events | 221 | * @evt_free_list: List of free events |
222 | * @evt_lock: Lock for manipulating evt_list and evt_free_list | 222 | * @evt_lock: Lock for manipulating evt_list and evt_free_list |
223 | * @evt_overflow: Boolean indicating that event list has overflowed | ||
223 | * @rx_evts: Instantiated events (on evt_list and evt_free_list) | 224 | * @rx_evts: Instantiated events (on evt_list and evt_free_list) |
224 | * @workwq: Work queue for processing pending PTP operations | 225 | * @workwq: Work queue for processing pending PTP operations |
225 | * @work: Work task | 226 | * @work: Work task |
@@ -270,6 +271,7 @@ struct efx_ptp_data { | |||
270 | struct list_head evt_list; | 271 | struct list_head evt_list; |
271 | struct list_head evt_free_list; | 272 | struct list_head evt_free_list; |
272 | spinlock_t evt_lock; | 273 | spinlock_t evt_lock; |
274 | bool evt_overflow; | ||
273 | struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; | 275 | struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; |
274 | struct workqueue_struct *workwq; | 276 | struct workqueue_struct *workwq; |
275 | struct work_struct work; | 277 | struct work_struct work; |
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) | |||
635 | } | 637 | } |
636 | } | 638 | } |
637 | } | 639 | } |
640 | /* If the event overflow flag is set and the event list is now empty | ||
641 | * clear the flag to re-enable the overflow warning message. | ||
642 | */ | ||
643 | if (ptp->evt_overflow && list_empty(&ptp->evt_list)) | ||
644 | ptp->evt_overflow = false; | ||
638 | spin_unlock_bh(&ptp->evt_lock); | 645 | spin_unlock_bh(&ptp->evt_lock); |
639 | } | 646 | } |
640 | 647 | ||
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, | |||
676 | break; | 683 | break; |
677 | } | 684 | } |
678 | } | 685 | } |
686 | /* If the event overflow flag is set and the event list is now empty | ||
687 | * clear the flag to re-enable the overflow warning message. | ||
688 | */ | ||
689 | if (ptp->evt_overflow && list_empty(&ptp->evt_list)) | ||
690 | ptp->evt_overflow = false; | ||
679 | spin_unlock_bh(&ptp->evt_lock); | 691 | spin_unlock_bh(&ptp->evt_lock); |
680 | 692 | ||
681 | return rc; | 693 | return rc; |
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) | |||
705 | __skb_queue_tail(q, skb); | 717 | __skb_queue_tail(q, skb); |
706 | } else if (time_after(jiffies, match->expiry)) { | 718 | } else if (time_after(jiffies, match->expiry)) { |
707 | match->state = PTP_PACKET_STATE_TIMED_OUT; | 719 | match->state = PTP_PACKET_STATE_TIMED_OUT; |
708 | netif_warn(efx, rx_err, efx->net_dev, | 720 | if (net_ratelimit()) |
709 | "PTP packet - no timestamp seen\n"); | 721 | netif_warn(efx, rx_err, efx->net_dev, |
722 | "PTP packet - no timestamp seen\n"); | ||
710 | __skb_queue_tail(q, skb); | 723 | __skb_queue_tail(q, skb); |
711 | } else { | 724 | } else { |
712 | /* Replace unprocessed entry and stop */ | 725 | /* Replace unprocessed entry and stop */ |
@@ -788,9 +801,14 @@ fail: | |||
788 | static int efx_ptp_stop(struct efx_nic *efx) | 801 | static int efx_ptp_stop(struct efx_nic *efx) |
789 | { | 802 | { |
790 | struct efx_ptp_data *ptp = efx->ptp_data; | 803 | struct efx_ptp_data *ptp = efx->ptp_data; |
791 | int rc = efx_ptp_disable(efx); | ||
792 | struct list_head *cursor; | 804 | struct list_head *cursor; |
793 | struct list_head *next; | 805 | struct list_head *next; |
806 | int rc; | ||
807 | |||
808 | if (ptp == NULL) | ||
809 | return 0; | ||
810 | |||
811 | rc = efx_ptp_disable(efx); | ||
794 | 812 | ||
795 | if (ptp->rxfilter_installed) { | 813 | if (ptp->rxfilter_installed) { |
796 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | 814 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, |
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx) | |||
809 | list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { | 827 | list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { |
810 | list_move(cursor, &efx->ptp_data->evt_free_list); | 828 | list_move(cursor, &efx->ptp_data->evt_free_list); |
811 | } | 829 | } |
830 | ptp->evt_overflow = false; | ||
812 | spin_unlock_bh(&efx->ptp_data->evt_lock); | 831 | spin_unlock_bh(&efx->ptp_data->evt_lock); |
813 | 832 | ||
814 | return rc; | 833 | return rc; |
815 | } | 834 | } |
816 | 835 | ||
836 | static int efx_ptp_restart(struct efx_nic *efx) | ||
837 | { | ||
838 | if (efx->ptp_data && efx->ptp_data->enabled) | ||
839 | return efx_ptp_start(efx); | ||
840 | return 0; | ||
841 | } | ||
842 | |||
817 | static void efx_ptp_pps_worker(struct work_struct *work) | 843 | static void efx_ptp_pps_worker(struct work_struct *work) |
818 | { | 844 | { |
819 | struct efx_ptp_data *ptp = | 845 | struct efx_ptp_data *ptp = |
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) | |||
901 | spin_lock_init(&ptp->evt_lock); | 927 | spin_lock_init(&ptp->evt_lock); |
902 | for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) | 928 | for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) |
903 | list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); | 929 | list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); |
930 | ptp->evt_overflow = false; | ||
904 | 931 | ||
905 | ptp->phc_clock_info.owner = THIS_MODULE; | 932 | ptp->phc_clock_info.owner = THIS_MODULE; |
906 | snprintf(ptp->phc_clock_info.name, | 933 | snprintf(ptp->phc_clock_info.name, |
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |||
989 | skb->len >= PTP_MIN_LENGTH && | 1016 | skb->len >= PTP_MIN_LENGTH && |
990 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && | 1017 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && |
991 | likely(skb->protocol == htons(ETH_P_IP)) && | 1018 | likely(skb->protocol == htons(ETH_P_IP)) && |
1019 | skb_transport_header_was_set(skb) && | ||
1020 | skb_network_header_len(skb) >= sizeof(struct iphdr) && | ||
992 | ip_hdr(skb)->protocol == IPPROTO_UDP && | 1021 | ip_hdr(skb)->protocol == IPPROTO_UDP && |
1022 | skb_headlen(skb) >= | ||
1023 | skb_transport_offset(skb) + sizeof(struct udphdr) && | ||
993 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); | 1024 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); |
994 | } | 1025 | } |
995 | 1026 | ||
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, | |||
1106 | { | 1137 | { |
1107 | if ((enable_wanted != efx->ptp_data->enabled) || | 1138 | if ((enable_wanted != efx->ptp_data->enabled) || |
1108 | (enable_wanted && (efx->ptp_data->mode != new_mode))) { | 1139 | (enable_wanted && (efx->ptp_data->mode != new_mode))) { |
1109 | int rc; | 1140 | int rc = 0; |
1110 | 1141 | ||
1111 | if (enable_wanted) { | 1142 | if (enable_wanted) { |
1112 | /* Change of mode requires disable */ | 1143 | /* Change of mode requires disable */ |
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, | |||
1123 | * succeed. | 1154 | * succeed. |
1124 | */ | 1155 | */ |
1125 | efx->ptp_data->mode = new_mode; | 1156 | efx->ptp_data->mode = new_mode; |
1126 | rc = efx_ptp_start(efx); | 1157 | if (netif_running(efx->net_dev)) |
1158 | rc = efx_ptp_start(efx); | ||
1127 | if (rc == 0) { | 1159 | if (rc == 0) { |
1128 | rc = efx_ptp_synchronize(efx, | 1160 | rc = efx_ptp_synchronize(efx, |
1129 | PTP_SYNC_ATTEMPTS * 2); | 1161 | PTP_SYNC_ATTEMPTS * 2); |
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) | |||
1295 | list_add_tail(&evt->link, &ptp->evt_list); | 1327 | list_add_tail(&evt->link, &ptp->evt_list); |
1296 | 1328 | ||
1297 | queue_work(ptp->workwq, &ptp->work); | 1329 | queue_work(ptp->workwq, &ptp->work); |
1298 | } else { | 1330 | } else if (!ptp->evt_overflow) { |
1299 | netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); | 1331 | /* Log a warning message and set the event overflow flag. |
1332 | * The message won't be logged again until the event queue | ||
1333 | * becomes empty. | ||
1334 | */ | ||
1335 | netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); | ||
1336 | ptp->evt_overflow = true; | ||
1300 | } | 1337 | } |
1301 | spin_unlock_bh(&ptp->evt_lock); | 1338 | spin_unlock_bh(&ptp->evt_lock); |
1302 | } | 1339 | } |
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) | |||
1389 | if (rc != 0) | 1426 | if (rc != 0) |
1390 | return rc; | 1427 | return rc; |
1391 | 1428 | ||
1392 | ptp_data->current_adjfreq = delta; | 1429 | ptp_data->current_adjfreq = adjustment_ns; |
1393 | return 0; | 1430 | return 0; |
1394 | } | 1431 | } |
1395 | 1432 | ||
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) | |||
1404 | 1441 | ||
1405 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | 1442 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); |
1406 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); | 1443 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
1407 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); | 1444 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); |
1408 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); | 1445 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); |
1409 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); | 1446 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); |
1410 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | 1447 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), |
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx) | |||
1491 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = | 1528 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = |
1492 | &efx_ptp_channel_type; | 1529 | &efx_ptp_channel_type; |
1493 | } | 1530 | } |
1531 | |||
1532 | void efx_ptp_start_datapath(struct efx_nic *efx) | ||
1533 | { | ||
1534 | if (efx_ptp_restart(efx)) | ||
1535 | netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); | ||
1536 | } | ||
1537 | |||
1538 | void efx_ptp_stop_datapath(struct efx_nic *efx) | ||
1539 | { | ||
1540 | efx_ptp_stop(efx); | ||
1541 | } | ||
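
Several ptp.c hunks above replace an unconditional "No free PTP event" error with a one-shot evt_overflow flag (cleared once the event list drains) and rate-limit the per-packet timestamp warning, so a burst of events cannot flood the log. A small sketch of the one-shot flag, with simplified locking and list handling:

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

struct example_evt_queue {
	struct list_head evt_list;
	struct list_head evt_free_list;
	spinlock_t lock;
	bool overflow;		/* warn once per overflow episode */
};

static void example_evt_add(struct example_evt_queue *q)
{
	spin_lock_bh(&q->lock);
	if (!list_empty(&q->evt_free_list)) {
		/* claim a free slot and queue the new event */
		list_move_tail(q->evt_free_list.next, &q->evt_list);
	} else if (!q->overflow) {
		pr_err("event queue overflow\n");
		q->overflow = true;	/* stay quiet until it drains */
	}
	spin_unlock_bh(&q->lock);
}

static void example_evt_drain(struct example_evt_queue *q)
{
	spin_lock_bh(&q->lock);
	/* ... process and recycle entries from q->evt_list ... */
	if (q->overflow && list_empty(&q->evt_list))
		q->overflow = false;	/* re-arm the warning */
	spin_unlock_bh(&q->lock);
}
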
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8f09e686fc23..42488df1f4ec 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, | |||
94 | 94 | ||
95 | void efx_rx_config_page_split(struct efx_nic *efx) | 95 | void efx_rx_config_page_split(struct efx_nic *efx) |
96 | { | 96 | { |
97 | efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, | 97 | efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, |
98 | EFX_RX_BUF_ALIGNMENT); | 98 | EFX_RX_BUF_ALIGNMENT); |
99 | efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : | 99 | efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : |
100 | ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / | 100 | ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / |
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) | |||
189 | do { | 189 | do { |
190 | index = rx_queue->added_count & rx_queue->ptr_mask; | 190 | index = rx_queue->added_count & rx_queue->ptr_mask; |
191 | rx_buf = efx_rx_buffer(rx_queue, index); | 191 | rx_buf = efx_rx_buffer(rx_queue, index); |
192 | rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; | 192 | rx_buf->dma_addr = dma_addr + efx->rx_ip_align; |
193 | rx_buf->page = page; | 193 | rx_buf->page = page; |
194 | rx_buf->page_offset = page_offset + NET_IP_ALIGN; | 194 | rx_buf->page_offset = page_offset + efx->rx_ip_align; |
195 | rx_buf->len = efx->rx_dma_len; | 195 | rx_buf->len = efx->rx_dma_len; |
196 | rx_buf->flags = 0; | 196 | rx_buf->flags = 0; |
197 | ++rx_queue->added_count; | 197 | ++rx_queue->added_count; |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0c9b5d94154f..8bf29eb4a5a0 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -82,6 +82,7 @@ static const char version[] = | |||
82 | #include <linux/mii.h> | 82 | #include <linux/mii.h> |
83 | #include <linux/workqueue.h> | 83 | #include <linux/workqueue.h> |
84 | #include <linux/of.h> | 84 | #include <linux/of.h> |
85 | #include <linux/of_device.h> | ||
85 | 86 | ||
86 | #include <linux/netdevice.h> | 87 | #include <linux/netdevice.h> |
87 | #include <linux/etherdevice.h> | 88 | #include <linux/etherdevice.h> |
@@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * | |||
2184 | } | 2185 | } |
2185 | } | 2186 | } |
2186 | 2187 | ||
2188 | #if IS_BUILTIN(CONFIG_OF) | ||
2189 | static const struct of_device_id smc91x_match[] = { | ||
2190 | { .compatible = "smsc,lan91c94", }, | ||
2191 | { .compatible = "smsc,lan91c111", }, | ||
2192 | {}, | ||
2193 | }; | ||
2194 | MODULE_DEVICE_TABLE(of, smc91x_match); | ||
2195 | #endif | ||
2196 | |||
2187 | /* | 2197 | /* |
2188 | * smc_init(void) | 2198 | * smc_init(void) |
2189 | * Input parameters: | 2199 | * Input parameters: |
@@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * | |||
2198 | static int smc_drv_probe(struct platform_device *pdev) | 2208 | static int smc_drv_probe(struct platform_device *pdev) |
2199 | { | 2209 | { |
2200 | struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); | 2210 | struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); |
2211 | const struct of_device_id *match = NULL; | ||
2201 | struct smc_local *lp; | 2212 | struct smc_local *lp; |
2202 | struct net_device *ndev; | 2213 | struct net_device *ndev; |
2203 | struct resource *res, *ires; | 2214 | struct resource *res, *ires; |
@@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2217 | */ | 2228 | */ |
2218 | 2229 | ||
2219 | lp = netdev_priv(ndev); | 2230 | lp = netdev_priv(ndev); |
2231 | lp->cfg.flags = 0; | ||
2220 | 2232 | ||
2221 | if (pd) { | 2233 | if (pd) { |
2222 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); | 2234 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); |
2223 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); | 2235 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); |
2224 | } else { | 2236 | } |
2237 | |||
2238 | #if IS_BUILTIN(CONFIG_OF) | ||
2239 | match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); | ||
2240 | if (match) { | ||
2241 | struct device_node *np = pdev->dev.of_node; | ||
2242 | u32 val; | ||
2243 | |||
2244 | /* Combination of IO widths supported, default to 16-bit */ | ||
2245 | if (!of_property_read_u32(np, "reg-io-width", &val)) { | ||
2246 | if (val & 1) | ||
2247 | lp->cfg.flags |= SMC91X_USE_8BIT; | ||
2248 | if ((val == 0) || (val & 2)) | ||
2249 | lp->cfg.flags |= SMC91X_USE_16BIT; | ||
2250 | if (val & 4) | ||
2251 | lp->cfg.flags |= SMC91X_USE_32BIT; | ||
2252 | } else { | ||
2253 | lp->cfg.flags |= SMC91X_USE_16BIT; | ||
2254 | } | ||
2255 | } | ||
2256 | #endif | ||
2257 | |||
2258 | if (!pd && !match) { | ||
2225 | lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; | 2259 | lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; |
2226 | lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; | 2260 | lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; |
2227 | lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; | 2261 | lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; |
@@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev) | |||
2370 | return 0; | 2404 | return 0; |
2371 | } | 2405 | } |
2372 | 2406 | ||
2373 | #ifdef CONFIG_OF | ||
2374 | static const struct of_device_id smc91x_match[] = { | ||
2375 | { .compatible = "smsc,lan91c94", }, | ||
2376 | { .compatible = "smsc,lan91c111", }, | ||
2377 | {}, | ||
2378 | }; | ||
2379 | MODULE_DEVICE_TABLE(of, smc91x_match); | ||
2380 | #endif | ||
2381 | |||
2382 | static struct dev_pm_ops smc_drv_pm_ops = { | 2407 | static struct dev_pm_ops smc_drv_pm_ops = { |
2383 | .suspend = smc_drv_suspend, | 2408 | .suspend = smc_drv_suspend, |
2384 | .resume = smc_drv_resume, | 2409 | .resume = smc_drv_resume, |
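The smc91x probe change above reads the "reg-io-width" bitmask from the device tree to pick the supported bus widths, defaulting to 16-bit when the property is absent or zero. A self-contained sketch of that decoding, with illustrative flag names mirroring the SMC91X_USE_* bits:

#include <linux/bitops.h>
#include <linux/of.h>

#define MY_USE_8BIT	BIT(0)
#define MY_USE_16BIT	BIT(1)
#define MY_USE_32BIT	BIT(2)

static u32 my_decode_io_width(struct device_node *np)
{
	u32 val, flags = 0;

	/* A missing property or a value of 0 defaults to 16-bit access. */
	if (of_property_read_u32(np, "reg-io-width", &val) || val == 0)
		return MY_USE_16BIT;

	if (val & 1)
		flags |= MY_USE_8BIT;
	if (val & 2)
		flags |= MY_USE_16BIT;
	if (val & 4)
		flags |= MY_USE_32BIT;

	return flags;
}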
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index c9d4c872e81d..749654b976bc 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
@@ -46,7 +46,8 @@ | |||
46 | defined(CONFIG_MACH_LITTLETON) ||\ | 46 | defined(CONFIG_MACH_LITTLETON) ||\ |
47 | defined(CONFIG_MACH_ZYLONITE2) ||\ | 47 | defined(CONFIG_MACH_ZYLONITE2) ||\ |
48 | defined(CONFIG_ARCH_VIPER) ||\ | 48 | defined(CONFIG_ARCH_VIPER) ||\ |
49 | defined(CONFIG_MACH_STARGATE2) | 49 | defined(CONFIG_MACH_STARGATE2) ||\ |
50 | defined(CONFIG_ARCH_VERSATILE) | ||
50 | 51 | ||
51 | #include <asm/mach-types.h> | 52 | #include <asm/mach-types.h> |
52 | 53 | ||
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
154 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | 155 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) |
155 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | 156 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) |
156 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | 157 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) |
158 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
159 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
157 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 160 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
158 | 161 | ||
159 | /* We actually can't write halfwords properly if not word aligned */ | 162 | /* We actually can't write halfwords properly if not word aligned */ |
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
206 | #define RPC_LSA_DEFAULT RPC_LED_TX_RX | 209 | #define RPC_LSA_DEFAULT RPC_LED_TX_RX |
207 | #define RPC_LSB_DEFAULT RPC_LED_100_10 | 210 | #define RPC_LSB_DEFAULT RPC_LED_100_10 |
208 | 211 | ||
209 | #elif defined(CONFIG_ARCH_VERSATILE) | ||
210 | |||
211 | #define SMC_CAN_USE_8BIT 1 | ||
212 | #define SMC_CAN_USE_16BIT 1 | ||
213 | #define SMC_CAN_USE_32BIT 1 | ||
214 | #define SMC_NOWAIT 1 | ||
215 | |||
216 | #define SMC_inb(a, r) readb((a) + (r)) | ||
217 | #define SMC_inw(a, r) readw((a) + (r)) | ||
218 | #define SMC_inl(a, r) readl((a) + (r)) | ||
219 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
220 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
221 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
222 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
223 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
224 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
225 | |||
226 | #elif defined(CONFIG_MN10300) | 212 | #elif defined(CONFIG_MN10300) |
227 | 213 | ||
228 | /* | 214 | /* |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a7a23a84ac5..797b56a0efc4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) | 622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
623 | return -EOPNOTSUPP; | 623 | return -EOPNOTSUPP; |
624 | 624 | ||
625 | if (netif_msg_hw(priv)) { | 625 | priv->adv_ts = 0; |
626 | if (priv->dma_cap.time_stamp) { | 626 | if (priv->dma_cap.atime_stamp && priv->extend_desc) |
627 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); | 627 | priv->adv_ts = 1; |
628 | priv->adv_ts = 0; | 628 | |
629 | } | 629 | if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) |
630 | if (priv->dma_cap.atime_stamp && priv->extend_desc) { | 630 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); |
631 | pr_debug | 631 | |
632 | ("IEEE 1588-2008 Advanced Time Stamp supported\n"); | 632 | if (netif_msg_hw(priv) && priv->adv_ts) |
633 | priv->adv_ts = 1; | 633 | pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); |
634 | } | ||
635 | } | ||
636 | 634 | ||
637 | priv->hw->ptp = &stmmac_ptp; | 635 | priv->hw->ptp = &stmmac_ptp; |
638 | priv->hwts_tx_en = 0; | 636 | priv->hwts_tx_en = 0; |
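The stmmac_init_ptp rework fixes a bug class worth spelling out: priv->adv_ts was only assigned inside the netif_msg_hw() branch, so whether advanced timestamping got enabled depended on the debug message level. The fix computes the state unconditionally and gates only the printout. A generic sketch, with my_priv and its fields as illustrative stand-ins:

#include <linux/printk.h>
#include <linux/types.h>

struct my_priv {
	bool hw_has_adv_ts;	/* hardware capability bit */
	bool use_ext_desc;	/* extended descriptors in use */
	bool adv_ts;		/* resulting driver state */
	bool debug_hw;		/* verbosity flag (netif_msg_hw-like) */
};

static void my_init_adv_ts(struct my_priv *priv)
{
	/* State first, unconditionally ... */
	priv->adv_ts = priv->hw_has_adv_ts && priv->use_ext_desc;

	/* ... logging second, gated on verbosity only. */
	if (priv->debug_hw && priv->adv_ts)
		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
}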
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b8b0eeed0f92..7680581ebe12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) | |||
56 | 56 | ||
57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); | 57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); |
58 | 58 | ||
59 | spin_unlock_irqrestore(&priv->lock, flags); | 59 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
60 | 60 | ||
61 | return 0; | 61 | return 0; |
62 | } | 62 | } |
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) | |||
91 | 91 | ||
92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); | 92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); |
93 | 93 | ||
94 | spin_unlock_irqrestore(&priv->lock, flags); | 94 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
95 | 95 | ||
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
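Both stmmac_ptp.c hunks correct a lock imbalance: the handlers take priv->ptp_lock but released priv->lock, which corrupts lock state and can deadlock. The restored pattern, as a minimal sketch with my_priv standing in for the driver private data:

#include <linux/spinlock.h>

struct my_priv {
	spinlock_t ptp_lock;	/* protects the timestamping registers */
};

static int my_ptp_adjust(struct my_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->ptp_lock, flags);
	/* ... program the addend / adjust the system time ... */
	spin_unlock_irqrestore(&priv->ptp_lock, flags);	/* same lock, same flags */

	return 0;
}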
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dd0dd6279b4e..4f1d2549130e 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2019 | ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | 2019 | ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
2020 | | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | 2020 | | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
2021 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM | 2021 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM |
2022 | /*| NETIF_F_FRAGLIST */ | ||
2023 | ; | 2022 | ; |
2024 | ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 2023 | ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
2025 | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; | 2024 | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7536a4c01293..5330fd298705 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, | |||
740 | /* set speed_in input in case RMII mode is used in 100Mbps */ | 740 | /* set speed_in input in case RMII mode is used in 100Mbps */ |
741 | if (phy->speed == 100) | 741 | if (phy->speed == 100) |
742 | mac_control |= BIT(15); | 742 | mac_control |= BIT(15); |
743 | else if (phy->speed == 10) | ||
744 | mac_control |= BIT(18); /* In Band mode */ | ||
743 | 745 | ||
744 | *link = true; | 746 | *link = true; |
745 | } else { | 747 | } else { |
@@ -1151,6 +1153,12 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1151 | * receive descs | 1153 | * receive descs |
1152 | */ | 1154 | */ |
1153 | cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); | 1155 | cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); |
1156 | |||
1157 | if (cpts_register(&priv->pdev->dev, priv->cpts, | ||
1158 | priv->data.cpts_clock_mult, | ||
1159 | priv->data.cpts_clock_shift)) | ||
1160 | dev_err(priv->dev, "error registering cpts device\n"); | ||
1161 | |||
1154 | } | 1162 | } |
1155 | 1163 | ||
1156 | /* Enable Interrupt pacing if configured */ | 1164 | /* Enable Interrupt pacing if configured */ |
@@ -1197,6 +1205,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) | |||
1197 | netif_carrier_off(priv->ndev); | 1205 | netif_carrier_off(priv->ndev); |
1198 | 1206 | ||
1199 | if (cpsw_common_res_usage_state(priv) <= 1) { | 1207 | if (cpsw_common_res_usage_state(priv) <= 1) { |
1208 | cpts_unregister(priv->cpts); | ||
1200 | cpsw_intr_disable(priv); | 1209 | cpsw_intr_disable(priv); |
1201 | cpdma_ctlr_int_ctrl(priv->dma, false); | 1210 | cpdma_ctlr_int_ctrl(priv->dma, false); |
1202 | cpdma_ctlr_stop(priv->dma); | 1211 | cpdma_ctlr_stop(priv->dma); |
@@ -1816,6 +1825,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1816 | } | 1825 | } |
1817 | 1826 | ||
1818 | i++; | 1827 | i++; |
1828 | if (i == data->slaves) | ||
1829 | break; | ||
1819 | } | 1830 | } |
1820 | 1831 | ||
1821 | return 0; | 1832 | return 0; |
@@ -1983,9 +1994,15 @@ static int cpsw_probe(struct platform_device *pdev) | |||
1983 | goto clean_runtime_disable_ret; | 1994 | goto clean_runtime_disable_ret; |
1984 | } | 1995 | } |
1985 | priv->regs = ss_regs; | 1996 | priv->regs = ss_regs; |
1986 | priv->version = __raw_readl(&priv->regs->id_ver); | ||
1987 | priv->host_port = HOST_PORT_NUM; | 1997 | priv->host_port = HOST_PORT_NUM; |
1988 | 1998 | ||
1999 | /* Need to enable clocks with runtime PM api to access module | ||
2000 | * registers | ||
2001 | */ | ||
2002 | pm_runtime_get_sync(&pdev->dev); | ||
2003 | priv->version = readl(&priv->regs->id_ver); | ||
2004 | pm_runtime_put_sync(&pdev->dev); | ||
2005 | |||
1989 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 2006 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1990 | priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); | 2007 | priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); |
1991 | if (IS_ERR(priv->wr_regs)) { | 2008 | if (IS_ERR(priv->wr_regs)) { |
@@ -2091,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2091 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | 2108 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { |
2092 | for (i = res->start; i <= res->end; i++) { | 2109 | for (i = res->start; i <= res->end; i++) { |
2093 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, | 2110 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, |
2094 | dev_name(priv->dev), priv)) { | 2111 | dev_name(&pdev->dev), priv)) { |
2095 | dev_err(priv->dev, "error attaching irq\n"); | 2112 | dev_err(priv->dev, "error attaching irq\n"); |
2096 | goto clean_ale_ret; | 2113 | goto clean_ale_ret; |
2097 | } | 2114 | } |
@@ -2155,8 +2172,6 @@ static int cpsw_remove(struct platform_device *pdev) | |||
2155 | unregister_netdev(cpsw_get_slave_ndev(priv, 1)); | 2172 | unregister_netdev(cpsw_get_slave_ndev(priv, 1)); |
2156 | unregister_netdev(ndev); | 2173 | unregister_netdev(ndev); |
2157 | 2174 | ||
2158 | cpts_unregister(priv->cpts); | ||
2159 | |||
2160 | cpsw_ale_destroy(priv->ale); | 2175 | cpsw_ale_destroy(priv->ale); |
2161 | cpdma_chan_destroy(priv->txch); | 2176 | cpdma_chan_destroy(priv->txch); |
2162 | cpdma_chan_destroy(priv->rxch); | 2177 | cpdma_chan_destroy(priv->rxch); |
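Two points in the cpsw diff above carry a reusable pattern: cpts registration moves into ndo_open/ndo_stop so the timestamping block follows the interface lifecycle, and the id_ver read at probe is bracketed with runtime-PM calls because the module clock must be running before its registers are touched. A sketch of the latter, with my_read_id_ver as an illustrative helper:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static u32 my_read_id_ver(struct platform_device *pdev, void __iomem *reg)
{
	u32 ver;

	pm_runtime_get_sync(&pdev->dev);	/* module clocks on */
	ver = readl(reg);			/* register access is now safe */
	pm_runtime_put_sync(&pdev->dev);	/* clocks may gate again */

	return ver;
}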
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 41ba974bf37c..cd9b164a0434 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -61,6 +61,7 @@ | |||
61 | #include <linux/davinci_emac.h> | 61 | #include <linux/davinci_emac.h> |
62 | #include <linux/of.h> | 62 | #include <linux/of.h> |
63 | #include <linux/of_address.h> | 63 | #include <linux/of_address.h> |
64 | #include <linux/of_device.h> | ||
64 | #include <linux/of_irq.h> | 65 | #include <linux/of_irq.h> |
65 | #include <linux/of_net.h> | 66 | #include <linux/of_net.h> |
66 | 67 | ||
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = { | |||
1752 | #endif | 1753 | #endif |
1753 | }; | 1754 | }; |
1754 | 1755 | ||
1756 | static const struct of_device_id davinci_emac_of_match[]; | ||
1757 | |||
1755 | static struct emac_platform_data * | 1758 | static struct emac_platform_data * |
1756 | davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) | 1759 | davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) |
1757 | { | 1760 | { |
1758 | struct device_node *np; | 1761 | struct device_node *np; |
1762 | const struct of_device_id *match; | ||
1763 | const struct emac_platform_data *auxdata; | ||
1759 | struct emac_platform_data *pdata = NULL; | 1764 | struct emac_platform_data *pdata = NULL; |
1760 | const u8 *mac_addr; | 1765 | const u8 *mac_addr; |
1761 | 1766 | ||
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) | |||
1793 | 1798 | ||
1794 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); | 1799 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
1795 | if (!priv->phy_node) | 1800 | if (!priv->phy_node) |
1796 | pdata->phy_id = ""; | 1801 | pdata->phy_id = NULL; |
1802 | |||
1803 | auxdata = pdev->dev.platform_data; | ||
1804 | if (auxdata) { | ||
1805 | pdata->interrupt_enable = auxdata->interrupt_enable; | ||
1806 | pdata->interrupt_disable = auxdata->interrupt_disable; | ||
1807 | } | ||
1808 | |||
1809 | match = of_match_device(davinci_emac_of_match, &pdev->dev); | ||
1810 | if (match && match->data) { | ||
1811 | auxdata = match->data; | ||
1812 | pdata->version = auxdata->version; | ||
1813 | pdata->hw_ram_addr = auxdata->hw_ram_addr; | ||
1814 | } | ||
1797 | 1815 | ||
1798 | pdev->dev.platform_data = pdata; | 1816 | pdev->dev.platform_data = pdata; |
1799 | 1817 | ||
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { | |||
2020 | }; | 2038 | }; |
2021 | 2039 | ||
2022 | #if IS_ENABLED(CONFIG_OF) | 2040 | #if IS_ENABLED(CONFIG_OF) |
2041 | static const struct emac_platform_data am3517_emac_data = { | ||
2042 | .version = EMAC_VERSION_2, | ||
2043 | .hw_ram_addr = 0x01e20000, | ||
2044 | }; | ||
2045 | |||
2023 | static const struct of_device_id davinci_emac_of_match[] = { | 2046 | static const struct of_device_id davinci_emac_of_match[] = { |
2024 | {.compatible = "ti,davinci-dm6467-emac", }, | 2047 | {.compatible = "ti,davinci-dm6467-emac", }, |
2048 | {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, | ||
2025 | {}, | 2049 | {}, |
2026 | }; | 2050 | }; |
2027 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); | 2051 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); |
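The davinci_emac change attaches per-compatible platform data through the .data pointer of of_device_id and retrieves it with of_match_device(), letting one driver cover several SoC variants. A self-contained sketch of the pattern, with my_variant and the helper names as illustrative assumptions:

#include <linux/of_device.h>

struct my_variant {
	int version;
	unsigned long hw_ram_addr;
};

static const struct my_variant am3517_variant = {
	.version = 2,
	.hw_ram_addr = 0x01e20000,
};

static const struct of_device_id my_of_match[] = {
	{ .compatible = "ti,davinci-dm6467-emac" },
	{ .compatible = "ti,am3517-emac", .data = &am3517_variant },
	{ /* sentinel */ }
};

static const struct my_variant *my_get_variant(struct device *dev)
{
	const struct of_device_id *match = of_match_device(my_of_match, dev);

	return match ? match->data : NULL;	/* NULL => generic defaults */
}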
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 628b736e5ae7..0e9fb3301b11 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
2080 | } | 2080 | } |
2081 | 2081 | ||
2082 | /* Return subqueue id on this core (one per core). */ | 2082 | /* Return subqueue id on this core (one per core). */ |
2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | 2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, |
2084 | void *accel_priv) | ||
2084 | { | 2085 | { |
2085 | return smp_processor_id(); | 2086 | return smp_processor_id(); |
2086 | } | 2087 | } |
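The tilegx hunk only tracks an interface change: .ndo_select_queue gains a third accel_priv argument in this series, which drivers without acceleration backends simply ignore. A minimal sketch of the updated signature (my_select_queue is illustrative):

#include <linux/netdevice.h>
#include <linux/smp.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv)
{
	return smp_processor_id();	/* one subqueue per CPU core */
}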
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d022bf936572..ad61d26a44f3 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget) | |||
2172 | unsigned int rx_done; | 2172 | unsigned int rx_done; |
2173 | unsigned long flags; | 2173 | unsigned long flags; |
2174 | 2174 | ||
2175 | spin_lock_irqsave(&vptr->lock, flags); | ||
2176 | /* | 2175 | /* |
2177 | * Do rx and tx twice for performance (taken from the VIA | 2176 | * Do rx and tx twice for performance (taken from the VIA |
2178 | * out-of-tree driver). | 2177 | * out-of-tree driver). |
2179 | */ | 2178 | */ |
2180 | rx_done = velocity_rx_srv(vptr, budget / 2); | 2179 | rx_done = velocity_rx_srv(vptr, budget); |
2181 | velocity_tx_srv(vptr); | 2180 | spin_lock_irqsave(&vptr->lock, flags); |
2182 | rx_done += velocity_rx_srv(vptr, budget - rx_done); | ||
2183 | velocity_tx_srv(vptr); | 2181 | velocity_tx_srv(vptr); |
2184 | |||
2185 | /* If budget not fully consumed, exit the polling mode */ | 2182 | /* If budget not fully consumed, exit the polling mode */ |
2186 | if (rx_done < budget) { | 2183 | if (rx_done < budget) { |
2187 | napi_complete(napi); | 2184 | napi_complete(napi); |
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |||
2342 | if (ret < 0) | 2339 | if (ret < 0) |
2343 | goto out_free_tmp_vptr_1; | 2340 | goto out_free_tmp_vptr_1; |
2344 | 2341 | ||
2342 | napi_disable(&vptr->napi); | ||
2343 | |||
2345 | spin_lock_irqsave(&vptr->lock, flags); | 2344 | spin_lock_irqsave(&vptr->lock, flags); |
2346 | 2345 | ||
2347 | netif_stop_queue(dev); | 2346 | netif_stop_queue(dev); |
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |||
2362 | 2361 | ||
2363 | velocity_give_many_rx_descs(vptr); | 2362 | velocity_give_many_rx_descs(vptr); |
2364 | 2363 | ||
2364 | napi_enable(&vptr->napi); | ||
2365 | |||
2365 | mac_enable_int(vptr->mac_regs); | 2366 | mac_enable_int(vptr->mac_regs); |
2366 | netif_start_queue(dev); | 2367 | netif_start_queue(dev); |
2367 | 2368 | ||
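The via-velocity changes move the RX service out of the spinlock in the poll handler and quiesce NAPI across the MTU change, so ring teardown and rebuild cannot race with polling. A sketch of that quiesce pattern under assumed my_* names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	struct napi_struct napi;
	spinlock_t lock;
};

static int my_change_mtu(struct net_device *dev, int new_mtu)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	napi_disable(&priv->napi);		/* no polling during the rework */

	spin_lock_irqsave(&priv->lock, flags);
	netif_stop_queue(dev);
	/* ... free old rings, set dev->mtu = new_mtu, allocate new rings ... */
	spin_unlock_irqrestore(&priv->lock, flags);

	napi_enable(&priv->napi);		/* polling may resume */
	netif_start_queue(dev);

	return 0;
}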
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1f2364126323..2166e879a096 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op) | |||
1017 | platform_set_drvdata(op, ndev); | 1017 | platform_set_drvdata(op, ndev); |
1018 | SET_NETDEV_DEV(ndev, &op->dev); | 1018 | SET_NETDEV_DEV(ndev, &op->dev); |
1019 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ | 1019 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ |
1020 | ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; | 1020 | ndev->features = NETIF_F_SG; |
1021 | ndev->netdev_ops = &temac_netdev_ops; | 1021 | ndev->netdev_ops = &temac_netdev_ops; |
1022 | ndev->ethtool_ops = &temac_ethtool_ops; | 1022 | ndev->ethtool_ops = &temac_ethtool_ops; |
1023 | #if 0 | 1023 | #if 0 |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2ff038d6d20..f9293da19e26 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
1486 | 1486 | ||
1487 | SET_NETDEV_DEV(ndev, &op->dev); | 1487 | SET_NETDEV_DEV(ndev, &op->dev); |
1488 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ | 1488 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ |
1489 | ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; | 1489 | ndev->features = NETIF_F_SG; |
1490 | ndev->netdev_ops = &axienet_netdev_ops; | 1490 | ndev->netdev_ops = &axienet_netdev_ops; |
1491 | ndev->ethtool_ops = &axienet_ethtool_ops; | 1491 | ndev->ethtool_ops = &axienet_ethtool_ops; |
1492 | 1492 | ||
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 74234a51c851..fefb8cd5eb65 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) | |||
163 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | 163 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, |
164 | drvdata->base_addr + XEL_TSR_OFFSET); | 164 | drvdata->base_addr + XEL_TSR_OFFSET); |
165 | 165 | ||
166 | /* Enable the Tx interrupts for the second Buffer if | ||
167 | * configured in HW */ | ||
168 | if (drvdata->tx_ping_pong != 0) { | ||
169 | reg_data = __raw_readl(drvdata->base_addr + | ||
170 | XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
171 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | ||
172 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
173 | XEL_TSR_OFFSET); | ||
174 | } | ||
175 | |||
176 | /* Enable the Rx interrupts for the first buffer */ | 166 | /* Enable the Rx interrupts for the first buffer */ |
177 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); | 167 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); |
178 | 168 | ||
179 | /* Enable the Rx interrupts for the second Buffer if | ||
180 | * configured in HW */ | ||
181 | if (drvdata->rx_ping_pong != 0) { | ||
182 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + | ||
183 | XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); | ||
184 | } | ||
185 | |||
186 | /* Enable the Global Interrupt Enable */ | 169 | /* Enable the Global Interrupt Enable */ |
187 | __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | 170 | __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); |
188 | } | 171 | } |
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) | |||
206 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | 189 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), |
207 | drvdata->base_addr + XEL_TSR_OFFSET); | 190 | drvdata->base_addr + XEL_TSR_OFFSET); |
208 | 191 | ||
209 | /* Disable the Tx interrupts for the second Buffer | ||
210 | * if configured in HW */ | ||
211 | if (drvdata->tx_ping_pong != 0) { | ||
212 | reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
213 | XEL_TSR_OFFSET); | ||
214 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | ||
215 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
216 | XEL_TSR_OFFSET); | ||
217 | } | ||
218 | |||
219 | /* Disable the Rx interrupts for the first buffer */ | 192 | /* Disable the Rx interrupts for the first buffer */ |
220 | reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); | 193 | reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); |
221 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | 194 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), |
222 | drvdata->base_addr + XEL_RSR_OFFSET); | 195 | drvdata->base_addr + XEL_RSR_OFFSET); |
223 | |||
224 | /* Disable the Rx interrupts for the second buffer | ||
225 | * if configured in HW */ | ||
226 | if (drvdata->rx_ping_pong != 0) { | ||
227 | |||
228 | reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
229 | XEL_RSR_OFFSET); | ||
230 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | ||
231 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
232 | XEL_RSR_OFFSET); | ||
233 | } | ||
234 | } | 196 | } |
235 | 197 | ||
236 | /** | 198 | /** |
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, | |||
258 | *to_u16_ptr++ = *from_u16_ptr++; | 220 | *to_u16_ptr++ = *from_u16_ptr++; |
259 | *to_u16_ptr++ = *from_u16_ptr++; | 221 | *to_u16_ptr++ = *from_u16_ptr++; |
260 | 222 | ||
223 | /* This barrier resolves occasional issues seen around | ||
224 | * cases where the data is not properly flushed out | ||
225 | * from the processor store buffers to the destination | ||
226 | * memory locations. | ||
227 | */ | ||
228 | wmb(); | ||
229 | |||
261 | /* Output a word */ | 230 | /* Output a word */ |
262 | *to_u32_ptr++ = align_buffer; | 231 | *to_u32_ptr++ = align_buffer; |
263 | } | 232 | } |
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, | |||
273 | for (; length > 0; length--) | 242 | for (; length > 0; length--) |
274 | *to_u8_ptr++ = *from_u8_ptr++; | 243 | *to_u8_ptr++ = *from_u8_ptr++; |
275 | 244 | ||
245 | /* This barrier resolves occasional issues seen around | ||
246 | * cases where the data is not properly flushed out | ||
247 | * from the processor store buffers to the destination | ||
248 | * memory locations. | ||
249 | */ | ||
250 | wmb(); | ||
276 | *to_u32_ptr = align_buffer; | 251 | *to_u32_ptr = align_buffer; |
277 | } | 252 | } |
278 | } | 253 | } |
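The xemaclite hunks drop the redundant ping-pong interrupt toggling and add wmb() barriers in the aligned-write path, so the narrower stores that assemble each word reach the destination before the final word write. A minimal sketch of the barrier placement (my_emit_word is illustrative, not the driver function):

#include <asm/barrier.h>
#include <linux/types.h>

static void my_emit_word(u32 *dst, u32 word)
{
	/* Earlier, narrower stores filled the destination buffer; wmb()
	 * keeps them ordered ahead of this final word store so the
	 * device never observes a partially flushed buffer. */
	wmb();
	*dst = word;
}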