Diffstat (limited to 'drivers/net/ethernet')
50 files changed, 568 insertions, 1317 deletions
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9fc97de..57e97910c728 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, | |||
246 | 246 | ||
247 | if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { | 247 | if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { |
248 | dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); | 248 | dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); |
249 | return -ENODEV; | 249 | goto err_out; |
250 | } | 250 | } |
251 | 251 | ||
252 | if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { | 252 | if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { |
253 | dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", | 253 | dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", |
254 | NE_IO_EXTENT, ioaddr); | 254 | NE_IO_EXTENT, ioaddr); |
255 | return -EBUSY; | 255 | goto err_out; |
256 | } | 256 | } |
257 | 257 | ||
258 | reg0 = inb(ioaddr); | 258 | reg0 = inb(ioaddr); |
@@ -392,6 +392,8 @@ err_out_free_netdev: | |||
392 | free_netdev (dev); | 392 | free_netdev (dev); |
393 | err_out_free_res: | 393 | err_out_free_res: |
394 | release_region (ioaddr, NE_IO_EXTENT); | 394 | release_region (ioaddr, NE_IO_EXTENT); |
395 | err_out: | ||
396 | pci_disable_device(pdev); | ||
395 | return -ENODEV; | 397 | return -ENODEV; |
396 | } | 398 | } |
397 | 399 | ||
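
The ne2k-pci hunks above convert early returns into a goto-based unwind so that pci_disable_device() is reached from every failure path once pci_enable_device() has succeeded. A minimal sketch of that probe-unwind idiom follows; the function name is illustrative, NE_IO_EXTENT and DRV_NAME are the driver's own constants, and unlike the patch (which funnels every failure into a single -ENODEV return) the sketch keeps the specific error code.

static int example_pci_probe(struct pci_dev *pdev)
{
	unsigned long ioaddr;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;			/* nothing to unwind yet */

	ioaddr = pci_resource_start(pdev, 0);
	if (!ioaddr || !(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
		err = -ENODEV;
		goto err_out;			/* must undo pci_enable_device() */
	}

	if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) {
		err = -EBUSY;
		goto err_out;
	}

	/* ... later failures release the region first, then fall through ... */
	return 0;

err_out:
	pci_disable_device(pdev);
	return err;
}
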
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050d0a9d..eadcb053807e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig" | |||
156 | source "drivers/net/ethernet/renesas/Kconfig" | 156 | source "drivers/net/ethernet/renesas/Kconfig" |
157 | source "drivers/net/ethernet/rdc/Kconfig" | 157 | source "drivers/net/ethernet/rdc/Kconfig" |
158 | source "drivers/net/ethernet/rocker/Kconfig" | 158 | source "drivers/net/ethernet/rocker/Kconfig" |
159 | |||
160 | config S6GMAC | ||
161 | tristate "S6105 GMAC ethernet support" | ||
162 | depends on XTENSA_VARIANT_S6000 | ||
163 | select PHYLIB | ||
164 | ---help--- | ||
165 | This driver supports the on chip ethernet device on the | ||
166 | S6105 xtensa processor. | ||
167 | |||
168 | To compile this driver as a module, choose M here. The module | ||
169 | will be called s6gmac. | ||
170 | |||
171 | source "drivers/net/ethernet/samsung/Kconfig" | 159 | source "drivers/net/ethernet/samsung/Kconfig" |
172 | source "drivers/net/ethernet/seeq/Kconfig" | 160 | source "drivers/net/ethernet/seeq/Kconfig" |
173 | source "drivers/net/ethernet/silan/Kconfig" | 161 | source "drivers/net/ethernet/silan/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b36e90..1367afcd0a8b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ | |||
66 | obj-$(CONFIG_SH_ETH) += renesas/ | 66 | obj-$(CONFIG_SH_ETH) += renesas/ |
67 | obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ | 67 | obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ |
68 | obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ | 68 | obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ |
69 | obj-$(CONFIG_S6GMAC) += s6gmac.o | ||
70 | obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ | 69 | obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ |
71 | obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ | 70 | obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ |
72 | obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ | 71 | obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ |
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev) | |||
850 | } | 850 | } |
851 | 851 | ||
852 | db->clk = devm_clk_get(&pdev->dev, NULL); | 852 | db->clk = devm_clk_get(&pdev->dev, NULL); |
853 | if (IS_ERR(db->clk)) | 853 | if (IS_ERR(db->clk)) { |
854 | ret = PTR_ERR(db->clk); | ||
854 | goto out; | 855 | goto out; |
856 | } | ||
855 | 857 | ||
856 | clk_prepare_enable(db->clk); | 858 | clk_prepare_enable(db->clk); |
857 | 859 | ||
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error: | |||
1170 | init_error: | 1170 | init_error: |
1171 | free_skbufs(dev); | 1171 | free_skbufs(dev); |
1172 | alloc_skbuf_error: | 1172 | alloc_skbuf_error: |
1173 | if (priv->phydev) { | ||
1174 | phy_disconnect(priv->phydev); | ||
1175 | priv->phydev = NULL; | ||
1176 | } | ||
1177 | phy_error: | 1173 | phy_error: |
1178 | return ret; | 1174 | return ret; |
1179 | } | 1175 | } |
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev) | |||
1186 | int ret; | 1182 | int ret; |
1187 | unsigned long int flags; | 1183 | unsigned long int flags; |
1188 | 1184 | ||
1189 | /* Stop and disconnect the PHY */ | 1185 | /* Stop the PHY */ |
1190 | if (priv->phydev) { | 1186 | if (priv->phydev) |
1191 | phy_stop(priv->phydev); | 1187 | phy_stop(priv->phydev); |
1192 | phy_disconnect(priv->phydev); | ||
1193 | priv->phydev = NULL; | ||
1194 | } | ||
1195 | 1188 | ||
1196 | netif_stop_queue(dev); | 1189 | netif_stop_queue(dev); |
1197 | napi_disable(&priv->napi); | 1190 | napi_disable(&priv->napi); |
@@ -1525,6 +1518,10 @@ err_free_netdev: | |||
1525 | static int altera_tse_remove(struct platform_device *pdev) | 1518 | static int altera_tse_remove(struct platform_device *pdev) |
1526 | { | 1519 | { |
1527 | struct net_device *ndev = platform_get_drvdata(pdev); | 1520 | struct net_device *ndev = platform_get_drvdata(pdev); |
1521 | struct altera_tse_private *priv = netdev_priv(ndev); | ||
1522 | |||
1523 | if (priv->phydev) | ||
1524 | phy_disconnect(priv->phydev); | ||
1528 | 1525 | ||
1529 | platform_set_drvdata(pdev, NULL); | 1526 | platform_set_drvdata(pdev, NULL); |
1530 | altera_tse_mdio_destroy(ndev); | 1527 | altera_tse_mdio_destroy(ndev); |
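
The altera_tse hunks move phy_disconnect() out of the ndo_stop path and into the platform remove handler, so the PHY binding now survives an ifdown/ifup cycle and is only released when the device itself goes away. Roughly, the resulting split of responsibilities looks like the sketch below (the _sketch suffixes are illustrative and the surrounding teardown is elided):

static int tse_shutdown_sketch(struct net_device *dev)		/* ndo_stop */
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_stop(priv->phydev);		/* pause it, keep it attached */

	/* ... stop queue, NAPI, DMA ... */
	return 0;
}

static int altera_tse_remove_sketch(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);	/* release it for good */

	/* ... mdio destroy, unregister_netdev(), free_netdev() ... */
	return 0;
}
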
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 888247ad9068..41a3c9804427 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -64,7 +64,7 @@ config BCMGENET | |||
64 | tristate "Broadcom GENET internal MAC support" | 64 | tristate "Broadcom GENET internal MAC support" |
65 | select MII | 65 | select MII |
66 | select PHYLIB | 66 | select PHYLIB |
67 | select FIXED_PHY if BCMGENET=y | 67 | select FIXED_PHY |
68 | select BCM7XXX_PHY | 68 | select BCM7XXX_PHY |
69 | help | 69 | help |
70 | This driver supports the built-in Ethernet MACs found in the | 70 | This driver supports the built-in Ethernet MACs found in the |
@@ -155,7 +155,7 @@ config SYSTEMPORT | |||
155 | depends on OF | 155 | depends on OF |
156 | select MII | 156 | select MII |
157 | select PHYLIB | 157 | select PHYLIB |
158 | select FIXED_PHY if SYSTEMPORT=y | 158 | select FIXED_PHY |
159 | help | 159 | help |
160 | This driver supports the built-in Ethernet MACs found in the | 160 | This driver supports the built-in Ethernet MACs found in the |
161 | Broadcom BCM7xxx Set Top Box family chipset using an internal | 161 | Broadcom BCM7xxx Set Top Box family chipset using an internal |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 691f0bf09ee1..72eef9fc883e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev, | |||
12553 | return 0; | 12553 | return 0; |
12554 | } | 12554 | } |
12555 | 12555 | ||
12556 | static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev) | 12556 | static netdev_features_t bnx2x_features_check(struct sk_buff *skb, |
12557 | struct net_device *dev, | ||
12558 | netdev_features_t features) | ||
12557 | { | 12559 | { |
12558 | return vxlan_gso_check(skb); | 12560 | return vxlan_features_check(skb, features); |
12559 | } | 12561 | } |
12560 | 12562 | ||
12561 | static const struct net_device_ops bnx2x_netdev_ops = { | 12563 | static const struct net_device_ops bnx2x_netdev_ops = { |
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
12589 | #endif | 12591 | #endif |
12590 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, | 12592 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, |
12591 | .ndo_set_vf_link_state = bnx2x_set_vf_link_state, | 12593 | .ndo_set_vf_link_state = bnx2x_set_vf_link_state, |
12592 | .ndo_gso_check = bnx2x_gso_check, | 12594 | .ndo_features_check = bnx2x_features_check, |
12593 | }; | 12595 | }; |
12594 | 12596 | ||
12595 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) | 12597 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) |
@@ -13256,7 +13258,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
13256 | return -EFAULT; | 13258 | return -EFAULT; |
13257 | } | 13259 | } |
13258 | 13260 | ||
13259 | DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val, | 13261 | DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, |
13260 | best_period); | 13262 | best_period); |
13261 | 13263 | ||
13262 | return 0; | 13264 | return 0; |
@@ -14784,7 +14786,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) | |||
14784 | -EFAULT : 0; | 14786 | -EFAULT : 0; |
14785 | } | 14787 | } |
14786 | 14788 | ||
14787 | /* Configrues HW for PTP */ | 14789 | /* Configures HW for PTP */ |
14788 | static int bnx2x_configure_ptp(struct bnx2x *bp) | 14790 | static int bnx2x_configure_ptp(struct bnx2x *bp) |
14789 | { | 14791 | { |
14790 | int rc, port = BP_PORT(bp); | 14792 | int rc, port = BP_PORT(bp); |
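
This bnx2x hunk (and the matching be2net and mlx4_en hunks later in the diff) replaces the boolean .ndo_gso_check callback with .ndo_features_check, which receives and returns a netdev_features_t mask so the core can drop only the offload bits that do not apply to a given skb. The converted callback has the shape sketched below; only the vxlan_features_check() call is taken from the patch, the rest of the names are illustrative.

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Let the helper clear GSO/checksum bits that cannot be offloaded
	 * for this particular (possibly VXLAN-encapsulated) packet.
	 */
	return vxlan_features_check(skb, features);
}

static const struct net_device_ops example_netdev_ops = {
	/* ... other callbacks ... */
	.ndo_features_check	= example_features_check,
};
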
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index b0779d773343..6fe547c93e74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7549,7 +7549,7 @@ Theotherbitsarereservedandshouldbezero*/ | |||
7549 | #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 | 7549 | #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 |
7550 | 7550 | ||
7551 | #define IGU_REG_RESERVED_UPPER 0x05ff | 7551 | #define IGU_REG_RESERVED_UPPER 0x05ff |
7552 | /* Fields of IGU PF CONFIGRATION REGISTER */ | 7552 | /* Fields of IGU PF CONFIGURATION REGISTER */ |
7553 | #define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */ | 7553 | #define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */ |
7554 | #define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */ | 7554 | #define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */ |
7555 | #define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */ | 7555 | #define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */ |
@@ -7557,7 +7557,7 @@ Theotherbitsarereservedandshouldbezero*/ | |||
7557 | #define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */ | 7557 | #define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */ |
7558 | #define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */ | 7558 | #define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */ |
7559 | 7559 | ||
7560 | /* Fields of IGU VF CONFIGRATION REGISTER */ | 7560 | /* Fields of IGU VF CONFIGURATION REGISTER */ |
7561 | #define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */ | 7561 | #define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */ |
7562 | #define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */ | 7562 | #define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */ |
7563 | #define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */ | 7563 | #define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */ |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a610b72a..553dcd8a9df2 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17800,23 +17800,6 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17800 | goto err_out_apeunmap; | 17800 | goto err_out_apeunmap; |
17801 | } | 17801 | } |
17802 | 17802 | ||
17803 | /* | ||
17804 | * Reset chip in case UNDI or EFI driver did not shutdown | ||
17805 | * DMA self test will enable WDMAC and we'll see (spurious) | ||
17806 | * pending DMA on the PCI bus at that point. | ||
17807 | */ | ||
17808 | if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || | ||
17809 | (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | ||
17810 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); | ||
17811 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
17812 | } | ||
17813 | |||
17814 | err = tg3_test_dma(tp); | ||
17815 | if (err) { | ||
17816 | dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); | ||
17817 | goto err_out_apeunmap; | ||
17818 | } | ||
17819 | |||
17820 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | 17803 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; |
17821 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | 17804 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; |
17822 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | 17805 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; |
@@ -17861,6 +17844,23 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17861 | sndmbx += 0xc; | 17844 | sndmbx += 0xc; |
17862 | } | 17845 | } |
17863 | 17846 | ||
17847 | /* | ||
17848 | * Reset chip in case UNDI or EFI driver did not shutdown | ||
17849 | * DMA self test will enable WDMAC and we'll see (spurious) | ||
17850 | * pending DMA on the PCI bus at that point. | ||
17851 | */ | ||
17852 | if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || | ||
17853 | (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | ||
17854 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); | ||
17855 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
17856 | } | ||
17857 | |||
17858 | err = tg3_test_dma(tp); | ||
17859 | if (err) { | ||
17860 | dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); | ||
17861 | goto err_out_apeunmap; | ||
17862 | } | ||
17863 | |||
17864 | tg3_init_coal(tp); | 17864 | tg3_init_coal(tp); |
17865 | 17865 | ||
17866 | pci_set_drvdata(pdev, dev); | 17866 | pci_set_drvdata(pdev, dev); |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c87df8..619083a860a4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) | |||
172 | 172 | ||
173 | /* Retrieve flash partition info */ | 173 | /* Retrieve flash partition info */ |
174 | fcomp.comp_status = 0; | 174 | fcomp.comp_status = 0; |
175 | init_completion(&fcomp.comp); | 175 | reinit_completion(&fcomp.comp); |
176 | spin_lock_irqsave(&bnad->bna_lock, flags); | 176 | spin_lock_irqsave(&bnad->bna_lock, flags); |
177 | ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, | 177 | ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, |
178 | bnad_cb_completion, &fcomp); | 178 | bnad_cb_completion, &fcomp); |
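
The bnad_debugfs hunk switches a completion that is reused across several firmware requests from init_completion() to reinit_completion(). The latter only resets the done counter of an already initialised completion, which is what a reuse path wants; re-running the full initialisation would also reinitialise the internal wait queue. A small sketch of the pattern, with illustrative names:

struct fw_wait {
	struct completion comp;
	int comp_status;
};

static void fw_wait_setup(struct fw_wait *w)
{
	init_completion(&w->comp);	/* once, when the object is created */
}

static int fw_wait_issue(struct fw_wait *w)
{
	w->comp_status = 0;
	reinit_completion(&w->comp);	/* before every reuse */
	/* ... start the request whose callback does complete(&w->comp) ... */
	wait_for_completion(&w->comp);
	return w->comp_status;
}
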
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 06dea3dd463c..3767271c7667 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2160,7 +2160,7 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2160 | int err = -ENXIO; | 2160 | int err = -ENXIO; |
2161 | const char *mac; | 2161 | const char *mac; |
2162 | void __iomem *mem; | 2162 | void __iomem *mem; |
2163 | unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0; | 2163 | unsigned int hw_q, queue_mask, q, num_queues; |
2164 | struct clk *pclk, *hclk, *tx_clk; | 2164 | struct clk *pclk, *hclk, *tx_clk; |
2165 | 2165 | ||
2166 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2166 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -2235,11 +2235,11 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2235 | * register mapping but we don't want to test the queue index then | 2235 | * register mapping but we don't want to test the queue index then |
2236 | * compute the corresponding register offset at run time. | 2236 | * compute the corresponding register offset at run time. |
2237 | */ | 2237 | */ |
2238 | for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { | 2238 | for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { |
2239 | if (!(queue_mask & (1 << hw_q))) | 2239 | if (!(queue_mask & (1 << hw_q))) |
2240 | continue; | 2240 | continue; |
2241 | 2241 | ||
2242 | queue = &bp->queues[q_irq]; | 2242 | queue = &bp->queues[q]; |
2243 | queue->bp = bp; | 2243 | queue->bp = bp; |
2244 | if (hw_q) { | 2244 | if (hw_q) { |
2245 | queue->ISR = GEM_ISR(hw_q - 1); | 2245 | queue->ISR = GEM_ISR(hw_q - 1); |
@@ -2261,18 +2261,18 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2261 | * must remove the optional gaps that could exist in the | 2261 | * must remove the optional gaps that could exist in the |
2262 | * hardware queue mask. | 2262 | * hardware queue mask. |
2263 | */ | 2263 | */ |
2264 | queue->irq = platform_get_irq(pdev, q_irq); | 2264 | queue->irq = platform_get_irq(pdev, q); |
2265 | err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, | 2265 | err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, |
2266 | 0, dev->name, queue); | 2266 | 0, dev->name, queue); |
2267 | if (err) { | 2267 | if (err) { |
2268 | dev_err(&pdev->dev, | 2268 | dev_err(&pdev->dev, |
2269 | "Unable to request IRQ %d (error %d)\n", | 2269 | "Unable to request IRQ %d (error %d)\n", |
2270 | queue->irq, err); | 2270 | queue->irq, err); |
2271 | goto err_out_free_irq; | 2271 | goto err_out_free_netdev; |
2272 | } | 2272 | } |
2273 | 2273 | ||
2274 | INIT_WORK(&queue->tx_error_task, macb_tx_error_task); | 2274 | INIT_WORK(&queue->tx_error_task, macb_tx_error_task); |
2275 | q_irq++; | 2275 | q++; |
2276 | } | 2276 | } |
2277 | dev->irq = bp->queues[0].irq; | 2277 | dev->irq = bp->queues[0].irq; |
2278 | 2278 | ||
@@ -2350,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2350 | err = register_netdev(dev); | 2350 | err = register_netdev(dev); |
2351 | if (err) { | 2351 | if (err) { |
2352 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | 2352 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); |
2353 | goto err_out_free_irq; | 2353 | goto err_out_free_netdev; |
2354 | } | 2354 | } |
2355 | 2355 | ||
2356 | err = macb_mii_init(bp); | 2356 | err = macb_mii_init(bp); |
@@ -2373,9 +2373,7 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2373 | 2373 | ||
2374 | err_out_unregister_netdev: | 2374 | err_out_unregister_netdev: |
2375 | unregister_netdev(dev); | 2375 | unregister_netdev(dev); |
2376 | err_out_free_irq: | 2376 | err_out_free_netdev: |
2377 | for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue) | ||
2378 | devm_free_irq(&pdev->dev, queue->irq, queue); | ||
2379 | free_netdev(dev); | 2377 | free_netdev(dev); |
2380 | err_out_disable_clocks: | 2378 | err_out_disable_clocks: |
2381 | if (!IS_ERR(tx_clk)) | 2379 | if (!IS_ERR(tx_clk)) |
@@ -2392,8 +2390,6 @@ static int __exit macb_remove(struct platform_device *pdev) | |||
2392 | { | 2390 | { |
2393 | struct net_device *dev; | 2391 | struct net_device *dev; |
2394 | struct macb *bp; | 2392 | struct macb *bp; |
2395 | struct macb_queue *queue; | ||
2396 | unsigned int q; | ||
2397 | 2393 | ||
2398 | dev = platform_get_drvdata(pdev); | 2394 | dev = platform_get_drvdata(pdev); |
2399 | 2395 | ||
@@ -2405,14 +2401,11 @@ static int __exit macb_remove(struct platform_device *pdev) | |||
2405 | kfree(bp->mii_bus->irq); | 2401 | kfree(bp->mii_bus->irq); |
2406 | mdiobus_free(bp->mii_bus); | 2402 | mdiobus_free(bp->mii_bus); |
2407 | unregister_netdev(dev); | 2403 | unregister_netdev(dev); |
2408 | queue = bp->queues; | ||
2409 | for (q = 0; q < bp->num_queues; ++q, ++queue) | ||
2410 | devm_free_irq(&pdev->dev, queue->irq, queue); | ||
2411 | free_netdev(dev); | ||
2412 | if (!IS_ERR(bp->tx_clk)) | 2404 | if (!IS_ERR(bp->tx_clk)) |
2413 | clk_disable_unprepare(bp->tx_clk); | 2405 | clk_disable_unprepare(bp->tx_clk); |
2414 | clk_disable_unprepare(bp->hclk); | 2406 | clk_disable_unprepare(bp->hclk); |
2415 | clk_disable_unprepare(bp->pclk); | 2407 | clk_disable_unprepare(bp->pclk); |
2408 | free_netdev(dev); | ||
2416 | } | 2409 | } |
2417 | 2410 | ||
2418 | return 0; | 2411 | return 0; |
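
The macb error-path and remove-path simplification leans on the IRQs having been requested with devm_request_irq(): device-managed resources are released automatically when probe fails or the device is unbound, so the explicit devm_free_irq() loops (and the extra q_irq counter that drove them) can go, and remove only frees the netdev after the clocks are shut down. The per-queue request then reduces to roughly:

	/* devm-managed: released automatically on probe failure or unbind,
	 * so the error path only has to free the netdev.
	 */
	err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
			       0, dev->name, queue);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			queue->irq, err);
		goto err_out_free_netdev;
	}
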
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 28d04153f999..c132d9030729 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2376,7 +2376,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type) | |||
2376 | "KR/KX", | 2376 | "KR/KX", |
2377 | "KR/KX/KX4", | 2377 | "KR/KX/KX4", |
2378 | "R QSFP_10G", | 2378 | "R QSFP_10G", |
2379 | "", | 2379 | "R QSA", |
2380 | "R QSFP", | 2380 | "R QSFP", |
2381 | "R BP40_BA", | 2381 | "R BP40_BA", |
2382 | }; | 2382 | }; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 291b6f219708..7c0aec85137a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2470,8 +2470,8 @@ enum fw_port_type { | |||
2470 | FW_PORT_TYPE_BP_AP, | 2470 | FW_PORT_TYPE_BP_AP, |
2471 | FW_PORT_TYPE_BP4_AP, | 2471 | FW_PORT_TYPE_BP4_AP, |
2472 | FW_PORT_TYPE_QSFP_10G, | 2472 | FW_PORT_TYPE_QSFP_10G, |
2473 | FW_PORT_TYPE_QSFP, | ||
2474 | FW_PORT_TYPE_QSA, | 2473 | FW_PORT_TYPE_QSA, |
2474 | FW_PORT_TYPE_QSFP, | ||
2475 | FW_PORT_TYPE_BP40_BA, | 2475 | FW_PORT_TYPE_BP40_BA, |
2476 | 2476 | ||
2477 | FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M | 2477 | FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751f0588..6049f70e110c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@ struct port_info { | |||
96 | s16 xact_addr_filt; /* index of our MAC address filter */ | 96 | s16 xact_addr_filt; /* index of our MAC address filter */ |
97 | u16 rss_size; /* size of VI's RSS table slice */ | 97 | u16 rss_size; /* size of VI's RSS table slice */ |
98 | u8 pidx; /* index into adapter port[] */ | 98 | u8 pidx; /* index into adapter port[] */ |
99 | s8 mdio_addr; | ||
100 | u8 port_type; /* firmware port type */ | ||
101 | u8 mod_type; /* firmware module type */ | ||
99 | u8 port_id; /* physical port ID */ | 102 | u8 port_id; /* physical port ID */ |
100 | u8 nqsets; /* # of "Queue Sets" */ | 103 | u8 nqsets; /* # of "Queue Sets" */ |
101 | u8 first_qset; /* index of first "Queue Set" */ | 104 | u8 first_qset; /* index of first "Queue Set" */ |
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev) | |||
522 | * is "contracted" to provide for the common code. | 525 | * is "contracted" to provide for the common code. |
523 | */ | 526 | */ |
524 | void t4vf_os_link_changed(struct adapter *, int, int); | 527 | void t4vf_os_link_changed(struct adapter *, int, int); |
528 | void t4vf_os_portmod_changed(struct adapter *, int); | ||
525 | 529 | ||
526 | /* | 530 | /* |
527 | * SGE function prototype declarations. | 531 | * SGE function prototype declarations. |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec34a467..2215d432a059 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/etherdevice.h> | 44 | #include <linux/etherdevice.h> |
45 | #include <linux/debugfs.h> | 45 | #include <linux/debugfs.h> |
46 | #include <linux/ethtool.h> | 46 | #include <linux/ethtool.h> |
47 | #include <linux/mdio.h> | ||
47 | 48 | ||
48 | #include "t4vf_common.h" | 49 | #include "t4vf_common.h" |
49 | #include "t4vf_defs.h" | 50 | #include "t4vf_defs.h" |
@@ -210,6 +211,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) | |||
210 | } | 211 | } |
211 | 212 | ||
212 | /* | 213 | /* |
214 | * The port module type has changed on the indicated "port" (Virtual | ||
215 | * Interface). | ||
216 | */ | ||
217 | void t4vf_os_portmod_changed(struct adapter *adapter, int pidx) | ||
218 | { | ||
219 | static const char * const mod_str[] = { | ||
220 | NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" | ||
221 | }; | ||
222 | const struct net_device *dev = adapter->port[pidx]; | ||
223 | const struct port_info *pi = netdev_priv(dev); | ||
224 | |||
225 | if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) | ||
226 | dev_info(adapter->pdev_dev, "%s: port module unplugged\n", | ||
227 | dev->name); | ||
228 | else if (pi->mod_type < ARRAY_SIZE(mod_str)) | ||
229 | dev_info(adapter->pdev_dev, "%s: %s port module inserted\n", | ||
230 | dev->name, mod_str[pi->mod_type]); | ||
231 | else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) | ||
232 | dev_info(adapter->pdev_dev, "%s: unsupported optical port " | ||
233 | "module inserted\n", dev->name); | ||
234 | else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) | ||
235 | dev_info(adapter->pdev_dev, "%s: unknown port module inserted," | ||
236 | "forcing TWINAX\n", dev->name); | ||
237 | else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) | ||
238 | dev_info(adapter->pdev_dev, "%s: transceiver module error\n", | ||
239 | dev->name); | ||
240 | else | ||
241 | dev_info(adapter->pdev_dev, "%s: unknown module type %d " | ||
242 | "inserted\n", dev->name, pi->mod_type); | ||
243 | } | ||
244 | |||
245 | /* | ||
213 | * Net device operations. | 246 | * Net device operations. |
214 | * ====================== | 247 | * ====================== |
215 | */ | 248 | */ |
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev) | |||
1193 | * state of the port to which we're linked. | 1226 | * state of the port to which we're linked. |
1194 | */ | 1227 | */ |
1195 | 1228 | ||
1196 | /* | 1229 | static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type, |
1197 | * Return current port link settings. | 1230 | unsigned int caps) |
1198 | */ | 1231 | { |
1199 | static int cxgb4vf_get_settings(struct net_device *dev, | 1232 | unsigned int v = 0; |
1200 | struct ethtool_cmd *cmd) | 1233 | |
1201 | { | 1234 | if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || |
1202 | const struct port_info *pi = netdev_priv(dev); | 1235 | type == FW_PORT_TYPE_BT_XAUI) { |
1236 | v |= SUPPORTED_TP; | ||
1237 | if (caps & FW_PORT_CAP_SPEED_100M) | ||
1238 | v |= SUPPORTED_100baseT_Full; | ||
1239 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
1240 | v |= SUPPORTED_1000baseT_Full; | ||
1241 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
1242 | v |= SUPPORTED_10000baseT_Full; | ||
1243 | } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { | ||
1244 | v |= SUPPORTED_Backplane; | ||
1245 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
1246 | v |= SUPPORTED_1000baseKX_Full; | ||
1247 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
1248 | v |= SUPPORTED_10000baseKX4_Full; | ||
1249 | } else if (type == FW_PORT_TYPE_KR) | ||
1250 | v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; | ||
1251 | else if (type == FW_PORT_TYPE_BP_AP) | ||
1252 | v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | | ||
1253 | SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; | ||
1254 | else if (type == FW_PORT_TYPE_BP4_AP) | ||
1255 | v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | | ||
1256 | SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | | ||
1257 | SUPPORTED_10000baseKX4_Full; | ||
1258 | else if (type == FW_PORT_TYPE_FIBER_XFI || | ||
1259 | type == FW_PORT_TYPE_FIBER_XAUI || | ||
1260 | type == FW_PORT_TYPE_SFP || | ||
1261 | type == FW_PORT_TYPE_QSFP_10G || | ||
1262 | type == FW_PORT_TYPE_QSA) { | ||
1263 | v |= SUPPORTED_FIBRE; | ||
1264 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
1265 | v |= SUPPORTED_1000baseT_Full; | ||
1266 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
1267 | v |= SUPPORTED_10000baseT_Full; | ||
1268 | } else if (type == FW_PORT_TYPE_BP40_BA || | ||
1269 | type == FW_PORT_TYPE_QSFP) { | ||
1270 | v |= SUPPORTED_40000baseSR4_Full; | ||
1271 | v |= SUPPORTED_FIBRE; | ||
1272 | } | ||
1273 | |||
1274 | if (caps & FW_PORT_CAP_ANEG) | ||
1275 | v |= SUPPORTED_Autoneg; | ||
1276 | return v; | ||
1277 | } | ||
1278 | |||
1279 | static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1280 | { | ||
1281 | const struct port_info *p = netdev_priv(dev); | ||
1282 | |||
1283 | if (p->port_type == FW_PORT_TYPE_BT_SGMII || | ||
1284 | p->port_type == FW_PORT_TYPE_BT_XFI || | ||
1285 | p->port_type == FW_PORT_TYPE_BT_XAUI) | ||
1286 | cmd->port = PORT_TP; | ||
1287 | else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || | ||
1288 | p->port_type == FW_PORT_TYPE_FIBER_XAUI) | ||
1289 | cmd->port = PORT_FIBRE; | ||
1290 | else if (p->port_type == FW_PORT_TYPE_SFP || | ||
1291 | p->port_type == FW_PORT_TYPE_QSFP_10G || | ||
1292 | p->port_type == FW_PORT_TYPE_QSA || | ||
1293 | p->port_type == FW_PORT_TYPE_QSFP) { | ||
1294 | if (p->mod_type == FW_PORT_MOD_TYPE_LR || | ||
1295 | p->mod_type == FW_PORT_MOD_TYPE_SR || | ||
1296 | p->mod_type == FW_PORT_MOD_TYPE_ER || | ||
1297 | p->mod_type == FW_PORT_MOD_TYPE_LRM) | ||
1298 | cmd->port = PORT_FIBRE; | ||
1299 | else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || | ||
1300 | p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) | ||
1301 | cmd->port = PORT_DA; | ||
1302 | else | ||
1303 | cmd->port = PORT_OTHER; | ||
1304 | } else | ||
1305 | cmd->port = PORT_OTHER; | ||
1203 | 1306 | ||
1204 | cmd->supported = pi->link_cfg.supported; | 1307 | if (p->mdio_addr >= 0) { |
1205 | cmd->advertising = pi->link_cfg.advertising; | 1308 | cmd->phy_address = p->mdio_addr; |
1309 | cmd->transceiver = XCVR_EXTERNAL; | ||
1310 | cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? | ||
1311 | MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; | ||
1312 | } else { | ||
1313 | cmd->phy_address = 0; /* not really, but no better option */ | ||
1314 | cmd->transceiver = XCVR_INTERNAL; | ||
1315 | cmd->mdio_support = 0; | ||
1316 | } | ||
1317 | |||
1318 | cmd->supported = t4vf_from_fw_linkcaps(p->port_type, | ||
1319 | p->link_cfg.supported); | ||
1320 | cmd->advertising = t4vf_from_fw_linkcaps(p->port_type, | ||
1321 | p->link_cfg.advertising); | ||
1206 | ethtool_cmd_speed_set(cmd, | 1322 | ethtool_cmd_speed_set(cmd, |
1207 | netif_carrier_ok(dev) ? pi->link_cfg.speed : -1); | 1323 | netif_carrier_ok(dev) ? p->link_cfg.speed : 0); |
1208 | cmd->duplex = DUPLEX_FULL; | 1324 | cmd->duplex = DUPLEX_FULL; |
1209 | 1325 | cmd->autoneg = p->link_cfg.autoneg; | |
1210 | cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; | ||
1211 | cmd->phy_address = pi->port_id; | ||
1212 | cmd->transceiver = XCVR_EXTERNAL; | ||
1213 | cmd->autoneg = pi->link_cfg.autoneg; | ||
1214 | cmd->maxtxpkt = 0; | 1326 | cmd->maxtxpkt = 0; |
1215 | cmd->maxrxpkt = 0; | 1327 | cmd->maxrxpkt = 0; |
1216 | return 0; | 1328 | return 0; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f5e364..b9debb4f29a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@ struct adapter_params { | |||
230 | 230 | ||
231 | static inline bool is_10g_port(const struct link_config *lc) | 231 | static inline bool is_10g_port(const struct link_config *lc) |
232 | { | 232 | { |
233 | return (lc->supported & SUPPORTED_10000baseT_Full) != 0; | 233 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; |
234 | } | 234 | } |
235 | 235 | ||
236 | static inline bool is_x_10g_port(const struct link_config *lc) | 236 | static inline bool is_x_10g_port(const struct link_config *lc) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833b7797..21dc9a20308c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr) | |||
245 | return a & 0x3f; | 245 | return a & 0x3f; |
246 | } | 246 | } |
247 | 247 | ||
248 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | ||
249 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ | ||
250 | FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) | ||
251 | |||
248 | /** | 252 | /** |
249 | * init_link_config - initialize a link's SW state | 253 | * init_link_config - initialize a link's SW state |
250 | * @lc: structure holding the link state | 254 | * @lc: structure holding the link state |
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps) | |||
259 | lc->requested_speed = 0; | 263 | lc->requested_speed = 0; |
260 | lc->speed = 0; | 264 | lc->speed = 0; |
261 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; | 265 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; |
262 | if (lc->supported & SUPPORTED_Autoneg) { | 266 | if (lc->supported & FW_PORT_CAP_ANEG) { |
263 | lc->advertising = lc->supported; | 267 | lc->advertising = lc->supported & ADVERT_MASK; |
264 | lc->autoneg = AUTONEG_ENABLE; | 268 | lc->autoneg = AUTONEG_ENABLE; |
265 | lc->requested_fc |= PAUSE_AUTONEG; | 269 | lc->requested_fc |= PAUSE_AUTONEG; |
266 | } else { | 270 | } else { |
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx) | |||
280 | struct fw_vi_cmd vi_cmd, vi_rpl; | 284 | struct fw_vi_cmd vi_cmd, vi_rpl; |
281 | struct fw_port_cmd port_cmd, port_rpl; | 285 | struct fw_port_cmd port_cmd, port_rpl; |
282 | int v; | 286 | int v; |
283 | u32 word; | ||
284 | 287 | ||
285 | /* | 288 | /* |
286 | * Execute a VI Read command to get our Virtual Interface information | 289 | * Execute a VI Read command to get our Virtual Interface information |
@@ -319,19 +322,11 @@ int t4vf_port_init(struct adapter *adapter, int pidx) | |||
319 | if (v) | 322 | if (v) |
320 | return v; | 323 | return v; |
321 | 324 | ||
322 | v = 0; | 325 | v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); |
323 | word = be16_to_cpu(port_rpl.u.info.pcap); | 326 | pi->port_type = FW_PORT_CMD_PTYPE_G(v); |
324 | if (word & FW_PORT_CAP_SPEED_100M) | 327 | pi->mod_type = FW_PORT_MOD_TYPE_NA; |
325 | v |= SUPPORTED_100baseT_Full; | 328 | |
326 | if (word & FW_PORT_CAP_SPEED_1G) | 329 | init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); |
327 | v |= SUPPORTED_1000baseT_Full; | ||
328 | if (word & FW_PORT_CAP_SPEED_10G) | ||
329 | v |= SUPPORTED_10000baseT_Full; | ||
330 | if (word & FW_PORT_CAP_SPEED_40G) | ||
331 | v |= SUPPORTED_40000baseSR4_Full; | ||
332 | if (word & FW_PORT_CAP_ANEG) | ||
333 | v |= SUPPORTED_Autoneg; | ||
334 | init_link_config(&pi->link_cfg, v); | ||
335 | 330 | ||
336 | return 0; | 331 | return 0; |
337 | } | 332 | } |
@@ -1491,7 +1486,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) | |||
1491 | */ | 1486 | */ |
1492 | const struct fw_port_cmd *port_cmd = | 1487 | const struct fw_port_cmd *port_cmd = |
1493 | (const struct fw_port_cmd *)rpl; | 1488 | (const struct fw_port_cmd *)rpl; |
1494 | u32 word; | 1489 | u32 stat, mod; |
1495 | int action, port_id, link_ok, speed, fc, pidx; | 1490 | int action, port_id, link_ok, speed, fc, pidx; |
1496 | 1491 | ||
1497 | /* | 1492 | /* |
@@ -1509,21 +1504,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) | |||
1509 | port_id = FW_PORT_CMD_PORTID_G( | 1504 | port_id = FW_PORT_CMD_PORTID_G( |
1510 | be32_to_cpu(port_cmd->op_to_portid)); | 1505 | be32_to_cpu(port_cmd->op_to_portid)); |
1511 | 1506 | ||
1512 | word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); | 1507 | stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); |
1513 | link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; | 1508 | link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; |
1514 | speed = 0; | 1509 | speed = 0; |
1515 | fc = 0; | 1510 | fc = 0; |
1516 | if (word & FW_PORT_CMD_RXPAUSE_F) | 1511 | if (stat & FW_PORT_CMD_RXPAUSE_F) |
1517 | fc |= PAUSE_RX; | 1512 | fc |= PAUSE_RX; |
1518 | if (word & FW_PORT_CMD_TXPAUSE_F) | 1513 | if (stat & FW_PORT_CMD_TXPAUSE_F) |
1519 | fc |= PAUSE_TX; | 1514 | fc |= PAUSE_TX; |
1520 | if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) | 1515 | if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) |
1521 | speed = 100; | 1516 | speed = 100; |
1522 | else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) | 1517 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) |
1523 | speed = 1000; | 1518 | speed = 1000; |
1524 | else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) | 1519 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) |
1525 | speed = 10000; | 1520 | speed = 10000; |
1526 | else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) | 1521 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) |
1527 | speed = 40000; | 1522 | speed = 40000; |
1528 | 1523 | ||
1529 | /* | 1524 | /* |
@@ -1540,12 +1535,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) | |||
1540 | continue; | 1535 | continue; |
1541 | 1536 | ||
1542 | lc = &pi->link_cfg; | 1537 | lc = &pi->link_cfg; |
1538 | |||
1539 | mod = FW_PORT_CMD_MODTYPE_G(stat); | ||
1540 | if (mod != pi->mod_type) { | ||
1541 | pi->mod_type = mod; | ||
1542 | t4vf_os_portmod_changed(adapter, pidx); | ||
1543 | } | ||
1544 | |||
1543 | if (link_ok != lc->link_ok || speed != lc->speed || | 1545 | if (link_ok != lc->link_ok || speed != lc->speed || |
1544 | fc != lc->fc) { | 1546 | fc != lc->fc) { |
1545 | /* something changed */ | 1547 | /* something changed */ |
1546 | lc->link_ok = link_ok; | 1548 | lc->link_ok = link_ok; |
1547 | lc->speed = speed; | 1549 | lc->speed = speed; |
1548 | lc->fc = fc; | 1550 | lc->fc = fc; |
1551 | lc->supported = | ||
1552 | be16_to_cpu(port_cmd->u.info.pcap); | ||
1549 | t4vf_os_link_changed(adapter, pidx, link_ok); | 1553 | t4vf_os_link_changed(adapter, pidx, link_ok); |
1550 | } | 1554 | } |
1551 | } | 1555 | } |
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b2427928eb11..d1c025fd9726 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/interrupt.h> | 60 | #include <linux/interrupt.h> |
61 | #include <linux/ioport.h> | 61 | #include <linux/ioport.h> |
62 | #include <linux/in.h> | 62 | #include <linux/in.h> |
63 | #include <linux/jiffies.h> | ||
63 | #include <linux/skbuff.h> | 64 | #include <linux/skbuff.h> |
64 | #include <linux/spinlock.h> | 65 | #include <linux/spinlock.h> |
65 | #include <linux/string.h> | 66 | #include <linux/string.h> |
@@ -238,13 +239,13 @@ writereg(struct net_device *dev, u16 regno, u16 value) | |||
238 | static int __init | 239 | static int __init |
239 | wait_eeprom_ready(struct net_device *dev) | 240 | wait_eeprom_ready(struct net_device *dev) |
240 | { | 241 | { |
241 | int timeout = jiffies; | 242 | unsigned long timeout = jiffies; |
242 | /* check to see if the EEPROM is ready, | 243 | /* check to see if the EEPROM is ready, |
243 | * a timeout is used just in case EEPROM is ready when | 244 | * a timeout is used just in case EEPROM is ready when |
244 | * SI_BUSY in the PP_SelfST is clear | 245 | * SI_BUSY in the PP_SelfST is clear |
245 | */ | 246 | */ |
246 | while (readreg(dev, PP_SelfST) & SI_BUSY) | 247 | while (readreg(dev, PP_SelfST) & SI_BUSY) |
247 | if (jiffies - timeout >= 40) | 248 | if (time_after_eq(jiffies, timeout + 40)) |
248 | return -1; | 249 | return -1; |
249 | return 0; | 250 | return 0; |
250 | } | 251 | } |
@@ -485,7 +486,7 @@ control_dc_dc(struct net_device *dev, int on_not_off) | |||
485 | { | 486 | { |
486 | struct net_local *lp = netdev_priv(dev); | 487 | struct net_local *lp = netdev_priv(dev); |
487 | unsigned int selfcontrol; | 488 | unsigned int selfcontrol; |
488 | int timenow = jiffies; | 489 | unsigned long timenow = jiffies; |
489 | /* control the DC to DC convertor in the SelfControl register. | 490 | /* control the DC to DC convertor in the SelfControl register. |
490 | * Note: This is hooked up to a general purpose pin, might not | 491 | * Note: This is hooked up to a general purpose pin, might not |
491 | * always be a DC to DC convertor. | 492 | * always be a DC to DC convertor. |
@@ -499,7 +500,7 @@ control_dc_dc(struct net_device *dev, int on_not_off) | |||
499 | writereg(dev, PP_SelfCTL, selfcontrol); | 500 | writereg(dev, PP_SelfCTL, selfcontrol); |
500 | 501 | ||
501 | /* Wait for the DC/DC converter to power up - 500ms */ | 502 | /* Wait for the DC/DC converter to power up - 500ms */ |
502 | while (jiffies - timenow < HZ) | 503 | while (time_before(jiffies, timenow + HZ)) |
503 | ; | 504 | ; |
504 | } | 505 | } |
505 | 506 | ||
@@ -514,7 +515,7 @@ send_test_pkt(struct net_device *dev) | |||
514 | 0, 0, /* DSAP=0 & SSAP=0 fields */ | 515 | 0, 0, /* DSAP=0 & SSAP=0 fields */ |
515 | 0xf3, 0 /* Control (Test Req + P bit set) */ | 516 | 0xf3, 0 /* Control (Test Req + P bit set) */ |
516 | }; | 517 | }; |
517 | long timenow = jiffies; | 518 | unsigned long timenow = jiffies; |
518 | 519 | ||
519 | writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON); | 520 | writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON); |
520 | 521 | ||
@@ -525,10 +526,10 @@ send_test_pkt(struct net_device *dev) | |||
525 | iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT); | 526 | iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT); |
526 | 527 | ||
527 | /* Test to see if the chip has allocated memory for the packet */ | 528 | /* Test to see if the chip has allocated memory for the packet */ |
528 | while (jiffies - timenow < 5) | 529 | while (time_before(jiffies, timenow + 5)) |
529 | if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW) | 530 | if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW) |
530 | break; | 531 | break; |
531 | if (jiffies - timenow >= 5) | 532 | if (time_after_eq(jiffies, timenow + 5)) |
532 | return 0; /* this shouldn't happen */ | 533 | return 0; /* this shouldn't happen */ |
533 | 534 | ||
534 | /* Write the contents of the packet */ | 535 | /* Write the contents of the packet */ |
@@ -536,7 +537,7 @@ send_test_pkt(struct net_device *dev) | |||
536 | 537 | ||
537 | cs89_dbg(1, debug, "Sending test packet "); | 538 | cs89_dbg(1, debug, "Sending test packet "); |
538 | /* wait a couple of jiffies for packet to be received */ | 539 | /* wait a couple of jiffies for packet to be received */ |
539 | for (timenow = jiffies; jiffies - timenow < 3;) | 540 | for (timenow = jiffies; time_before(jiffies, timenow + 3);) |
540 | ; | 541 | ; |
541 | if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) { | 542 | if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) { |
542 | cs89_dbg(1, cont, "succeeded\n"); | 543 | cs89_dbg(1, cont, "succeeded\n"); |
@@ -556,7 +557,7 @@ static int | |||
556 | detect_tp(struct net_device *dev) | 557 | detect_tp(struct net_device *dev) |
557 | { | 558 | { |
558 | struct net_local *lp = netdev_priv(dev); | 559 | struct net_local *lp = netdev_priv(dev); |
559 | int timenow = jiffies; | 560 | unsigned long timenow = jiffies; |
560 | int fdx; | 561 | int fdx; |
561 | 562 | ||
562 | cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name); | 563 | cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name); |
@@ -574,7 +575,7 @@ detect_tp(struct net_device *dev) | |||
574 | /* Delay for the hardware to work out if the TP cable is present | 575 | /* Delay for the hardware to work out if the TP cable is present |
575 | * - 150ms | 576 | * - 150ms |
576 | */ | 577 | */ |
577 | for (timenow = jiffies; jiffies - timenow < 15;) | 578 | for (timenow = jiffies; time_before(jiffies, timenow + 15);) |
578 | ; | 579 | ; |
579 | if ((readreg(dev, PP_LineST) & LINK_OK) == 0) | 580 | if ((readreg(dev, PP_LineST) & LINK_OK) == 0) |
580 | return DETECTED_NONE; | 581 | return DETECTED_NONE; |
@@ -618,7 +619,7 @@ detect_tp(struct net_device *dev) | |||
618 | if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) { | 619 | if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) { |
619 | pr_info("%s: negotiating duplex...\n", dev->name); | 620 | pr_info("%s: negotiating duplex...\n", dev->name); |
620 | while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) { | 621 | while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) { |
621 | if (jiffies - timenow > 4000) { | 622 | if (time_after(jiffies, timenow + 4000)) { |
622 | pr_err("**** Full / half duplex auto-negotiation timed out ****\n"); | 623 | pr_err("**** Full / half duplex auto-negotiation timed out ****\n"); |
623 | break; | 624 | break; |
624 | } | 625 | } |
@@ -1271,7 +1272,7 @@ static void __init reset_chip(struct net_device *dev) | |||
1271 | { | 1272 | { |
1272 | #if !defined(CONFIG_MACH_MX31ADS) | 1273 | #if !defined(CONFIG_MACH_MX31ADS) |
1273 | struct net_local *lp = netdev_priv(dev); | 1274 | struct net_local *lp = netdev_priv(dev); |
1274 | int reset_start_time; | 1275 | unsigned long reset_start_time; |
1275 | 1276 | ||
1276 | writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); | 1277 | writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); |
1277 | 1278 | ||
@@ -1294,7 +1295,7 @@ static void __init reset_chip(struct net_device *dev) | |||
1294 | /* Wait until the chip is reset */ | 1295 | /* Wait until the chip is reset */ |
1295 | reset_start_time = jiffies; | 1296 | reset_start_time = jiffies; |
1296 | while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 && | 1297 | while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 && |
1297 | jiffies - reset_start_time < 2) | 1298 | time_before(jiffies, reset_start_time + 2)) |
1298 | ; | 1299 | ; |
1299 | #endif /* !CONFIG_MACH_MX31ADS */ | 1300 | #endif /* !CONFIG_MACH_MX31ADS */ |
1300 | } | 1301 | } |
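
All of the cs89x0 changes are the same fix: jiffies timestamps belong in unsigned long (an int truncates them on 64-bit and goes negative across a wrap), and comparisons should go through time_before()/time_after_eq(), which handle wrap-around through signed subtraction instead of the open-coded "jiffies - start >= n". A condensed sketch of the corrected polling loop; readreg(), PP_SelfST and SI_BUSY are the driver's own helpers and constants, and the 40-tick budget matches the original.

static int wait_eeprom_ready_sketch(struct net_device *dev)
{
	unsigned long deadline = jiffies + 40;	/* unsigned long, like jiffies */

	while (readreg(dev, PP_SelfST) & SI_BUSY)
		if (time_after_eq(jiffies, deadline))	/* wrap-safe */
			return -1;
	return 0;
}
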
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f605d60..b29e027c476e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1060 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); | 1060 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); |
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { | 1063 | /* Hardware does not provide whole packet checksum. It only |
1064 | skb->csum = htons(checksum); | 1064 | * provides pseudo checksum. Since hw validates the packet |
1065 | skb->ip_summed = CHECKSUM_COMPLETE; | 1065 | * checksum but does not provide us the checksum value, use |
1066 | } | 1066 | * CHECKSUM_UNNECESSARY. |
1067 | */ | ||
1068 | if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && | ||
1069 | ipv4_csum_ok) | ||
1070 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1067 | 1071 | ||
1068 | if (vlan_stripped) | 1072 | if (vlan_stripped) |
1069 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); | 1073 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); |
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev) | |||
1612 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { | 1616 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { |
1613 | netdev_err(netdev, "Unable to alloc receive buffers\n"); | 1617 | netdev_err(netdev, "Unable to alloc receive buffers\n"); |
1614 | err = -ENOMEM; | 1618 | err = -ENOMEM; |
1615 | goto err_out_notify_unset; | 1619 | goto err_out_free_rq; |
1616 | } | 1620 | } |
1617 | } | 1621 | } |
1618 | 1622 | ||
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev) | |||
1645 | 1649 | ||
1646 | return 0; | 1650 | return 0; |
1647 | 1651 | ||
1648 | err_out_notify_unset: | 1652 | err_out_free_rq: |
1653 | for (i = 0; i < enic->rq_count; i++) | ||
1654 | vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); | ||
1649 | enic_dev_notify_unset(enic); | 1655 | enic_dev_notify_unset(enic); |
1650 | err_out_free_intr: | 1656 | err_out_free_intr: |
1651 | enic_free_intr(enic); | 1657 | enic_free_intr(enic); |
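
The enic receive fix hinges on the difference between the two ip_summed modes: CHECKSUM_COMPLETE asserts that skb->csum carries the checksum of the entire packet, which this hardware never reports, while CHECKSUM_UNNECESSARY only tells the stack that the hardware already validated the L3/L4 checksums. A generic sketch of how a receive handler picks between them; the flag parameters are illustrative and not enic's field names.

static void rx_set_csum(struct sk_buff *skb, struct net_device *netdev,
			bool hw_full_sum, __wsum full_sum,
			bool l3_ok, bool l4_ok)
{
	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (hw_full_sum) {
		/* only valid when hw hands back the whole-packet checksum */
		skb->csum = full_sum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else if (l3_ok && l4_ok) {
		/* hw verified the checksums but reported no value */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
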
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2aacd4731051..41a0a5498da7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3138,6 +3138,7 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) | |||
3138 | 3138 | ||
3139 | netdev->hw_enc_features = 0; | 3139 | netdev->hw_enc_features = 0; |
3140 | netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); | 3140 | netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); |
3141 | netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); | ||
3141 | } | 3142 | } |
3142 | #endif | 3143 | #endif |
3143 | 3144 | ||
@@ -4429,6 +4430,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
4429 | NETIF_F_TSO | NETIF_F_TSO6 | | 4430 | NETIF_F_TSO | NETIF_F_TSO6 | |
4430 | NETIF_F_GSO_UDP_TUNNEL; | 4431 | NETIF_F_GSO_UDP_TUNNEL; |
4431 | netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; | 4432 | netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; |
4433 | netdev->features |= NETIF_F_GSO_UDP_TUNNEL; | ||
4432 | 4434 | ||
4433 | dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", | 4435 | dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", |
4434 | be16_to_cpu(port)); | 4436 | be16_to_cpu(port)); |
@@ -4457,9 +4459,11 @@ done: | |||
4457 | adapter->vxlan_port_count--; | 4459 | adapter->vxlan_port_count--; |
4458 | } | 4460 | } |
4459 | 4461 | ||
4460 | static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) | 4462 | static netdev_features_t be_features_check(struct sk_buff *skb, |
4463 | struct net_device *dev, | ||
4464 | netdev_features_t features) | ||
4461 | { | 4465 | { |
4462 | return vxlan_gso_check(skb); | 4466 | return vxlan_features_check(skb, features); |
4463 | } | 4467 | } |
4464 | #endif | 4468 | #endif |
4465 | 4469 | ||
@@ -4490,7 +4494,7 @@ static const struct net_device_ops be_netdev_ops = { | |||
4490 | #ifdef CONFIG_BE2NET_VXLAN | 4494 | #ifdef CONFIG_BE2NET_VXLAN |
4491 | .ndo_add_vxlan_port = be_add_vxlan_port, | 4495 | .ndo_add_vxlan_port = be_add_vxlan_port, |
4492 | .ndo_del_vxlan_port = be_del_vxlan_port, | 4496 | .ndo_del_vxlan_port = be_del_vxlan_port, |
4493 | .ndo_gso_check = be_gso_check, | 4497 | .ndo_features_check = be_features_check, |
4494 | #endif | 4498 | #endif |
4495 | }; | 4499 | }; |
4496 | 4500 | ||
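
The be2net VXLAN hunks keep netdev->features (the currently active set) in step with netdev->hw_features (the set ethtool may toggle) when tunnel offload is turned on or off; updating only hw_features would leave a stale NETIF_F_GSO_UDP_TUNNEL bit active until the user poked it with ethtool. Sketch of the enable/disable pair, with illustrative function names:

static void example_enable_vxlan_offload(struct net_device *netdev)
{
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;	/* user-toggleable */
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;	/* active right now */
}

static void example_disable_vxlan_offload(struct net_device *netdev)
{
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
}
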
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ebf76c496e7a..5ebdf8dc8a31 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1558,20 +1558,21 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1558 | { | 1558 | { |
1559 | struct net_device *ndev = dev_id; | 1559 | struct net_device *ndev = dev_id; |
1560 | struct fec_enet_private *fep = netdev_priv(ndev); | 1560 | struct fec_enet_private *fep = netdev_priv(ndev); |
1561 | const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF; | ||
1562 | uint int_events; | 1561 | uint int_events; |
1563 | irqreturn_t ret = IRQ_NONE; | 1562 | irqreturn_t ret = IRQ_NONE; |
1564 | 1563 | ||
1565 | int_events = readl(fep->hwp + FEC_IEVENT); | 1564 | int_events = readl(fep->hwp + FEC_IEVENT); |
1566 | writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); | 1565 | writel(int_events, fep->hwp + FEC_IEVENT); |
1567 | fec_enet_collect_events(fep, int_events); | 1566 | fec_enet_collect_events(fep, int_events); |
1568 | 1567 | ||
1569 | if (int_events & napi_mask) { | 1568 | if (fep->work_tx || fep->work_rx) { |
1570 | ret = IRQ_HANDLED; | 1569 | ret = IRQ_HANDLED; |
1571 | 1570 | ||
1572 | /* Disable the NAPI interrupts */ | 1571 | if (napi_schedule_prep(&fep->napi)) { |
1573 | writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); | 1572 | /* Disable the NAPI interrupts */ |
1574 | napi_schedule(&fep->napi); | 1573 | writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); |
1574 | __napi_schedule(&fep->napi); | ||
1575 | } | ||
1575 | } | 1576 | } |
1576 | 1577 | ||
1577 | if (int_events & FEC_ENET_MII) { | 1578 | if (int_events & FEC_ENET_MII) { |
@@ -1591,12 +1592,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) | |||
1591 | struct fec_enet_private *fep = netdev_priv(ndev); | 1592 | struct fec_enet_private *fep = netdev_priv(ndev); |
1592 | int pkts; | 1593 | int pkts; |
1593 | 1594 | ||
1594 | /* | ||
1595 | * Clear any pending transmit or receive interrupts before | ||
1596 | * processing the rings to avoid racing with the hardware. | ||
1597 | */ | ||
1598 | writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT); | ||
1599 | |||
1600 | pkts = fec_enet_rx(ndev, budget); | 1595 | pkts = fec_enet_rx(ndev, budget); |
1601 | 1596 | ||
1602 | fec_enet_tx(ndev); | 1597 | fec_enet_tx(ndev); |
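
The fec interrupt rework acknowledges every event in the hard IRQ handler and then arms NAPI only when napi_schedule_prep() confirms polling is not already scheduled, so the interrupt mask is written exactly once per scheduling and the poll function no longer has to clear RXF/TXF events itself. A condensed sketch of the handler; MII handling is omitted and the field names follow the driver.

static irqreturn_t fec_isr_sketch(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 int_events = readl(fep->hwp + FEC_IEVENT);
	irqreturn_t ret = IRQ_NONE;

	writel(int_events, fep->hwp + FEC_IEVENT);	/* ack everything here */
	fec_enet_collect_events(fep, int_events);

	if (fep->work_tx || fep->work_rx) {
		ret = IRQ_HANDLED;
		if (napi_schedule_prep(&fep->napi)) {
			/* this context owns the NAPI instance: mask + schedule */
			writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}
	return ret;
}
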
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) | |||
1543 | mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); | 1543 | mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); |
1544 | } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && | 1544 | } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && |
1545 | (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && | 1545 | (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && |
1546 | !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { | 1546 | (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { |
1547 | /* enable/disable MDI/MDI-X auto-switching. */ | 1547 | /* enable/disable MDI/MDI-X auto-switching. */ |
1548 | mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, | 1548 | mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, |
1549 | nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); | 1549 | nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, | |||
829 | if (desc_n >= ring->count || desc_n < 0) { | 829 | if (desc_n >= ring->count || desc_n < 0) { |
830 | dev_info(&pf->pdev->dev, | 830 | dev_info(&pf->pdev->dev, |
831 | "descriptor %d not found\n", desc_n); | 831 | "descriptor %d not found\n", desc_n); |
832 | return; | 832 | goto out; |
833 | } | 833 | } |
834 | if (!is_rx_ring) { | 834 | if (!is_rx_ring) { |
835 | txd = I40E_TX_DESC(ring, desc_n); | 835 | txd = I40E_TX_DESC(ring, desc_n); |
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, | |||
855 | } else { | 855 | } else { |
856 | dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); | 856 | dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); |
857 | } | 857 | } |
858 | |||
859 | out: | ||
858 | kfree(ring); | 860 | kfree(ring); |
859 | } | 861 | } |
860 | 862 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0a7ea4c5f9d3..a5f2660d552d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7549,6 +7549,11 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
7549 | if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) | 7549 | if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) |
7550 | return -EOPNOTSUPP; | 7550 | return -EOPNOTSUPP; |
7551 | 7551 | ||
7552 | if (vid) { | ||
7553 | pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); | ||
7554 | return -EINVAL; | ||
7555 | } | ||
7556 | |||
7552 | /* Hardware does not support aging addresses so if a | 7557 | /* Hardware does not support aging addresses so if a |
7553 | * ndm_state is given only allow permanent addresses | 7558 | * ndm_state is given only allow permanent addresses |
7554 | */ | 7559 | */ |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 051ea94bdcd3..0f69ef81751a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c | |||
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) | |||
1125 | u32 swmask = mask; | 1125 | u32 swmask = mask; |
1126 | u32 fwmask = mask << 16; | 1126 | u32 fwmask = mask << 16; |
1127 | s32 ret_val = 0; | 1127 | s32 ret_val = 0; |
1128 | s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ | 1128 | s32 i = 0, timeout = 200; |
1129 | 1129 | ||
1130 | while (i < timeout) { | 1130 | while (i < timeout) { |
1131 | if (igb_get_hw_semaphore(hw)) { | 1131 | if (igb_get_hw_semaphore(hw)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6ff214de1111..d0d6dc1b8e46 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev) | |||
1569 | mlx4_en_free_affinity_hint(priv, i); | 1569 | mlx4_en_free_affinity_hint(priv, i); |
1570 | goto cq_err; | 1570 | goto cq_err; |
1571 | } | 1571 | } |
1572 | for (j = 0; j < cq->size; j++) | 1572 | |
1573 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | 1573 | for (j = 0; j < cq->size; j++) { |
1574 | struct mlx4_cqe *cqe = NULL; | ||
1575 | |||
1576 | cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + | ||
1577 | priv->cqe_factor; | ||
1578 | cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; | ||
1579 | } | ||
1580 | |||
1574 | err = mlx4_en_set_cq_moder(priv, cq); | 1581 | err = mlx4_en_set_cq_moder(priv, cq); |
1575 | if (err) { | 1582 | if (err) { |
1576 | en_err(priv, "Failed setting cq moderation parameters\n"); | 1583 | en_err(priv, "Failed setting cq moderation parameters\n"); |
@@ -2358,9 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, | |||
2358 | queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); | 2365 | queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); |
2359 | } | 2366 | } |
2360 | 2367 | ||
2361 | static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) | 2368 | static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, |
2369 | struct net_device *dev, | ||
2370 | netdev_features_t features) | ||
2362 | { | 2371 | { |
2363 | return vxlan_gso_check(skb); | 2372 | return vxlan_features_check(skb, features); |
2364 | } | 2373 | } |
2365 | #endif | 2374 | #endif |
2366 | 2375 | ||
@@ -2393,7 +2402,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
2393 | #ifdef CONFIG_MLX4_EN_VXLAN | 2402 | #ifdef CONFIG_MLX4_EN_VXLAN |
2394 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, | 2403 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, |
2395 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, | 2404 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, |
2396 | .ndo_gso_check = mlx4_en_gso_check, | 2405 | .ndo_features_check = mlx4_en_features_check, |
2397 | #endif | 2406 | #endif |
2398 | }; | 2407 | }; |
2399 | 2408 | ||
@@ -2427,7 +2436,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
2427 | #ifdef CONFIG_MLX4_EN_VXLAN | 2436 | #ifdef CONFIG_MLX4_EN_VXLAN |
2428 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, | 2437 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, |
2429 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, | 2438 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, |
2430 | .ndo_gso_check = mlx4_en_gso_check, | 2439 | .ndo_features_check = mlx4_en_features_check, |
2431 | #endif | 2440 | #endif |
2432 | }; | 2441 | }; |
2433 | 2442 | ||
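The two hunks above replace the boolean ndo_gso_check hook with ndo_features_check, which returns a per-packet feature mask instead of a yes/no answer. A hedged, userspace-only sketch of the difference between the two callback shapes; the types and flag names below are made up for illustration and merely stand in for the kernel's sk_buff and netdev_features_t.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;

#define F_GSO   (1ull << 0)
#define F_CSUM  (1ull << 1)

struct packet {
        int is_tunneled;        /* e.g. a VXLAN-encapsulated frame */
};

/* old style: a bare yes/no, the caller has to guess what to strip */
static int gso_check(const struct packet *p)
{
        return !p->is_tunneled;
}

/* new style: return the subset of features usable for this packet */
static features_t features_check(const struct packet *p, features_t features)
{
        if (p->is_tunneled)
                features &= ~(F_GSO | F_CSUM);  /* drop offloads we cannot do */
        return features;
}

int main(void)
{
        struct packet p = { .is_tunneled = 1 };

        printf("gso_check: %d\n", gso_check(&p));
        printf("features_check: 0x%llx\n",
               (unsigned long long)features_check(&p, F_GSO | F_CSUM));
        return 0;
}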
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index a308d41e4de0..e3357bf523df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
962 | tx_desc->ctrl.owner_opcode = op_own; | 962 | tx_desc->ctrl.owner_opcode = op_own; |
963 | if (send_doorbell) { | 963 | if (send_doorbell) { |
964 | wmb(); | 964 | wmb(); |
965 | iowrite32(ring->doorbell_qpn, | 965 | /* Since there is no iowrite*_native() that writes the |
966 | * value as is, without byteswapping - using the one | ||
967 | * that doesn't do byteswapping in the relevant arch | ||
968 | * endianness. | ||
969 | */ | ||
970 | #if defined(__LITTLE_ENDIAN) | ||
971 | iowrite32( | ||
972 | #else | ||
973 | iowrite32be( | ||
974 | #endif | ||
975 | ring->doorbell_qpn, | ||
966 | ring->bf.uar->map + MLX4_SEND_DOORBELL); | 976 | ring->bf.uar->map + MLX4_SEND_DOORBELL); |
967 | } else { | 977 | } else { |
968 | ring->xmit_more++; | 978 | ring->xmit_more++; |
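The comment added in the hunk above explains the technique: the doorbell value is already laid out in the byte order the device expects, so the driver picks the MMIO accessor that does not byteswap on the running architecture (iowrite32() on little-endian, iowrite32be() on big-endian). Below is a small userspace illustration of why an always-little-endian store cannot be used for this; write_le32() mimics what a little-endian accessor such as iowrite32() does on any host, write_native() pushes the bytes untouched, and the two outputs differ only on a big-endian machine. It is a sketch of the idea, not the driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void write_le32(uint8_t *reg, uint32_t val)
{
        /* little-endian store, independent of host byte order */
        reg[0] = val & 0xff;
        reg[1] = (val >> 8) & 0xff;
        reg[2] = (val >> 16) & 0xff;
        reg[3] = (val >> 24) & 0xff;
}

static void write_native(uint8_t *reg, uint32_t val)
{
        /* raw store: the bytes reach the "register" exactly as they sit in memory */
        memcpy(reg, &val, sizeof(val));
}

int main(void)
{
        uint32_t doorbell = 0x11223344; /* hypothetical doorbell value */
        uint8_t le[4], native[4];

        write_le32(le, doorbell);
        write_native(native, doorbell);
        printf("le store:     %02x %02x %02x %02x\n", le[0], le[1], le[2], le[3]);
        printf("native store: %02x %02x %02x %02x\n",
               native[0], native[1], native[2], native[3]);
        return 0;
}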
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index ef3b95bac2ad..982861d1df44 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -787,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
787 | if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) | 787 | if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) |
788 | field = 3; | 788 | field = 3; |
789 | dev_cap->bf_regs_per_page = 1 << (field & 0x3f); | 789 | dev_cap->bf_regs_per_page = 1 << (field & 0x3f); |
790 | mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", | ||
791 | dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); | ||
792 | } else { | 790 | } else { |
793 | dev_cap->bf_reg_size = 0; | 791 | dev_cap->bf_reg_size = 0; |
794 | mlx4_dbg(dev, "BlueFlame not available\n"); | ||
795 | } | 792 | } |
796 | 793 | ||
797 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); | 794 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); |
@@ -902,9 +899,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
902 | goto out; | 899 | goto out; |
903 | } | 900 | } |
904 | 901 | ||
905 | mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", | ||
906 | dev_cap->bmme_flags, dev_cap->reserved_lkey); | ||
907 | |||
908 | /* | 902 | /* |
909 | * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then | 903 | * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then |
910 | * we can't use any EQs whose doorbell falls on that page, | 904 | * we can't use any EQs whose doorbell falls on that page, |
@@ -916,6 +910,21 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
916 | else | 910 | else |
917 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; | 911 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; |
918 | 912 | ||
913 | out: | ||
914 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
915 | return err; | ||
916 | } | ||
917 | |||
918 | void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | ||
919 | { | ||
920 | if (dev_cap->bf_reg_size > 0) | ||
921 | mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", | ||
922 | dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); | ||
923 | else | ||
924 | mlx4_dbg(dev, "BlueFlame not available\n"); | ||
925 | |||
926 | mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", | ||
927 | dev_cap->bmme_flags, dev_cap->reserved_lkey); | ||
919 | mlx4_dbg(dev, "Max ICM size %lld MB\n", | 928 | mlx4_dbg(dev, "Max ICM size %lld MB\n", |
920 | (unsigned long long) dev_cap->max_icm_sz >> 20); | 929 | (unsigned long long) dev_cap->max_icm_sz >> 20); |
921 | mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", | 930 | mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", |
@@ -949,13 +958,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
949 | dev_cap->dmfs_high_rate_qpn_base); | 958 | dev_cap->dmfs_high_rate_qpn_base); |
950 | mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", | 959 | mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", |
951 | dev_cap->dmfs_high_rate_qpn_range); | 960 | dev_cap->dmfs_high_rate_qpn_range); |
952 | |||
953 | dump_dev_cap_flags(dev, dev_cap->flags); | 961 | dump_dev_cap_flags(dev, dev_cap->flags); |
954 | dump_dev_cap_flags2(dev, dev_cap->flags2); | 962 | dump_dev_cap_flags2(dev, dev_cap->flags2); |
955 | |||
956 | out: | ||
957 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
958 | return err; | ||
959 | } | 963 | } |
960 | 964 | ||
961 | int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) | 965 | int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) |
@@ -1848,8 +1852,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, | |||
1848 | /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ | 1852 | /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ |
1849 | MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); | 1853 | MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); |
1850 | if (byte_field) { | 1854 | if (byte_field) { |
1851 | param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; | 1855 | param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; |
1852 | param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; | 1856 | param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; |
1853 | param->cqe_size = 1 << ((byte_field & | 1857 | param->cqe_size = 1 << ((byte_field & |
1854 | MLX4_CQE_SIZE_MASK_STRIDE) + 5); | 1858 | MLX4_CQE_SIZE_MASK_STRIDE) + 5); |
1855 | param->eqe_size = 1 << (((byte_field & | 1859 | param->eqe_size = 1 << (((byte_field & |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 794e2826609a..62562b60fa87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h | |||
@@ -224,6 +224,7 @@ struct mlx4_set_ib_param { | |||
224 | u32 cap_mask; | 224 | u32 cap_mask; |
225 | }; | 225 | }; |
226 | 226 | ||
227 | void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); | ||
227 | int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); | 228 | int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); |
228 | int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); | 229 | int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); |
229 | int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, | 230 | int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e25436b24ce7..03e9eb0dc761 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -171,9 +171,9 @@ int mlx4_check_port_params(struct mlx4_dev *dev, | |||
171 | { | 171 | { |
172 | int i; | 172 | int i; |
173 | 173 | ||
174 | for (i = 0; i < dev->caps.num_ports - 1; i++) { | 174 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { |
175 | if (port_type[i] != port_type[i + 1]) { | 175 | for (i = 0; i < dev->caps.num_ports - 1; i++) { |
176 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { | 176 | if (port_type[i] != port_type[i + 1]) { |
177 | mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); | 177 | mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); |
178 | return -EINVAL; | 178 | return -EINVAL; |
179 | } | 179 | } |
@@ -305,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
305 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); | 305 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); |
306 | return err; | 306 | return err; |
307 | } | 307 | } |
308 | mlx4_dev_cap_dump(dev, dev_cap); | ||
308 | 309 | ||
309 | if (dev_cap->min_page_sz > PAGE_SIZE) { | 310 | if (dev_cap->min_page_sz > PAGE_SIZE) { |
310 | mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", | 311 | mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", |
@@ -1828,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1828 | err = mlx4_dev_cap(dev, &dev_cap); | 1829 | err = mlx4_dev_cap(dev, &dev_cap); |
1829 | if (err) { | 1830 | if (err) { |
1830 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); | 1831 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); |
1831 | goto err_stop_fw; | 1832 | return err; |
1832 | } | 1833 | } |
1833 | 1834 | ||
1834 | choose_steering_mode(dev, &dev_cap); | 1835 | choose_steering_mode(dev, &dev_cap); |
@@ -1859,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1859 | &init_hca); | 1860 | &init_hca); |
1860 | if ((long long) icm_size < 0) { | 1861 | if ((long long) icm_size < 0) { |
1861 | err = icm_size; | 1862 | err = icm_size; |
1862 | goto err_stop_fw; | 1863 | return err; |
1863 | } | 1864 | } |
1864 | 1865 | ||
1865 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; | 1866 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; |
@@ -1873,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1873 | 1874 | ||
1874 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); | 1875 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); |
1875 | if (err) | 1876 | if (err) |
1876 | goto err_stop_fw; | 1877 | return err; |
1877 | 1878 | ||
1878 | err = mlx4_INIT_HCA(dev, &init_hca); | 1879 | err = mlx4_INIT_HCA(dev, &init_hca); |
1879 | if (err) { | 1880 | if (err) { |
@@ -1885,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1885 | err = mlx4_query_func(dev, &dev_cap); | 1886 | err = mlx4_query_func(dev, &dev_cap); |
1886 | if (err < 0) { | 1887 | if (err < 0) { |
1887 | mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); | 1888 | mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); |
1888 | goto err_stop_fw; | 1889 | goto err_close; |
1889 | } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { | 1890 | } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { |
1890 | dev->caps.num_eqs = dev_cap.max_eqs; | 1891 | dev->caps.num_eqs = dev_cap.max_eqs; |
1891 | dev->caps.reserved_eqs = dev_cap.reserved_eqs; | 1892 | dev->caps.reserved_eqs = dev_cap.reserved_eqs; |
@@ -2005,11 +2006,6 @@ err_free_icm: | |||
2005 | if (!mlx4_is_slave(dev)) | 2006 | if (!mlx4_is_slave(dev)) |
2006 | mlx4_free_icms(dev); | 2007 | mlx4_free_icms(dev); |
2007 | 2008 | ||
2008 | err_stop_fw: | ||
2009 | if (!mlx4_is_slave(dev)) { | ||
2010 | mlx4_UNMAP_FA(dev); | ||
2011 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | ||
2012 | } | ||
2013 | return err; | 2009 | return err; |
2014 | } | 2010 | } |
2015 | 2011 | ||
@@ -2488,41 +2484,42 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, | |||
2488 | u8 total_vfs, int existing_vfs) | 2484 | u8 total_vfs, int existing_vfs) |
2489 | { | 2485 | { |
2490 | u64 dev_flags = dev->flags; | 2486 | u64 dev_flags = dev->flags; |
2487 | int err = 0; | ||
2488 | |||
2489 | atomic_inc(&pf_loading); | ||
2490 | if (dev->flags & MLX4_FLAG_SRIOV) { | ||
2491 | if (existing_vfs != total_vfs) { | ||
2492 | mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", | ||
2493 | existing_vfs, total_vfs); | ||
2494 | total_vfs = existing_vfs; | ||
2495 | } | ||
2496 | } | ||
2491 | 2497 | ||
2492 | dev->dev_vfs = kzalloc( | 2498 | dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); |
2493 | total_vfs * sizeof(*dev->dev_vfs), | ||
2494 | GFP_KERNEL); | ||
2495 | if (NULL == dev->dev_vfs) { | 2499 | if (NULL == dev->dev_vfs) { |
2496 | mlx4_err(dev, "Failed to allocate memory for VFs\n"); | 2500 | mlx4_err(dev, "Failed to allocate memory for VFs\n"); |
2497 | goto disable_sriov; | 2501 | goto disable_sriov; |
2498 | } else if (!(dev->flags & MLX4_FLAG_SRIOV)) { | 2502 | } |
2499 | int err = 0; | 2503 | |
2500 | 2504 | if (!(dev->flags & MLX4_FLAG_SRIOV)) { | |
2501 | atomic_inc(&pf_loading); | 2505 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); |
2502 | if (existing_vfs) { | 2506 | err = pci_enable_sriov(pdev, total_vfs); |
2503 | if (existing_vfs != total_vfs) | 2507 | } |
2504 | mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", | 2508 | if (err) { |
2505 | existing_vfs, total_vfs); | 2509 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", |
2506 | } else { | 2510 | err); |
2507 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); | 2511 | goto disable_sriov; |
2508 | err = pci_enable_sriov(pdev, total_vfs); | 2512 | } else { |
2509 | } | 2513 | mlx4_warn(dev, "Running in master mode\n"); |
2510 | if (err) { | 2514 | dev_flags |= MLX4_FLAG_SRIOV | |
2511 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", | 2515 | MLX4_FLAG_MASTER; |
2512 | err); | 2516 | dev_flags &= ~MLX4_FLAG_SLAVE; |
2513 | atomic_dec(&pf_loading); | 2517 | dev->num_vfs = total_vfs; |
2514 | goto disable_sriov; | ||
2515 | } else { | ||
2516 | mlx4_warn(dev, "Running in master mode\n"); | ||
2517 | dev_flags |= MLX4_FLAG_SRIOV | | ||
2518 | MLX4_FLAG_MASTER; | ||
2519 | dev_flags &= ~MLX4_FLAG_SLAVE; | ||
2520 | dev->num_vfs = total_vfs; | ||
2521 | } | ||
2522 | } | 2518 | } |
2523 | return dev_flags; | 2519 | return dev_flags; |
2524 | 2520 | ||
2525 | disable_sriov: | 2521 | disable_sriov: |
2522 | atomic_dec(&pf_loading); | ||
2526 | dev->num_vfs = 0; | 2523 | dev->num_vfs = 0; |
2527 | kfree(dev->dev_vfs); | 2524 | kfree(dev->dev_vfs); |
2528 | return dev_flags & ~MLX4_FLAG_MASTER; | 2525 | return dev_flags & ~MLX4_FLAG_MASTER; |
@@ -2606,8 +2603,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, | |||
2606 | } | 2603 | } |
2607 | 2604 | ||
2608 | if (total_vfs) { | 2605 | if (total_vfs) { |
2609 | existing_vfs = pci_num_vf(pdev); | ||
2610 | dev->flags = MLX4_FLAG_MASTER; | 2606 | dev->flags = MLX4_FLAG_MASTER; |
2607 | existing_vfs = pci_num_vf(pdev); | ||
2608 | if (existing_vfs) | ||
2609 | dev->flags |= MLX4_FLAG_SRIOV; | ||
2611 | dev->num_vfs = total_vfs; | 2610 | dev->num_vfs = total_vfs; |
2612 | } | 2611 | } |
2613 | } | 2612 | } |
@@ -2643,6 +2642,7 @@ slave_start: | |||
2643 | } | 2642 | } |
2644 | 2643 | ||
2645 | if (mlx4_is_master(dev)) { | 2644 | if (mlx4_is_master(dev)) { |
2645 | /* when we hit the goto slave_start below, dev_cap already initialized */ | ||
2646 | if (!dev_cap) { | 2646 | if (!dev_cap) { |
2647 | dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); | 2647 | dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); |
2648 | 2648 | ||
@@ -2849,6 +2849,7 @@ slave_start: | |||
2849 | if (mlx4_is_master(dev) && dev->num_vfs) | 2849 | if (mlx4_is_master(dev) && dev->num_vfs) |
2850 | atomic_dec(&pf_loading); | 2850 | atomic_dec(&pf_loading); |
2851 | 2851 | ||
2852 | kfree(dev_cap); | ||
2852 | return 0; | 2853 | return 0; |
2853 | 2854 | ||
2854 | err_port: | 2855 | err_port: |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d6f549685c0f..7094a9c70fd5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free); | |||
584 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) | 584 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) |
585 | { | 585 | { |
586 | mlx4_mtt_cleanup(dev, &mr->mtt); | 586 | mlx4_mtt_cleanup(dev, &mr->mtt); |
587 | mr->mtt.order = -1; | ||
587 | } | 588 | } |
588 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); | 589 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); |
589 | 590 | ||
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
593 | { | 594 | { |
594 | int err; | 595 | int err; |
595 | 596 | ||
596 | mpt_entry->start = cpu_to_be64(iova); | ||
597 | mpt_entry->length = cpu_to_be64(size); | ||
598 | mpt_entry->entity_size = cpu_to_be32(page_shift); | ||
599 | |||
600 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | 597 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); |
601 | if (err) | 598 | if (err) |
602 | return err; | 599 | return err; |
603 | 600 | ||
601 | mpt_entry->start = cpu_to_be64(mr->iova); | ||
602 | mpt_entry->length = cpu_to_be64(mr->size); | ||
603 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | ||
604 | |||
604 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | | 605 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | |
605 | MLX4_MPT_PD_FLAG_EN_INV); | 606 | MLX4_MPT_PD_FLAG_EN_INV); |
606 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | | 607 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ab684463780b..da82991239a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type) | |||
157 | return "MLX5_EVENT_TYPE_CMD"; | 157 | return "MLX5_EVENT_TYPE_CMD"; |
158 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 158 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
159 | return "MLX5_EVENT_TYPE_PAGE_REQUEST"; | 159 | return "MLX5_EVENT_TYPE_PAGE_REQUEST"; |
160 | case MLX5_EVENT_TYPE_PAGE_FAULT: | ||
161 | return "MLX5_EVENT_TYPE_PAGE_FAULT"; | ||
160 | default: | 162 | default: |
161 | return "Unrecognized event"; | 163 | return "Unrecognized event"; |
162 | } | 164 | } |
@@ -279,6 +281,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
279 | } | 281 | } |
280 | break; | 282 | break; |
281 | 283 | ||
284 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
285 | case MLX5_EVENT_TYPE_PAGE_FAULT: | ||
286 | mlx5_eq_pagefault(dev, eqe); | ||
287 | break; | ||
288 | #endif | ||
282 | 289 | ||
283 | default: | 290 | default: |
284 | mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", | 291 | mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", |
@@ -446,8 +453,12 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev) | |||
446 | int mlx5_start_eqs(struct mlx5_core_dev *dev) | 453 | int mlx5_start_eqs(struct mlx5_core_dev *dev) |
447 | { | 454 | { |
448 | struct mlx5_eq_table *table = &dev->priv.eq_table; | 455 | struct mlx5_eq_table *table = &dev->priv.eq_table; |
456 | u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; | ||
449 | int err; | 457 | int err; |
450 | 458 | ||
459 | if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) | ||
460 | async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); | ||
461 | |||
451 | err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, | 462 | err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, |
452 | MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, | 463 | MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, |
453 | "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); | 464 | "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); |
@@ -459,7 +470,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) | |||
459 | mlx5_cmd_use_events(dev); | 470 | mlx5_cmd_use_events(dev); |
460 | 471 | ||
461 | err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, | 472 | err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, |
462 | MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK, | 473 | MLX5_NUM_ASYNC_EQE, async_event_mask, |
463 | "mlx5_async_eq", &dev->priv.uuari.uars[0]); | 474 | "mlx5_async_eq", &dev->priv.uuari.uars[0]); |
464 | if (err) { | 475 | if (err) { |
465 | mlx5_core_warn(dev, "failed to create async EQ %d\n", err); | 476 | mlx5_core_warn(dev, "failed to create async EQ %d\n", err); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 087c4c797deb..06f9036acd83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -69,6 +69,46 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) | |||
69 | return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); | 69 | return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); |
70 | } | 70 | } |
71 | 71 | ||
72 | int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) | ||
73 | { | ||
74 | u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; | ||
75 | int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); | ||
76 | void *out; | ||
77 | int err; | ||
78 | |||
79 | if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) | ||
80 | return -ENOTSUPP; | ||
81 | |||
82 | memset(in, 0, sizeof(in)); | ||
83 | out = kzalloc(out_sz, GFP_KERNEL); | ||
84 | if (!out) | ||
85 | return -ENOMEM; | ||
86 | MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); | ||
87 | MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); | ||
88 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); | ||
89 | if (err) | ||
90 | goto out; | ||
91 | |||
92 | err = mlx5_cmd_status_to_err_v2(out); | ||
93 | if (err) { | ||
94 | mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); | ||
95 | goto out; | ||
96 | } | ||
97 | |||
98 | memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), | ||
99 | sizeof(*caps)); | ||
100 | |||
101 | mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", | ||
102 | be32_to_cpu(caps->per_transport_caps.rc_odp_caps), | ||
103 | be32_to_cpu(caps->per_transport_caps.uc_odp_caps), | ||
104 | be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); | ||
105 | |||
106 | out: | ||
107 | kfree(out); | ||
108 | return err; | ||
109 | } | ||
110 | EXPORT_SYMBOL(mlx5_query_odp_caps); | ||
111 | |||
72 | int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) | 112 | int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) |
73 | { | 113 | { |
74 | struct mlx5_cmd_init_hca_mbox_in in; | 114 | struct mlx5_cmd_init_hca_mbox_in in; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 5261a2b0da43..575d853dbe05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
@@ -88,6 +88,95 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) | |||
88 | mlx5_core_put_rsc(common); | 88 | mlx5_core_put_rsc(common); |
89 | } | 89 | } |
90 | 90 | ||
91 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
92 | void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) | ||
93 | { | ||
94 | struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; | ||
95 | int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; | ||
96 | struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); | ||
97 | struct mlx5_core_qp *qp = | ||
98 | container_of(common, struct mlx5_core_qp, common); | ||
99 | struct mlx5_pagefault pfault; | ||
100 | |||
101 | if (!qp) { | ||
102 | mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", | ||
103 | qpn); | ||
104 | return; | ||
105 | } | ||
106 | |||
107 | pfault.event_subtype = eqe->sub_type; | ||
108 | pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & | ||
109 | (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); | ||
110 | pfault.bytes_committed = be32_to_cpu( | ||
111 | pf_eqe->bytes_committed); | ||
112 | |||
113 | mlx5_core_dbg(dev, | ||
114 | "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", | ||
115 | eqe->sub_type, pfault.flags); | ||
116 | |||
117 | switch (eqe->sub_type) { | ||
118 | case MLX5_PFAULT_SUBTYPE_RDMA: | ||
119 | /* RDMA based event */ | ||
120 | pfault.rdma.r_key = | ||
121 | be32_to_cpu(pf_eqe->rdma.r_key); | ||
122 | pfault.rdma.packet_size = | ||
123 | be16_to_cpu(pf_eqe->rdma.packet_length); | ||
124 | pfault.rdma.rdma_op_len = | ||
125 | be32_to_cpu(pf_eqe->rdma.rdma_op_len); | ||
126 | pfault.rdma.rdma_va = | ||
127 | be64_to_cpu(pf_eqe->rdma.rdma_va); | ||
128 | mlx5_core_dbg(dev, | ||
129 | "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", | ||
130 | qpn, pfault.rdma.r_key); | ||
131 | mlx5_core_dbg(dev, | ||
132 | "PAGE_FAULT: rdma_op_len: 0x%08x,\n", | ||
133 | pfault.rdma.rdma_op_len); | ||
134 | mlx5_core_dbg(dev, | ||
135 | "PAGE_FAULT: rdma_va: 0x%016llx,\n", | ||
136 | pfault.rdma.rdma_va); | ||
137 | mlx5_core_dbg(dev, | ||
138 | "PAGE_FAULT: bytes_committed: 0x%06x\n", | ||
139 | pfault.bytes_committed); | ||
140 | break; | ||
141 | |||
142 | case MLX5_PFAULT_SUBTYPE_WQE: | ||
143 | /* WQE based event */ | ||
144 | pfault.wqe.wqe_index = | ||
145 | be16_to_cpu(pf_eqe->wqe.wqe_index); | ||
146 | pfault.wqe.packet_size = | ||
147 | be16_to_cpu(pf_eqe->wqe.packet_length); | ||
148 | mlx5_core_dbg(dev, | ||
149 | "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", | ||
150 | qpn, pfault.wqe.wqe_index); | ||
151 | mlx5_core_dbg(dev, | ||
152 | "PAGE_FAULT: bytes_committed: 0x%06x\n", | ||
153 | pfault.bytes_committed); | ||
154 | break; | ||
155 | |||
156 | default: | ||
157 | mlx5_core_warn(dev, | ||
158 | "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", | ||
159 | eqe->sub_type, qpn); | ||
160 | /* Unsupported page faults should still be resolved by the | ||
161 | * page fault handler | ||
162 | */ | ||
163 | } | ||
164 | |||
165 | if (qp->pfault_handler) { | ||
166 | qp->pfault_handler(qp, &pfault); | ||
167 | } else { | ||
168 | mlx5_core_err(dev, | ||
169 | "ODP event for QP %08x, without a fault handler in QP\n", | ||
170 | qpn); | ||
171 | /* Page fault will remain unresolved. QP will hang until it is | ||
172 | * destroyed | ||
173 | */ | ||
174 | } | ||
175 | |||
176 | mlx5_core_put_rsc(common); | ||
177 | } | ||
178 | #endif | ||
179 | |||
91 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 180 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
92 | struct mlx5_core_qp *qp, | 181 | struct mlx5_core_qp *qp, |
93 | struct mlx5_create_qp_mbox_in *in, | 182 | struct mlx5_create_qp_mbox_in *in, |
@@ -322,3 +411,33 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) | |||
322 | return err; | 411 | return err; |
323 | } | 412 | } |
324 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); | 413 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); |
414 | |||
415 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
416 | int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, | ||
417 | u8 flags, int error) | ||
418 | { | ||
419 | struct mlx5_page_fault_resume_mbox_in in; | ||
420 | struct mlx5_page_fault_resume_mbox_out out; | ||
421 | int err; | ||
422 | |||
423 | memset(&in, 0, sizeof(in)); | ||
424 | memset(&out, 0, sizeof(out)); | ||
425 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); | ||
426 | in.hdr.opmod = 0; | ||
427 | flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | | ||
428 | MLX5_PAGE_FAULT_RESUME_WRITE | | ||
429 | MLX5_PAGE_FAULT_RESUME_RDMA); | ||
430 | flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0); | ||
431 | in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | | ||
432 | (flags << MLX5_QPN_BITS)); | ||
433 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
434 | if (err) | ||
435 | return err; | ||
436 | |||
437 | if (out.hdr.status) | ||
438 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
439 | |||
440 | return err; | ||
441 | } | ||
442 | EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); | ||
443 | #endif | ||
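mlx5_core_page_fault_resume() above packs the resume flags above the QP number in a single 32-bit field before issuing the command. A standalone sketch of that packing and unpacking follows; QPN_BITS and QPN_MASK are example values chosen for illustration and may not match the driver's MLX5_QPN_BITS/MLX5_QPN_MASK.

#include <stdint.h>
#include <stdio.h>

#define QPN_BITS 24
#define QPN_MASK ((1u << QPN_BITS) - 1)

static uint32_t pack_flags_qpn(uint32_t qpn, uint8_t flags)
{
        /* low QPN_BITS bits carry the QP number, the flags sit above them */
        return (qpn & QPN_MASK) | ((uint32_t)flags << QPN_BITS);
}

int main(void)
{
        uint32_t word = pack_flags_qpn(0x123456, 0x5);

        printf("packed word: 0x%08x\n", word);
        printf("qpn: 0x%06x flags: 0x%02x\n",
               word & QPN_MASK, word >> QPN_BITS);
        return 0;
}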
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index f1ebed6c63b1..2fa6ae026e4f 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c | |||
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p) | |||
2303 | 2303 | ||
2304 | /* Spanning Tree */ | 2304 | /* Spanning Tree */ |
2305 | 2305 | ||
2306 | static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set) | ||
2307 | { | ||
2308 | port_cfg(hw, p, | ||
2309 | KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set); | ||
2310 | } | ||
2311 | |||
2312 | static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set) | 2306 | static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set) |
2313 | { | 2307 | { |
2314 | port_cfg(hw, p, | 2308 | port_cfg(hw, p, |
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index af099057f0e9..71af98bb72cb 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4033 | (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 4033 | (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
4034 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4034 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), |
4035 | &mgp->cmd_bus, GFP_KERNEL); | 4035 | &mgp->cmd_bus, GFP_KERNEL); |
4036 | if (mgp->cmd == NULL) | 4036 | if (!mgp->cmd) { |
4037 | status = -ENOMEM; | ||
4037 | goto abort_with_enabled; | 4038 | goto abort_with_enabled; |
4039 | } | ||
4038 | 4040 | ||
4039 | mgp->board_span = pci_resource_len(pdev, 0); | 4041 | mgp->board_span = pci_resource_len(pdev, 0); |
4040 | mgp->iomem_base = pci_resource_start(pdev, 0); | 4042 | mgp->iomem_base = pci_resource_start(pdev, 0); |
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c2f09af5c25b..4847713211ca 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c | |||
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | |||
146 | { | 146 | { |
147 | int i = 0; | 147 | int i = 0; |
148 | 148 | ||
149 | while (i < 10) { | 149 | do { |
150 | if (i) | ||
151 | ssleep(1); | ||
152 | |||
153 | if (ql_sem_lock(qdev, | 150 | if (ql_sem_lock(qdev, |
154 | QL_DRVR_SEM_MASK, | 151 | QL_DRVR_SEM_MASK, |
155 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | 152 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) |
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | |||
158 | "driver lock acquired\n"); | 155 | "driver lock acquired\n"); |
159 | return 1; | 156 | return 1; |
160 | } | 157 | } |
161 | } | 158 | ssleep(1); |
159 | } while (++i < 10); | ||
162 | 160 | ||
163 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); | 161 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); |
164 | return 0; | 162 | return 0; |
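The qla3xxx hunk above reshapes the driver-lock poll into a do/while, so the lock is tried immediately and the one-second back-off follows each failed attempt. A tiny runnable sketch of that retry shape; try_lock() and the attempt count are illustrative stand-ins, not the driver's code.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static bool try_lock(void)
{
        return ++attempts >= 3; /* pretend the resource frees up on the 3rd try */
}

static int wait_for_lock(void)
{
        int i = 0;

        do {
                if (try_lock()) {
                        printf("lock acquired after %d attempt(s)\n", attempts);
                        return 1;
                }
                sleep(1);       /* back off after a failed attempt */
        } while (++i < 10);

        printf("timed out waiting for lock\n");
        return 0;
}

int main(void)
{
        return wait_for_lock() ? 0 : 1;
}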
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1aa25b13ace1..2528c3fb6b90 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, | |||
505 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; | 505 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; |
506 | } | 506 | } |
507 | 507 | ||
508 | static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) | 508 | static netdev_features_t qlcnic_features_check(struct sk_buff *skb, |
509 | struct net_device *dev, | ||
510 | netdev_features_t features) | ||
509 | { | 511 | { |
510 | return vxlan_gso_check(skb); | 512 | return vxlan_features_check(skb, features); |
511 | } | 513 | } |
512 | #endif | 514 | #endif |
513 | 515 | ||
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { | |||
532 | #ifdef CONFIG_QLCNIC_VXLAN | 534 | #ifdef CONFIG_QLCNIC_VXLAN |
533 | .ndo_add_vxlan_port = qlcnic_add_vxlan_port, | 535 | .ndo_add_vxlan_port = qlcnic_add_vxlan_port, |
534 | .ndo_del_vxlan_port = qlcnic_del_vxlan_port, | 536 | .ndo_del_vxlan_port = qlcnic_del_vxlan_port, |
535 | .ndo_gso_check = qlcnic_gso_check, | 537 | .ndo_features_check = qlcnic_features_check, |
536 | #endif | 538 | #endif |
537 | #ifdef CONFIG_NET_POLL_CONTROLLER | 539 | #ifdef CONFIG_NET_POLL_CONTROLLER |
538 | .ndo_poll_controller = qlcnic_poll_controller, | 540 | .ndo_poll_controller = qlcnic_poll_controller, |
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2603 | } else { | 2605 | } else { |
2604 | dev_err(&pdev->dev, | 2606 | dev_err(&pdev->dev, |
2605 | "%s: failed. Please Reboot\n", __func__); | 2607 | "%s: failed. Please Reboot\n", __func__); |
2608 | err = -ENODEV; | ||
2606 | goto err_out_free_hw; | 2609 | goto err_out_free_hw; |
2607 | } | 2610 | } |
2608 | 2611 | ||
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 6d0b9dfac313..78bb4ceb1cdd 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c | |||
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev) | |||
787 | if (rc) | 787 | if (rc) |
788 | goto err_out; | 788 | goto err_out; |
789 | 789 | ||
790 | disable_dev_on_err = 1; | ||
790 | rc = pci_request_regions (pdev, DRV_NAME); | 791 | rc = pci_request_regions (pdev, DRV_NAME); |
791 | if (rc) | 792 | if (rc) |
792 | goto err_out; | 793 | goto err_out; |
793 | disable_dev_on_err = 1; | ||
794 | 794 | ||
795 | pci_set_master (pdev); | 795 | pci_set_master (pdev); |
796 | 796 | ||
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev, | |||
1110 | return 0; | 1110 | return 0; |
1111 | 1111 | ||
1112 | err_out: | 1112 | err_out: |
1113 | netif_napi_del(&tp->napi); | ||
1113 | __rtl8139_cleanup_dev (dev); | 1114 | __rtl8139_cleanup_dev (dev); |
1114 | pci_disable_device (pdev); | 1115 | pci_disable_device (pdev); |
1115 | return i; | 1116 | return i; |
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev) | |||
1124 | assert (dev != NULL); | 1125 | assert (dev != NULL); |
1125 | 1126 | ||
1126 | cancel_delayed_work_sync(&tp->thread); | 1127 | cancel_delayed_work_sync(&tp->thread); |
1128 | netif_napi_del(&tp->napi); | ||
1127 | 1129 | ||
1128 | unregister_netdev (dev); | 1130 | unregister_netdev (dev); |
1129 | 1131 | ||
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c deleted file mode 100644 index f537cbea20e5..000000000000 --- a/drivers/net/ethernet/s6gmac.c +++ /dev/null | |||
@@ -1,1058 +0,0 @@ | |||
1 | /* | ||
2 | * Ethernet driver for S6105 on chip network device | ||
3 | * (c)2008 emlix GmbH http://www.emlix.com | ||
4 | * Authors: Oskar Schirmer <oskar@scara.com> | ||
5 | * Daniel Gloeckner <dg@emlix.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/if.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/mii.h> | ||
23 | #include <linux/phy.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <variant/hardware.h> | ||
26 | #include <variant/dmac.h> | ||
27 | |||
28 | #define DRV_NAME "s6gmac" | ||
29 | #define DRV_PRMT DRV_NAME ": " | ||
30 | |||
31 | |||
32 | /* register declarations */ | ||
33 | |||
34 | #define S6_GMAC_MACCONF1 0x000 | ||
35 | #define S6_GMAC_MACCONF1_TXENA 0 | ||
36 | #define S6_GMAC_MACCONF1_SYNCTX 1 | ||
37 | #define S6_GMAC_MACCONF1_RXENA 2 | ||
38 | #define S6_GMAC_MACCONF1_SYNCRX 3 | ||
39 | #define S6_GMAC_MACCONF1_TXFLOWCTRL 4 | ||
40 | #define S6_GMAC_MACCONF1_RXFLOWCTRL 5 | ||
41 | #define S6_GMAC_MACCONF1_LOOPBACK 8 | ||
42 | #define S6_GMAC_MACCONF1_RESTXFUNC 16 | ||
43 | #define S6_GMAC_MACCONF1_RESRXFUNC 17 | ||
44 | #define S6_GMAC_MACCONF1_RESTXMACCTRL 18 | ||
45 | #define S6_GMAC_MACCONF1_RESRXMACCTRL 19 | ||
46 | #define S6_GMAC_MACCONF1_SIMULRES 30 | ||
47 | #define S6_GMAC_MACCONF1_SOFTRES 31 | ||
48 | #define S6_GMAC_MACCONF2 0x004 | ||
49 | #define S6_GMAC_MACCONF2_FULL 0 | ||
50 | #define S6_GMAC_MACCONF2_CRCENA 1 | ||
51 | #define S6_GMAC_MACCONF2_PADCRCENA 2 | ||
52 | #define S6_GMAC_MACCONF2_LENGTHFCHK 4 | ||
53 | #define S6_GMAC_MACCONF2_HUGEFRAMENA 5 | ||
54 | #define S6_GMAC_MACCONF2_IFMODE 8 | ||
55 | #define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1 | ||
56 | #define S6_GMAC_MACCONF2_IFMODE_BYTE 2 | ||
57 | #define S6_GMAC_MACCONF2_IFMODE_MASK 3 | ||
58 | #define S6_GMAC_MACCONF2_PREAMBLELEN 12 | ||
59 | #define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F | ||
60 | #define S6_GMAC_MACIPGIFG 0x008 | ||
61 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0 | ||
62 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F | ||
63 | #define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8 | ||
64 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16 | ||
65 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24 | ||
66 | #define S6_GMAC_MACHALFDUPLEX 0x00C | ||
67 | #define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0 | ||
68 | #define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F | ||
69 | #define S6_GMAC_MACHALFDUPLEX_RETXMAX 12 | ||
70 | #define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F | ||
71 | #define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16 | ||
72 | #define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17 | ||
73 | #define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18 | ||
74 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19 | ||
75 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20 | ||
76 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F | ||
77 | #define S6_GMAC_MACMAXFRAMELEN 0x010 | ||
78 | #define S6_GMAC_MACMIICONF 0x020 | ||
79 | #define S6_GMAC_MACMIICONF_CSEL 0 | ||
80 | #define S6_GMAC_MACMIICONF_CSEL_DIV10 0 | ||
81 | #define S6_GMAC_MACMIICONF_CSEL_DIV12 1 | ||
82 | #define S6_GMAC_MACMIICONF_CSEL_DIV14 2 | ||
83 | #define S6_GMAC_MACMIICONF_CSEL_DIV18 3 | ||
84 | #define S6_GMAC_MACMIICONF_CSEL_DIV24 4 | ||
85 | #define S6_GMAC_MACMIICONF_CSEL_DIV34 5 | ||
86 | #define S6_GMAC_MACMIICONF_CSEL_DIV68 6 | ||
87 | #define S6_GMAC_MACMIICONF_CSEL_DIV168 7 | ||
88 | #define S6_GMAC_MACMIICONF_CSEL_MASK 7 | ||
89 | #define S6_GMAC_MACMIICONF_PREAMBLESUPR 4 | ||
90 | #define S6_GMAC_MACMIICONF_SCANAUTOINCR 5 | ||
91 | #define S6_GMAC_MACMIICMD 0x024 | ||
92 | #define S6_GMAC_MACMIICMD_READ 0 | ||
93 | #define S6_GMAC_MACMIICMD_SCAN 1 | ||
94 | #define S6_GMAC_MACMIIADDR 0x028 | ||
95 | #define S6_GMAC_MACMIIADDR_REG 0 | ||
96 | #define S6_GMAC_MACMIIADDR_REG_MASK 0x1F | ||
97 | #define S6_GMAC_MACMIIADDR_PHY 8 | ||
98 | #define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F | ||
99 | #define S6_GMAC_MACMIICTRL 0x02C | ||
100 | #define S6_GMAC_MACMIISTAT 0x030 | ||
101 | #define S6_GMAC_MACMIIINDI 0x034 | ||
102 | #define S6_GMAC_MACMIIINDI_BUSY 0 | ||
103 | #define S6_GMAC_MACMIIINDI_SCAN 1 | ||
104 | #define S6_GMAC_MACMIIINDI_INVAL 2 | ||
105 | #define S6_GMAC_MACINTERFSTAT 0x03C | ||
106 | #define S6_GMAC_MACINTERFSTAT_LINKFAIL 3 | ||
107 | #define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9 | ||
108 | #define S6_GMAC_MACSTATADDR1 0x040 | ||
109 | #define S6_GMAC_MACSTATADDR2 0x044 | ||
110 | |||
111 | #define S6_GMAC_FIFOCONF0 0x048 | ||
112 | #define S6_GMAC_FIFOCONF0_HSTRSTWT 0 | ||
113 | #define S6_GMAC_FIFOCONF0_HSTRSTSR 1 | ||
114 | #define S6_GMAC_FIFOCONF0_HSTRSTFR 2 | ||
115 | #define S6_GMAC_FIFOCONF0_HSTRSTST 3 | ||
116 | #define S6_GMAC_FIFOCONF0_HSTRSTFT 4 | ||
117 | #define S6_GMAC_FIFOCONF0_WTMENREQ 8 | ||
118 | #define S6_GMAC_FIFOCONF0_SRFENREQ 9 | ||
119 | #define S6_GMAC_FIFOCONF0_FRFENREQ 10 | ||
120 | #define S6_GMAC_FIFOCONF0_STFENREQ 11 | ||
121 | #define S6_GMAC_FIFOCONF0_FTFENREQ 12 | ||
122 | #define S6_GMAC_FIFOCONF0_WTMENRPLY 16 | ||
123 | #define S6_GMAC_FIFOCONF0_SRFENRPLY 17 | ||
124 | #define S6_GMAC_FIFOCONF0_FRFENRPLY 18 | ||
125 | #define S6_GMAC_FIFOCONF0_STFENRPLY 19 | ||
126 | #define S6_GMAC_FIFOCONF0_FTFENRPLY 20 | ||
127 | #define S6_GMAC_FIFOCONF1 0x04C | ||
128 | #define S6_GMAC_FIFOCONF2 0x050 | ||
129 | #define S6_GMAC_FIFOCONF2_CFGLWM 0 | ||
130 | #define S6_GMAC_FIFOCONF2_CFGHWM 16 | ||
131 | #define S6_GMAC_FIFOCONF3 0x054 | ||
132 | #define S6_GMAC_FIFOCONF3_CFGFTTH 0 | ||
133 | #define S6_GMAC_FIFOCONF3_CFGHWMFT 16 | ||
134 | #define S6_GMAC_FIFOCONF4 0x058 | ||
135 | #define S6_GMAC_FIFOCONF_RSV_PREVDROP 0 | ||
136 | #define S6_GMAC_FIFOCONF_RSV_RUNT 1 | ||
137 | #define S6_GMAC_FIFOCONF_RSV_FALSECAR 2 | ||
138 | #define S6_GMAC_FIFOCONF_RSV_CODEERR 3 | ||
139 | #define S6_GMAC_FIFOCONF_RSV_CRCERR 4 | ||
140 | #define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5 | ||
141 | #define S6_GMAC_FIFOCONF_RSV_LENRANGE 6 | ||
142 | #define S6_GMAC_FIFOCONF_RSV_OK 7 | ||
143 | #define S6_GMAC_FIFOCONF_RSV_MULTICAST 8 | ||
144 | #define S6_GMAC_FIFOCONF_RSV_BROADCAST 9 | ||
145 | #define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10 | ||
146 | #define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11 | ||
147 | #define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12 | ||
148 | #define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13 | ||
149 | #define S6_GMAC_FIFOCONF_RSV_VLANTAG 14 | ||
150 | #define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15 | ||
151 | #define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16 | ||
152 | #define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF | ||
153 | #define S6_GMAC_FIFOCONF5 0x05C | ||
154 | #define S6_GMAC_FIFOCONF5_DROPLT64 18 | ||
155 | #define S6_GMAC_FIFOCONF5_CFGBYTM 19 | ||
156 | #define S6_GMAC_FIFOCONF5_RXDROPSIZE 20 | ||
157 | #define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF | ||
158 | |||
159 | #define S6_GMAC_STAT_REGS 0x080 | ||
160 | #define S6_GMAC_STAT_SIZE_MIN 12 | ||
161 | #define S6_GMAC_STATTR64 0x080 | ||
162 | #define S6_GMAC_STATTR64_SIZE 18 | ||
163 | #define S6_GMAC_STATTR127 0x084 | ||
164 | #define S6_GMAC_STATTR127_SIZE 18 | ||
165 | #define S6_GMAC_STATTR255 0x088 | ||
166 | #define S6_GMAC_STATTR255_SIZE 18 | ||
167 | #define S6_GMAC_STATTR511 0x08C | ||
168 | #define S6_GMAC_STATTR511_SIZE 18 | ||
169 | #define S6_GMAC_STATTR1K 0x090 | ||
170 | #define S6_GMAC_STATTR1K_SIZE 18 | ||
171 | #define S6_GMAC_STATTRMAX 0x094 | ||
172 | #define S6_GMAC_STATTRMAX_SIZE 18 | ||
173 | #define S6_GMAC_STATTRMGV 0x098 | ||
174 | #define S6_GMAC_STATTRMGV_SIZE 18 | ||
175 | #define S6_GMAC_STATRBYT 0x09C | ||
176 | #define S6_GMAC_STATRBYT_SIZE 24 | ||
177 | #define S6_GMAC_STATRPKT 0x0A0 | ||
178 | #define S6_GMAC_STATRPKT_SIZE 18 | ||
179 | #define S6_GMAC_STATRFCS 0x0A4 | ||
180 | #define S6_GMAC_STATRFCS_SIZE 12 | ||
181 | #define S6_GMAC_STATRMCA 0x0A8 | ||
182 | #define S6_GMAC_STATRMCA_SIZE 18 | ||
183 | #define S6_GMAC_STATRBCA 0x0AC | ||
184 | #define S6_GMAC_STATRBCA_SIZE 22 | ||
185 | #define S6_GMAC_STATRXCF 0x0B0 | ||
186 | #define S6_GMAC_STATRXCF_SIZE 18 | ||
187 | #define S6_GMAC_STATRXPF 0x0B4 | ||
188 | #define S6_GMAC_STATRXPF_SIZE 12 | ||
189 | #define S6_GMAC_STATRXUO 0x0B8 | ||
190 | #define S6_GMAC_STATRXUO_SIZE 12 | ||
191 | #define S6_GMAC_STATRALN 0x0BC | ||
192 | #define S6_GMAC_STATRALN_SIZE 12 | ||
193 | #define S6_GMAC_STATRFLR 0x0C0 | ||
194 | #define S6_GMAC_STATRFLR_SIZE 16 | ||
195 | #define S6_GMAC_STATRCDE 0x0C4 | ||
196 | #define S6_GMAC_STATRCDE_SIZE 12 | ||
197 | #define S6_GMAC_STATRCSE 0x0C8 | ||
198 | #define S6_GMAC_STATRCSE_SIZE 12 | ||
199 | #define S6_GMAC_STATRUND 0x0CC | ||
200 | #define S6_GMAC_STATRUND_SIZE 12 | ||
201 | #define S6_GMAC_STATROVR 0x0D0 | ||
202 | #define S6_GMAC_STATROVR_SIZE 12 | ||
203 | #define S6_GMAC_STATRFRG 0x0D4 | ||
204 | #define S6_GMAC_STATRFRG_SIZE 12 | ||
205 | #define S6_GMAC_STATRJBR 0x0D8 | ||
206 | #define S6_GMAC_STATRJBR_SIZE 12 | ||
207 | #define S6_GMAC_STATRDRP 0x0DC | ||
208 | #define S6_GMAC_STATRDRP_SIZE 12 | ||
209 | #define S6_GMAC_STATTBYT 0x0E0 | ||
210 | #define S6_GMAC_STATTBYT_SIZE 24 | ||
211 | #define S6_GMAC_STATTPKT 0x0E4 | ||
212 | #define S6_GMAC_STATTPKT_SIZE 18 | ||
213 | #define S6_GMAC_STATTMCA 0x0E8 | ||
214 | #define S6_GMAC_STATTMCA_SIZE 18 | ||
215 | #define S6_GMAC_STATTBCA 0x0EC | ||
216 | #define S6_GMAC_STATTBCA_SIZE 18 | ||
217 | #define S6_GMAC_STATTXPF 0x0F0 | ||
218 | #define S6_GMAC_STATTXPF_SIZE 12 | ||
219 | #define S6_GMAC_STATTDFR 0x0F4 | ||
220 | #define S6_GMAC_STATTDFR_SIZE 12 | ||
221 | #define S6_GMAC_STATTEDF 0x0F8 | ||
222 | #define S6_GMAC_STATTEDF_SIZE 12 | ||
223 | #define S6_GMAC_STATTSCL 0x0FC | ||
224 | #define S6_GMAC_STATTSCL_SIZE 12 | ||
225 | #define S6_GMAC_STATTMCL 0x100 | ||
226 | #define S6_GMAC_STATTMCL_SIZE 12 | ||
227 | #define S6_GMAC_STATTLCL 0x104 | ||
228 | #define S6_GMAC_STATTLCL_SIZE 12 | ||
229 | #define S6_GMAC_STATTXCL 0x108 | ||
230 | #define S6_GMAC_STATTXCL_SIZE 12 | ||
231 | #define S6_GMAC_STATTNCL 0x10C | ||
232 | #define S6_GMAC_STATTNCL_SIZE 13 | ||
233 | #define S6_GMAC_STATTPFH 0x110 | ||
234 | #define S6_GMAC_STATTPFH_SIZE 12 | ||
235 | #define S6_GMAC_STATTDRP 0x114 | ||
236 | #define S6_GMAC_STATTDRP_SIZE 12 | ||
237 | #define S6_GMAC_STATTJBR 0x118 | ||
238 | #define S6_GMAC_STATTJBR_SIZE 12 | ||
239 | #define S6_GMAC_STATTFCS 0x11C | ||
240 | #define S6_GMAC_STATTFCS_SIZE 12 | ||
241 | #define S6_GMAC_STATTXCF 0x120 | ||
242 | #define S6_GMAC_STATTXCF_SIZE 12 | ||
243 | #define S6_GMAC_STATTOVR 0x124 | ||
244 | #define S6_GMAC_STATTOVR_SIZE 12 | ||
245 | #define S6_GMAC_STATTUND 0x128 | ||
246 | #define S6_GMAC_STATTUND_SIZE 12 | ||
247 | #define S6_GMAC_STATTFRG 0x12C | ||
248 | #define S6_GMAC_STATTFRG_SIZE 12 | ||
249 | #define S6_GMAC_STATCARRY(n) (0x130 + 4*(n)) | ||
250 | #define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n)) | ||
251 | #define S6_GMAC_STATCARRY1_RDRP 0 | ||
252 | #define S6_GMAC_STATCARRY1_RJBR 1 | ||
253 | #define S6_GMAC_STATCARRY1_RFRG 2 | ||
254 | #define S6_GMAC_STATCARRY1_ROVR 3 | ||
255 | #define S6_GMAC_STATCARRY1_RUND 4 | ||
256 | #define S6_GMAC_STATCARRY1_RCSE 5 | ||
257 | #define S6_GMAC_STATCARRY1_RCDE 6 | ||
258 | #define S6_GMAC_STATCARRY1_RFLR 7 | ||
259 | #define S6_GMAC_STATCARRY1_RALN 8 | ||
260 | #define S6_GMAC_STATCARRY1_RXUO 9 | ||
261 | #define S6_GMAC_STATCARRY1_RXPF 10 | ||
262 | #define S6_GMAC_STATCARRY1_RXCF 11 | ||
263 | #define S6_GMAC_STATCARRY1_RBCA 12 | ||
264 | #define S6_GMAC_STATCARRY1_RMCA 13 | ||
265 | #define S6_GMAC_STATCARRY1_RFCS 14 | ||
266 | #define S6_GMAC_STATCARRY1_RPKT 15 | ||
267 | #define S6_GMAC_STATCARRY1_RBYT 16 | ||
268 | #define S6_GMAC_STATCARRY1_TRMGV 25 | ||
269 | #define S6_GMAC_STATCARRY1_TRMAX 26 | ||
270 | #define S6_GMAC_STATCARRY1_TR1K 27 | ||
271 | #define S6_GMAC_STATCARRY1_TR511 28 | ||
272 | #define S6_GMAC_STATCARRY1_TR255 29 | ||
273 | #define S6_GMAC_STATCARRY1_TR127 30 | ||
274 | #define S6_GMAC_STATCARRY1_TR64 31 | ||
275 | #define S6_GMAC_STATCARRY2_TDRP 0 | ||
276 | #define S6_GMAC_STATCARRY2_TPFH 1 | ||
277 | #define S6_GMAC_STATCARRY2_TNCL 2 | ||
278 | #define S6_GMAC_STATCARRY2_TXCL 3 | ||
279 | #define S6_GMAC_STATCARRY2_TLCL 4 | ||
280 | #define S6_GMAC_STATCARRY2_TMCL 5 | ||
281 | #define S6_GMAC_STATCARRY2_TSCL 6 | ||
282 | #define S6_GMAC_STATCARRY2_TEDF 7 | ||
283 | #define S6_GMAC_STATCARRY2_TDFR 8 | ||
284 | #define S6_GMAC_STATCARRY2_TXPF 9 | ||
285 | #define S6_GMAC_STATCARRY2_TBCA 10 | ||
286 | #define S6_GMAC_STATCARRY2_TMCA 11 | ||
287 | #define S6_GMAC_STATCARRY2_TPKT 12 | ||
288 | #define S6_GMAC_STATCARRY2_TBYT 13 | ||
289 | #define S6_GMAC_STATCARRY2_TFRG 14 | ||
290 | #define S6_GMAC_STATCARRY2_TUND 15 | ||
291 | #define S6_GMAC_STATCARRY2_TOVR 16 | ||
292 | #define S6_GMAC_STATCARRY2_TXCF 17 | ||
293 | #define S6_GMAC_STATCARRY2_TFCS 18 | ||
294 | #define S6_GMAC_STATCARRY2_TJBR 19 | ||
295 | |||
296 | #define S6_GMAC_HOST_PBLKCTRL 0x140 | ||
297 | #define S6_GMAC_HOST_PBLKCTRL_TXENA 0 | ||
298 | #define S6_GMAC_HOST_PBLKCTRL_RXENA 1 | ||
299 | #define S6_GMAC_HOST_PBLKCTRL_TXSRES 2 | ||
300 | #define S6_GMAC_HOST_PBLKCTRL_RXSRES 3 | ||
301 | #define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8 | ||
302 | #define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12 | ||
303 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4 | ||
304 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5 | ||
305 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6 | ||
306 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7 | ||
307 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF | ||
308 | #define S6_GMAC_HOST_PBLKCTRL_STATENA 16 | ||
309 | #define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17 | ||
310 | #define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18 | ||
311 | #define S6_GMAC_HOST_PBLKCTRL_RGMII 19 | ||
312 | #define S6_GMAC_HOST_INTMASK 0x144 | ||
313 | #define S6_GMAC_HOST_INTSTAT 0x148 | ||
314 | #define S6_GMAC_HOST_INT_TXBURSTOVER 3 | ||
315 | #define S6_GMAC_HOST_INT_TXPREWOVER 4 | ||
316 | #define S6_GMAC_HOST_INT_RXBURSTUNDER 5 | ||
317 | #define S6_GMAC_HOST_INT_RXPOSTRFULL 6 | ||
318 | #define S6_GMAC_HOST_INT_RXPOSTRUNDER 7 | ||
319 | #define S6_GMAC_HOST_RXFIFOHWM 0x14C | ||
320 | #define S6_GMAC_HOST_CTRLFRAMXP 0x150 | ||
321 | #define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n)) | ||
322 | #define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n)) | ||
323 | #define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n)) | ||
324 | #define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n)) | ||
325 | |||
326 | #define S6_GMAC_BURST_PREWR 0x1B0 | ||
327 | #define S6_GMAC_BURST_PREWR_LEN 0 | ||
328 | #define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1) | ||
329 | #define S6_GMAC_BURST_PREWR_CFE 20 | ||
330 | #define S6_GMAC_BURST_PREWR_PPE 21 | ||
331 | #define S6_GMAC_BURST_PREWR_FCS 22 | ||
332 | #define S6_GMAC_BURST_PREWR_PAD 23 | ||
333 | #define S6_GMAC_BURST_POSTRD 0x1D0 | ||
334 | #define S6_GMAC_BURST_POSTRD_LEN 0 | ||
335 | #define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1) | ||
336 | #define S6_GMAC_BURST_POSTRD_DROP 20 | ||
337 | |||
338 | |||
339 | /* data handling */ | ||
340 | |||
341 | #define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */ | ||
342 | #define S6_NUM_RX_SKB 16 | ||
343 | #define S6_MAX_FRLEN 1536 | ||
344 | |||
345 | struct s6gmac { | ||
346 | u32 reg; | ||
347 | u32 tx_dma; | ||
348 | u32 rx_dma; | ||
349 | u32 io; | ||
350 | u8 tx_chan; | ||
351 | u8 rx_chan; | ||
352 | spinlock_t lock; | ||
353 | u8 tx_skb_i, tx_skb_o; | ||
354 | u8 rx_skb_i, rx_skb_o; | ||
355 | struct sk_buff *tx_skb[S6_NUM_TX_SKB]; | ||
356 | struct sk_buff *rx_skb[S6_NUM_RX_SKB]; | ||
357 | unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)]; | ||
358 | unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)]; | ||
359 | struct phy_device *phydev; | ||
360 | struct { | ||
361 | struct mii_bus *bus; | ||
362 | int irq[PHY_MAX_ADDR]; | ||
363 | } mii; | ||
364 | struct { | ||
365 | unsigned int mbit; | ||
366 | u8 giga; | ||
367 | u8 isup; | ||
368 | u8 full; | ||
369 | } link; | ||
370 | }; | ||
371 | |||
372 | static void s6gmac_rx_fillfifo(struct net_device *dev) | ||
373 | { | ||
374 | struct s6gmac *pd = netdev_priv(dev); | ||
375 | struct sk_buff *skb; | ||
376 | while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) && | ||
377 | (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) && | ||
378 | (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) { | ||
379 | pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb; | ||
380 | s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan, | ||
381 | pd->io, (u32)skb->data, S6_MAX_FRLEN); | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static void s6gmac_rx_interrupt(struct net_device *dev) | ||
386 | { | ||
387 | struct s6gmac *pd = netdev_priv(dev); | ||
388 | u32 pfx; | ||
389 | struct sk_buff *skb; | ||
390 | while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) > | ||
391 | s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) { | ||
392 | skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]; | ||
393 | pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD); | ||
394 | if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) { | ||
395 | dev_kfree_skb_irq(skb); | ||
396 | } else { | ||
397 | skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) | ||
398 | & S6_GMAC_BURST_POSTRD_LEN_MASK); | ||
399 | skb->protocol = eth_type_trans(skb, dev); | ||
400 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
401 | netif_rx(skb); | ||
402 | } | ||
403 | } | ||
404 | } | ||
405 | |||
406 | static void s6gmac_tx_interrupt(struct net_device *dev) | ||
407 | { | ||
408 | struct s6gmac *pd = netdev_priv(dev); | ||
409 | while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) > | ||
410 | s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) { | ||
411 | dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); | ||
412 | } | ||
413 | if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) | ||
414 | netif_wake_queue(dev); | ||
415 | } | ||
416 | |||
417 | struct s6gmac_statinf { | ||
418 | unsigned reg_size : 4; /* 0: unused */ | ||
419 | unsigned reg_off : 6; | ||
420 | unsigned net_index : 6; | ||
421 | }; | ||
422 | |||
423 | #define S6_STATS_B (8 * sizeof(u32)) | ||
424 | #define S6_STATS_C(b, r, f) [b] = { \ | ||
425 | BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \ | ||
426 | BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \ | ||
427 | >= (1<<4)) + \ | ||
428 | r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \ | ||
429 | BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \ | ||
430 | >= ((1<<6)-1)) + \ | ||
431 | (r - S6_GMAC_STAT_REGS) / sizeof(u32), \ | ||
432 | BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \ | ||
433 | % sizeof(unsigned long)) + \ | ||
434 | BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \ | ||
435 | / sizeof(unsigned long)) >= (1<<6))) + \ | ||
436 | BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \ | ||
437 | != sizeof(unsigned long))) + \ | ||
438 | (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)}, | ||
439 | |||
440 | static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { { | ||
441 | S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes) | ||
442 | S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets) | ||
443 | S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors) | ||
444 | S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast) | ||
445 | S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors) | ||
446 | S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors) | ||
447 | S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors) | ||
448 | S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors) | ||
449 | S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors) | ||
450 | S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors) | ||
451 | S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors) | ||
452 | S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped) | ||
453 | }, { | ||
454 | S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes) | ||
455 | S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets) | ||
456 | S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors) | ||
457 | S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors) | ||
458 | S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions) | ||
459 | S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped) | ||
460 | S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors) | ||
461 | S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors) | ||
462 | S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors) | ||
463 | S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors) | ||
464 | S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors) | ||
465 | } }; | ||
466 | |||
467 | static void s6gmac_stats_collect(struct s6gmac *pd, | ||
468 | const struct s6gmac_statinf *inf) | ||
469 | { | ||
470 | int b; | ||
471 | for (b = 0; b < S6_STATS_B; b++) { | ||
472 | if (inf[b].reg_size) { | ||
473 | pd->stats[inf[b].net_index] += | ||
474 | readl(pd->reg + S6_GMAC_STAT_REGS | ||
475 | + sizeof(u32) * inf[b].reg_off); | ||
476 | } | ||
477 | } | ||
478 | } | ||
479 | |||
480 | static void s6gmac_stats_carry(struct s6gmac *pd, | ||
481 | const struct s6gmac_statinf *inf, u32 mask) | ||
482 | { | ||
483 | int b; | ||
484 | while (mask) { | ||
485 | b = fls(mask) - 1; | ||
486 | mask &= ~(1 << b); | ||
487 | pd->carry[inf[b].net_index] += (1 << inf[b].reg_size); | ||
488 | } | ||
489 | } | ||
490 | |||
491 | static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry) | ||
492 | { | ||
493 | int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) & | ||
494 | ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry)); | ||
495 | return r; | ||
496 | } | ||
497 | |||
498 | static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry) | ||
499 | { | ||
500 | u32 mask; | ||
501 | mask = s6gmac_stats_pending(pd, carry); | ||
502 | if (mask) { | ||
503 | writel(mask, pd->reg + S6_GMAC_STATCARRY(carry)); | ||
504 | s6gmac_stats_carry(pd, &statinf[carry][0], mask); | ||
505 | } | ||
506 | } | ||
507 | |||
508 | static irqreturn_t s6gmac_interrupt(int irq, void *dev_id) | ||
509 | { | ||
510 | struct net_device *dev = (struct net_device *)dev_id; | ||
511 | struct s6gmac *pd = netdev_priv(dev); | ||
512 | if (!dev) | ||
513 | return IRQ_NONE; | ||
514 | spin_lock(&pd->lock); | ||
515 | if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan)) | ||
516 | s6gmac_rx_interrupt(dev); | ||
517 | s6gmac_rx_fillfifo(dev); | ||
518 | if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan)) | ||
519 | s6gmac_tx_interrupt(dev); | ||
520 | s6gmac_stats_interrupt(pd, 0); | ||
521 | s6gmac_stats_interrupt(pd, 1); | ||
522 | spin_unlock(&pd->lock); | ||
523 | return IRQ_HANDLED; | ||
524 | } | ||
525 | |||
526 | static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n, | ||
527 | u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi) | ||
528 | { | ||
529 | writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n)); | ||
530 | writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n)); | ||
531 | writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n)); | ||
532 | writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n)); | ||
533 | } | ||
534 | |||
535 | static inline void s6gmac_stop_device(struct net_device *dev) | ||
536 | { | ||
537 | struct s6gmac *pd = netdev_priv(dev); | ||
538 | writel(0, pd->reg + S6_GMAC_MACCONF1); | ||
539 | } | ||
540 | |||
541 | static inline void s6gmac_init_device(struct net_device *dev) | ||
542 | { | ||
543 | struct s6gmac *pd = netdev_priv(dev); | ||
544 | int is_rgmii = !!(pd->phydev->supported | ||
545 | & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)); | ||
546 | #if 0 | ||
547 | writel(1 << S6_GMAC_MACCONF1_SYNCTX | | ||
548 | 1 << S6_GMAC_MACCONF1_SYNCRX | | ||
549 | 1 << S6_GMAC_MACCONF1_TXFLOWCTRL | | ||
550 | 1 << S6_GMAC_MACCONF1_RXFLOWCTRL | | ||
551 | 1 << S6_GMAC_MACCONF1_RESTXFUNC | | ||
552 | 1 << S6_GMAC_MACCONF1_RESRXFUNC | | ||
553 | 1 << S6_GMAC_MACCONF1_RESTXMACCTRL | | ||
554 | 1 << S6_GMAC_MACCONF1_RESRXMACCTRL, | ||
555 | pd->reg + S6_GMAC_MACCONF1); | ||
556 | #endif | ||
557 | writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1); | ||
558 | udelay(1000); | ||
559 | writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA, | ||
560 | pd->reg + S6_GMAC_MACCONF1); | ||
561 | writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES | | ||
562 | 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES, | ||
563 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
564 | writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | | ||
565 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | | ||
566 | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | | ||
567 | 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | | ||
568 | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, | ||
569 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
570 | writel(1 << S6_GMAC_MACCONF1_TXENA | | ||
571 | 1 << S6_GMAC_MACCONF1_RXENA | | ||
572 | (dev->flags & IFF_LOOPBACK ? 1 : 0) | ||
573 | << S6_GMAC_MACCONF1_LOOPBACK, | ||
574 | pd->reg + S6_GMAC_MACCONF1); | ||
575 | writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ? | ||
576 | dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN, | ||
577 | pd->reg + S6_GMAC_MACMAXFRAMELEN); | ||
578 | writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL | | ||
579 | 1 << S6_GMAC_MACCONF2_PADCRCENA | | ||
580 | 1 << S6_GMAC_MACCONF2_LENGTHFCHK | | ||
581 | (pd->link.giga ? | ||
582 | S6_GMAC_MACCONF2_IFMODE_BYTE : | ||
583 | S6_GMAC_MACCONF2_IFMODE_NIBBLE) | ||
584 | << S6_GMAC_MACCONF2_IFMODE | | ||
585 | 7 << S6_GMAC_MACCONF2_PREAMBLELEN, | ||
586 | pd->reg + S6_GMAC_MACCONF2); | ||
587 | writel(0, pd->reg + S6_GMAC_MACSTATADDR1); | ||
588 | writel(0, pd->reg + S6_GMAC_MACSTATADDR2); | ||
589 | writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ | | ||
590 | 1 << S6_GMAC_FIFOCONF0_SRFENREQ | | ||
591 | 1 << S6_GMAC_FIFOCONF0_FRFENREQ | | ||
592 | 1 << S6_GMAC_FIFOCONF0_STFENREQ | | ||
593 | 1 << S6_GMAC_FIFOCONF0_FTFENREQ, | ||
594 | pd->reg + S6_GMAC_FIFOCONF0); | ||
595 | writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH | | ||
596 | 128 << S6_GMAC_FIFOCONF3_CFGHWMFT, | ||
597 | pd->reg + S6_GMAC_FIFOCONF3); | ||
598 | writel((S6_GMAC_FIFOCONF_RSV_MASK & ~( | ||
599 | 1 << S6_GMAC_FIFOCONF_RSV_RUNT | | ||
600 | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | | ||
601 | 1 << S6_GMAC_FIFOCONF_RSV_OK | | ||
602 | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | | ||
603 | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | | ||
604 | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | | ||
605 | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | | ||
606 | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) | | ||
607 | 1 << S6_GMAC_FIFOCONF5_DROPLT64 | | ||
608 | pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM | | ||
609 | 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE, | ||
610 | pd->reg + S6_GMAC_FIFOCONF5); | ||
611 | writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT | | ||
612 | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | | ||
613 | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | | ||
614 | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | | ||
615 | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | | ||
616 | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | | ||
617 | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED, | ||
618 | pd->reg + S6_GMAC_FIFOCONF4); | ||
619 | s6gmac_set_dstaddr(pd, 0, | ||
620 | 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF); | ||
621 | s6gmac_set_dstaddr(pd, 1, | ||
622 | dev->dev_addr[5] | | ||
623 | dev->dev_addr[4] << 8 | | ||
624 | dev->dev_addr[3] << 16 | | ||
625 | dev->dev_addr[2] << 24, | ||
626 | dev->dev_addr[1] | | ||
627 | dev->dev_addr[0] << 8, | ||
628 | 0xFFFFFFFF, 0x0000FFFF); | ||
629 | s6gmac_set_dstaddr(pd, 2, | ||
630 | 0x00000000, 0x00000100, 0x00000000, 0x00000100); | ||
631 | s6gmac_set_dstaddr(pd, 3, | ||
632 | 0x00000000, 0x00000000, 0x00000000, 0x00000000); | ||
633 | writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA | | ||
634 | 1 << S6_GMAC_HOST_PBLKCTRL_RXENA | | ||
635 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | | ||
636 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | | ||
637 | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | | ||
638 | 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | | ||
639 | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, | ||
640 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
641 | } | ||
642 | |||
643 | static void s6mii_enable(struct s6gmac *pd) | ||
644 | { | ||
645 | writel(readl(pd->reg + S6_GMAC_MACCONF1) & | ||
646 | ~(1 << S6_GMAC_MACCONF1_SOFTRES), | ||
647 | pd->reg + S6_GMAC_MACCONF1); | ||
648 | writel((readl(pd->reg + S6_GMAC_MACMIICONF) | ||
649 | & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL)) | ||
650 | | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL), | ||
651 | pd->reg + S6_GMAC_MACMIICONF); | ||
652 | } | ||
653 | |||
654 | static int s6mii_busy(struct s6gmac *pd, int tmo) | ||
655 | { | ||
656 | while (readl(pd->reg + S6_GMAC_MACMIIINDI)) { | ||
657 | if (--tmo == 0) | ||
658 | return -ETIME; | ||
659 | udelay(64); | ||
660 | } | ||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum) | ||
665 | { | ||
666 | struct s6gmac *pd = bus->priv; | ||
667 | s6mii_enable(pd); | ||
668 | if (s6mii_busy(pd, 256)) | ||
669 | return -ETIME; | ||
670 | writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | | ||
671 | regnum << S6_GMAC_MACMIIADDR_REG, | ||
672 | pd->reg + S6_GMAC_MACMIIADDR); | ||
673 | writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD); | ||
674 | writel(0, pd->reg + S6_GMAC_MACMIICMD); | ||
675 | if (s6mii_busy(pd, 256)) | ||
676 | return -ETIME; | ||
677 | return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT); | ||
678 | } | ||
679 | |||
680 | static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) | ||
681 | { | ||
682 | struct s6gmac *pd = bus->priv; | ||
683 | s6mii_enable(pd); | ||
684 | if (s6mii_busy(pd, 256)) | ||
685 | return -ETIME; | ||
686 | writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | | ||
687 | regnum << S6_GMAC_MACMIIADDR_REG, | ||
688 | pd->reg + S6_GMAC_MACMIIADDR); | ||
689 | writel(value, pd->reg + S6_GMAC_MACMIICTRL); | ||
690 | if (s6mii_busy(pd, 256)) | ||
691 | return -ETIME; | ||
692 | return 0; | ||
693 | } | ||
694 | |||
695 | static int s6mii_reset(struct mii_bus *bus) | ||
696 | { | ||
697 | struct s6gmac *pd = bus->priv; | ||
698 | s6mii_enable(pd); | ||
699 | if (s6mii_busy(pd, PHY_INIT_TIMEOUT)) | ||
700 | return -ETIME; | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static void s6gmac_set_rgmii_txclock(struct s6gmac *pd) | ||
705 | { | ||
706 | u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); | ||
707 | pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC); | ||
708 | switch (pd->link.mbit) { | ||
709 | case 10: | ||
710 | pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC; | ||
711 | break; | ||
712 | case 100: | ||
713 | pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC; | ||
714 | break; | ||
715 | case 1000: | ||
716 | pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC; | ||
717 | break; | ||
718 | default: | ||
719 | return; | ||
720 | } | ||
721 | writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL); | ||
722 | } | ||
723 | |||
724 | static inline void s6gmac_linkisup(struct net_device *dev, int isup) | ||
725 | { | ||
726 | struct s6gmac *pd = netdev_priv(dev); | ||
727 | struct phy_device *phydev = pd->phydev; | ||
728 | |||
729 | pd->link.full = phydev->duplex; | ||
730 | pd->link.giga = (phydev->speed == 1000); | ||
731 | if (pd->link.mbit != phydev->speed) { | ||
732 | pd->link.mbit = phydev->speed; | ||
733 | s6gmac_set_rgmii_txclock(pd); | ||
734 | } | ||
735 | pd->link.isup = isup; | ||
736 | if (isup) | ||
737 | netif_carrier_on(dev); | ||
738 | phy_print_status(phydev); | ||
739 | } | ||
740 | |||
741 | static void s6gmac_adjust_link(struct net_device *dev) | ||
742 | { | ||
743 | struct s6gmac *pd = netdev_priv(dev); | ||
744 | struct phy_device *phydev = pd->phydev; | ||
745 | if (pd->link.isup && | ||
746 | (!phydev->link || | ||
747 | (pd->link.mbit != phydev->speed) || | ||
748 | (pd->link.full != phydev->duplex))) { | ||
749 | pd->link.isup = 0; | ||
750 | netif_tx_disable(dev); | ||
751 | if (!phydev->link) { | ||
752 | netif_carrier_off(dev); | ||
753 | phy_print_status(phydev); | ||
754 | } | ||
755 | } | ||
756 | if (!pd->link.isup && phydev->link) { | ||
757 | if (pd->link.full != phydev->duplex) { | ||
758 | u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); | ||
759 | if (phydev->duplex) | ||
760 | maccfg |= 1 << S6_GMAC_MACCONF2_FULL; | ||
761 | else | ||
762 | maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL); | ||
763 | writel(maccfg, pd->reg + S6_GMAC_MACCONF2); | ||
764 | } | ||
765 | |||
766 | if (pd->link.giga != (phydev->speed == 1000)) { | ||
767 | u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5); | ||
768 | u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); | ||
769 | maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK | ||
770 | << S6_GMAC_MACCONF2_IFMODE); | ||
771 | if (phydev->speed == 1000) { | ||
772 | fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM; | ||
773 | maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE | ||
774 | << S6_GMAC_MACCONF2_IFMODE; | ||
775 | } else { | ||
776 | fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM); | ||
777 | maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE | ||
778 | << S6_GMAC_MACCONF2_IFMODE; | ||
779 | } | ||
780 | writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5); | ||
781 | writel(maccfg, pd->reg + S6_GMAC_MACCONF2); | ||
782 | } | ||
783 | |||
784 | if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) | ||
785 | netif_wake_queue(dev); | ||
786 | s6gmac_linkisup(dev, 1); | ||
787 | } | ||
788 | } | ||
789 | |||
790 | static inline int s6gmac_phy_start(struct net_device *dev) | ||
791 | { | ||
792 | struct s6gmac *pd = netdev_priv(dev); | ||
793 | int i = 0; | ||
794 | struct phy_device *p = NULL; | ||
795 | while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) | ||
796 | i++; | ||
797 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, | ||
798 | PHY_INTERFACE_MODE_RGMII); | ||
799 | if (IS_ERR(p)) { | ||
800 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | ||
801 | return PTR_ERR(p); | ||
802 | } | ||
803 | p->supported &= PHY_GBIT_FEATURES; | ||
804 | p->advertising = p->supported; | ||
805 | pd->phydev = p; | ||
806 | return 0; | ||
807 | } | ||
808 | |||
809 | static inline void s6gmac_init_stats(struct net_device *dev) | ||
810 | { | ||
811 | struct s6gmac *pd = netdev_priv(dev); | ||
812 | u32 mask; | ||
813 | mask = 1 << S6_GMAC_STATCARRY1_RDRP | | ||
814 | 1 << S6_GMAC_STATCARRY1_RJBR | | ||
815 | 1 << S6_GMAC_STATCARRY1_RFRG | | ||
816 | 1 << S6_GMAC_STATCARRY1_ROVR | | ||
817 | 1 << S6_GMAC_STATCARRY1_RUND | | ||
818 | 1 << S6_GMAC_STATCARRY1_RCDE | | ||
819 | 1 << S6_GMAC_STATCARRY1_RFLR | | ||
820 | 1 << S6_GMAC_STATCARRY1_RALN | | ||
821 | 1 << S6_GMAC_STATCARRY1_RMCA | | ||
822 | 1 << S6_GMAC_STATCARRY1_RFCS | | ||
823 | 1 << S6_GMAC_STATCARRY1_RPKT | | ||
824 | 1 << S6_GMAC_STATCARRY1_RBYT; | ||
825 | writel(mask, pd->reg + S6_GMAC_STATCARRY(0)); | ||
826 | writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0)); | ||
827 | mask = 1 << S6_GMAC_STATCARRY2_TDRP | | ||
828 | 1 << S6_GMAC_STATCARRY2_TNCL | | ||
829 | 1 << S6_GMAC_STATCARRY2_TXCL | | ||
830 | 1 << S6_GMAC_STATCARRY2_TEDF | | ||
831 | 1 << S6_GMAC_STATCARRY2_TPKT | | ||
832 | 1 << S6_GMAC_STATCARRY2_TBYT | | ||
833 | 1 << S6_GMAC_STATCARRY2_TFRG | | ||
834 | 1 << S6_GMAC_STATCARRY2_TUND | | ||
835 | 1 << S6_GMAC_STATCARRY2_TOVR | | ||
836 | 1 << S6_GMAC_STATCARRY2_TFCS | | ||
837 | 1 << S6_GMAC_STATCARRY2_TJBR; | ||
838 | writel(mask, pd->reg + S6_GMAC_STATCARRY(1)); | ||
839 | writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1)); | ||
840 | } | ||
841 | |||
842 | static inline void s6gmac_init_dmac(struct net_device *dev) | ||
843 | { | ||
844 | struct s6gmac *pd = netdev_priv(dev); | ||
845 | s6dmac_disable_chan(pd->tx_dma, pd->tx_chan); | ||
846 | s6dmac_disable_chan(pd->rx_dma, pd->rx_chan); | ||
847 | s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX); | ||
848 | s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX); | ||
849 | } | ||
850 | |||
851 | static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev) | ||
852 | { | ||
853 | struct s6gmac *pd = netdev_priv(dev); | ||
854 | unsigned long flags; | ||
855 | |||
856 | spin_lock_irqsave(&pd->lock, flags); | ||
857 | writel(skb->len << S6_GMAC_BURST_PREWR_LEN | | ||
858 | 0 << S6_GMAC_BURST_PREWR_CFE | | ||
859 | 1 << S6_GMAC_BURST_PREWR_PPE | | ||
860 | 1 << S6_GMAC_BURST_PREWR_FCS | | ||
861 | ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD, | ||
862 | pd->reg + S6_GMAC_BURST_PREWR); | ||
863 | s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan, | ||
864 | (u32)skb->data, pd->io, skb->len); | ||
865 | if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) | ||
866 | netif_stop_queue(dev); | ||
867 | if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) { | ||
868 | printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n", | ||
869 | pd->tx_skb_o, pd->tx_skb_i); | ||
870 | BUG(); | ||
871 | } | ||
872 | pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb; | ||
873 | spin_unlock_irqrestore(&pd->lock, flags); | ||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | static void s6gmac_tx_timeout(struct net_device *dev) | ||
878 | { | ||
879 | struct s6gmac *pd = netdev_priv(dev); | ||
880 | unsigned long flags; | ||
881 | spin_lock_irqsave(&pd->lock, flags); | ||
882 | s6gmac_tx_interrupt(dev); | ||
883 | spin_unlock_irqrestore(&pd->lock, flags); | ||
884 | } | ||
885 | |||
886 | static int s6gmac_open(struct net_device *dev) | ||
887 | { | ||
888 | struct s6gmac *pd = netdev_priv(dev); | ||
889 | unsigned long flags; | ||
890 | phy_read_status(pd->phydev); | ||
891 | spin_lock_irqsave(&pd->lock, flags); | ||
892 | pd->link.mbit = 0; | ||
893 | s6gmac_linkisup(dev, pd->phydev->link); | ||
894 | s6gmac_init_device(dev); | ||
895 | s6gmac_init_stats(dev); | ||
896 | s6gmac_init_dmac(dev); | ||
897 | s6gmac_rx_fillfifo(dev); | ||
898 | s6dmac_enable_chan(pd->rx_dma, pd->rx_chan, | ||
899 | 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1); | ||
900 | s6dmac_enable_chan(pd->tx_dma, pd->tx_chan, | ||
901 | 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1); | ||
902 | writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER | | ||
903 | 0 << S6_GMAC_HOST_INT_TXPREWOVER | | ||
904 | 0 << S6_GMAC_HOST_INT_RXBURSTUNDER | | ||
905 | 0 << S6_GMAC_HOST_INT_RXPOSTRFULL | | ||
906 | 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER, | ||
907 | pd->reg + S6_GMAC_HOST_INTMASK); | ||
908 | spin_unlock_irqrestore(&pd->lock, flags); | ||
909 | phy_start(pd->phydev); | ||
910 | netif_start_queue(dev); | ||
911 | return 0; | ||
912 | } | ||
913 | |||
914 | static int s6gmac_stop(struct net_device *dev) | ||
915 | { | ||
916 | struct s6gmac *pd = netdev_priv(dev); | ||
917 | unsigned long flags; | ||
918 | netif_stop_queue(dev); | ||
919 | phy_stop(pd->phydev); | ||
920 | spin_lock_irqsave(&pd->lock, flags); | ||
921 | s6gmac_init_dmac(dev); | ||
922 | s6gmac_stop_device(dev); | ||
923 | while (pd->tx_skb_i != pd->tx_skb_o) | ||
924 | dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); | ||
925 | while (pd->rx_skb_i != pd->rx_skb_o) | ||
926 | dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]); | ||
927 | spin_unlock_irqrestore(&pd->lock, flags); | ||
928 | return 0; | ||
929 | } | ||
930 | |||
931 | static struct net_device_stats *s6gmac_stats(struct net_device *dev) | ||
932 | { | ||
933 | struct s6gmac *pd = netdev_priv(dev); | ||
934 | struct net_device_stats *st = (struct net_device_stats *)&pd->stats; | ||
935 | int i; | ||
936 | do { | ||
937 | unsigned long flags; | ||
938 | spin_lock_irqsave(&pd->lock, flags); | ||
939 | for (i = 0; i < ARRAY_SIZE(pd->stats); i++) | ||
940 | pd->stats[i] = | ||
941 | pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); | ||
942 | s6gmac_stats_collect(pd, &statinf[0][0]); | ||
943 | s6gmac_stats_collect(pd, &statinf[1][0]); | ||
944 | i = s6gmac_stats_pending(pd, 0) | | ||
945 | s6gmac_stats_pending(pd, 1); | ||
946 | spin_unlock_irqrestore(&pd->lock, flags); | ||
947 | } while (i); | ||
948 | st->rx_errors = st->rx_crc_errors + | ||
949 | st->rx_frame_errors + | ||
950 | st->rx_length_errors + | ||
951 | st->rx_missed_errors; | ||
952 | st->tx_errors += st->tx_aborted_errors; | ||
953 | return st; | ||
954 | } | ||
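Reading S6_STATS_C together with s6gmac_stats_carry() and s6gmac_stats() above: reg_size stores the hardware counter width minus (S6_GMAC_STAT_SIZE_MIN - 1) so it fits the 4-bit field, each carry interrupt adds 1 << reg_size, and the shift by (S6_GMAC_STAT_SIZE_MIN - 1) in s6gmac_stats() restores the full scale, so every carry accounts for exactly one wrap (2^width) of the hardware counter before the live register value is added on top. A standalone sketch of that arithmetic; the width values below are hypothetical, chosen only to make the numbers concrete:

#include <stdio.h>

/* Hypothetical widths, standing in for S6_GMAC_STAT_SIZE_MIN and a
 * per-counter _SIZE; chosen only to make the arithmetic concrete. */
#define STAT_SIZE_MIN	12
#define RBYT_SIZE	24

int main(void)
{
	/* What S6_STATS_C would store in the 4-bit reg_size field: */
	unsigned int reg_size = RBYT_SIZE - (STAT_SIZE_MIN - 1);

	/* s6gmac_stats_carry() adds this for every carry interrupt ... */
	unsigned long per_carry = 1UL << reg_size;

	/* ... and s6gmac_stats() scales the accumulated carries back up: */
	unsigned long contribution = per_carry << (STAT_SIZE_MIN - 1);

	/* Each carry therefore accounts for one full wrap of the counter. */
	printf("reg_size=%u, per-carry contribution=%lu (2^%d)\n",
	       reg_size, contribution, RBYT_SIZE);
	return 0;
}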
955 | |||
956 | static int s6gmac_probe(struct platform_device *pdev) | ||
957 | { | ||
958 | struct net_device *dev; | ||
959 | struct s6gmac *pd; | ||
960 | int res; | ||
961 | unsigned long i; | ||
962 | struct mii_bus *mb; | ||
963 | |||
964 | dev = alloc_etherdev(sizeof(*pd)); | ||
965 | if (!dev) | ||
966 | return -ENOMEM; | ||
967 | |||
968 | dev->open = s6gmac_open; | ||
969 | dev->stop = s6gmac_stop; | ||
970 | dev->hard_start_xmit = s6gmac_tx; | ||
971 | dev->tx_timeout = s6gmac_tx_timeout; | ||
972 | dev->watchdog_timeo = HZ; | ||
973 | dev->get_stats = s6gmac_stats; | ||
974 | dev->irq = platform_get_irq(pdev, 0); | ||
975 | pd = netdev_priv(dev); | ||
976 | memset(pd, 0, sizeof(*pd)); | ||
977 | spin_lock_init(&pd->lock); | ||
978 | pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; | ||
979 | i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start; | ||
980 | pd->tx_dma = DMA_MASK_DMAC(i); | ||
981 | pd->tx_chan = DMA_INDEX_CHNL(i); | ||
982 | i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start; | ||
983 | pd->rx_dma = DMA_MASK_DMAC(i); | ||
984 | pd->rx_chan = DMA_INDEX_CHNL(i); | ||
985 | pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; | ||
986 | res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev); | ||
987 | if (res) { | ||
988 | printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); | ||
989 | goto errirq; | ||
990 | } | ||
991 | res = register_netdev(dev); | ||
992 | if (res) { | ||
993 | printk(KERN_ERR DRV_PRMT "error registering device %s\n", | ||
994 | dev->name); | ||
995 | goto errdev; | ||
996 | } | ||
997 | mb = mdiobus_alloc(); | ||
998 | if (!mb) { | ||
999 | printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); | ||
1000 | res = -ENOMEM; | ||
1001 | goto errmii; | ||
1002 | } | ||
1003 | mb->name = "s6gmac_mii"; | ||
1004 | mb->read = s6mii_read; | ||
1005 | mb->write = s6mii_write; | ||
1006 | mb->reset = s6mii_reset; | ||
1007 | mb->priv = pd; | ||
1008 | snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); | ||
1009 | mb->phy_mask = ~(1 << 0); | ||
1010 | mb->irq = &pd->mii.irq[0]; | ||
1011 | for (i = 0; i < PHY_MAX_ADDR; i++) { | ||
1012 | int n = platform_get_irq(pdev, i + 1); | ||
1013 | if (n < 0) | ||
1014 | n = PHY_POLL; | ||
1015 | pd->mii.irq[i] = n; | ||
1016 | } | ||
1017 | mdiobus_register(mb); | ||
1018 | pd->mii.bus = mb; | ||
1019 | res = s6gmac_phy_start(dev); | ||
1020 | if (res) | ||
1021 | return res; | ||
1022 | platform_set_drvdata(pdev, dev); | ||
1023 | return 0; | ||
1024 | errmii: | ||
1025 | unregister_netdev(dev); | ||
1026 | errdev: | ||
1027 | free_irq(dev->irq, dev); | ||
1028 | errirq: | ||
1029 | free_netdev(dev); | ||
1030 | return res; | ||
1031 | } | ||
1032 | |||
1033 | static int s6gmac_remove(struct platform_device *pdev) | ||
1034 | { | ||
1035 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1036 | if (dev) { | ||
1037 | struct s6gmac *pd = netdev_priv(dev); | ||
1038 | mdiobus_unregister(pd->mii.bus); | ||
1039 | unregister_netdev(dev); | ||
1040 | free_irq(dev->irq, dev); | ||
1041 | free_netdev(dev); | ||
1042 | } | ||
1043 | return 0; | ||
1044 | } | ||
1045 | |||
1046 | static struct platform_driver s6gmac_driver = { | ||
1047 | .probe = s6gmac_probe, | ||
1048 | .remove = s6gmac_remove, | ||
1049 | .driver = { | ||
1050 | .name = "s6gmac", | ||
1051 | }, | ||
1052 | }; | ||
1053 | |||
1054 | module_platform_driver(s6gmac_driver); | ||
1055 | |||
1056 | MODULE_LICENSE("GPL"); | ||
1057 | MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); | ||
1058 | MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>"); | ||
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 627926800ff3..9468e64e6007 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig | |||
@@ -39,7 +39,7 @@ config SMC91X | |||
39 | select CRC32 | 39 | select CRC32 |
40 | select MII | 40 | select MII |
41 | depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ | 41 | depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ |
42 | MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) | 42 | MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB) |
43 | ---help--- | 43 | ---help--- |
44 | This is a driver for SMC's 91x series of Ethernet chipsets, | 44 | This is a driver for SMC's 91x series of Ethernet chipsets, |
45 | including the SMC91C94 and the SMC91C111. Say Y if you want it | 45 | including the SMC91C94 and the SMC91C111. Say Y if you want it |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 0e137751e76e..056b358b4a72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | |||
@@ -309,16 +309,16 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, | |||
309 | 309 | ||
310 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { | 310 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { |
311 | const char *rs; | 311 | const char *rs; |
312 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | ||
313 | 312 | ||
314 | err = of_property_read_string(np, "st,tx-retime-src", &rs); | 313 | err = of_property_read_string(np, "st,tx-retime-src", &rs); |
315 | if (err < 0) | 314 | if (err < 0) { |
316 | dev_warn(dev, "Use internal clock source\n"); | 315 | dev_warn(dev, "Use internal clock source\n"); |
317 | 316 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | |
318 | if (!strcasecmp(rs, "clk_125")) | 317 | } else if (!strcasecmp(rs, "clk_125")) { |
319 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; | 318 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; |
320 | else if (!strcasecmp(rs, "txclk")) | 319 | } else if (!strcasecmp(rs, "txclk")) { |
321 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; | 320 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; |
321 | } | ||
322 | 322 | ||
323 | dwmac->speed = SPEED_1000; | 323 | dwmac->speed = SPEED_1000; |
324 | } | 324 | } |
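The rework above ensures the retime-source string is only examined when of_property_read_string() succeeds, and applies the CLKGEN fallback in the failure branch instead of unconditionally before the read. A hedged, standalone sketch of the resulting control flow; the helper and enum names here are stand-ins, not the driver's own:

#include <stdio.h>
#include <strings.h>

enum retime_src { SRC_CLKGEN, SRC_CLK_125, SRC_TXCLK };

/* Stand-in for of_property_read_string(): returns 0 and sets *val on
 * success, a negative value when the property is missing. */
static int read_string_prop(const char *prop, const char **val)
{
	(void)prop;
	*val = "clk_125";		/* pretend the DT carries this value */
	return 0;
}

static enum retime_src parse_tx_retime_src(void)
{
	const char *rs;
	enum retime_src src = SRC_CLKGEN;	/* fall back to the internal clock */

	if (read_string_prop("st,tx-retime-src", &rs) < 0)
		printf("Use internal clock source\n");	/* rs is never touched here */
	else if (!strcasecmp(rs, "clk_125"))
		src = SRC_CLK_125;
	else if (!strcasecmp(rs, "txclk"))
		src = SRC_TXCLK;

	return src;
}

int main(void)
{
	printf("tx_retime_src = %d\n", parse_tx_retime_src());
	return 0;
}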
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 118a427d1942..8c6b7c1651e5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | |||
1671 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | 1671 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
1672 | * file on failure. | 1672 | * file on failure. |
1673 | */ | 1673 | */ |
1674 | static int stmmac_hw_setup(struct net_device *dev) | 1674 | static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
1675 | { | 1675 | { |
1676 | struct stmmac_priv *priv = netdev_priv(dev); | 1676 | struct stmmac_priv *priv = netdev_priv(dev); |
1677 | int ret; | 1677 | int ret; |
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev) | |||
1708 | 1708 | ||
1709 | stmmac_mmc_setup(priv); | 1709 | stmmac_mmc_setup(priv); |
1710 | 1710 | ||
1711 | ret = stmmac_init_ptp(priv); | 1711 | if (init_ptp) { |
1712 | if (ret && ret != -EOPNOTSUPP) | 1712 | ret = stmmac_init_ptp(priv); |
1713 | pr_warn("%s: failed PTP initialisation\n", __func__); | 1713 | if (ret && ret != -EOPNOTSUPP) |
1714 | pr_warn("%s: failed PTP initialisation\n", __func__); | ||
1715 | } | ||
1714 | 1716 | ||
1715 | #ifdef CONFIG_DEBUG_FS | 1717 | #ifdef CONFIG_DEBUG_FS |
1716 | ret = stmmac_init_fs(dev); | 1718 | ret = stmmac_init_fs(dev); |
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev) | |||
1787 | goto init_error; | 1789 | goto init_error; |
1788 | } | 1790 | } |
1789 | 1791 | ||
1790 | ret = stmmac_hw_setup(dev); | 1792 | ret = stmmac_hw_setup(dev, true); |
1791 | if (ret < 0) { | 1793 | if (ret < 0) { |
1792 | pr_err("%s: Hw setup failed\n", __func__); | 1794 | pr_err("%s: Hw setup failed\n", __func__); |
1793 | goto init_error; | 1795 | goto init_error; |
@@ -3036,7 +3038,7 @@ int stmmac_resume(struct net_device *ndev) | |||
3036 | netif_device_attach(ndev); | 3038 | netif_device_attach(ndev); |
3037 | 3039 | ||
3038 | init_dma_desc_rings(ndev, GFP_ATOMIC); | 3040 | init_dma_desc_rings(ndev, GFP_ATOMIC); |
3039 | stmmac_hw_setup(ndev); | 3041 | stmmac_hw_setup(ndev, false); |
3040 | stmmac_init_tx_coalesce(priv); | 3042 | stmmac_init_tx_coalesce(priv); |
3041 | 3043 | ||
3042 | napi_enable(&priv->napi); | 3044 | napi_enable(&priv->napi); |
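The new bool lets stmmac_resume() rerun the hardware bring-up without re-registering PTP, while stmmac_open() keeps requesting it. A tiny sketch of the calling convention only; the function below is a simplified stand-in, not the driver's stmmac_hw_setup():

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for stmmac_hw_setup(dev, init_ptp). */
static int hw_setup(const char *caller, bool init_ptp)
{
	printf("%s: DMA/MAC bring-up%s\n", caller, init_ptp ? " + PTP init" : "");
	return 0;
}

int main(void)
{
	hw_setup("open", true);		/* stmmac_open(): full initialisation  */
	hw_setup("resume", false);	/* stmmac_resume(): PTP already set up */
	return 0;
}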
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 4032b170fe24..3039de2465ba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = { | |||
430 | .remove = stmmac_pltfr_remove, | 430 | .remove = stmmac_pltfr_remove, |
431 | .driver = { | 431 | .driver = { |
432 | .name = STMMAC_RESOURCE_NAME, | 432 | .name = STMMAC_RESOURCE_NAME, |
433 | .owner = THIS_MODULE, | ||
434 | .pm = &stmmac_pltfr_pm_ops, | 433 | .pm = &stmmac_pltfr_pm_ops, |
435 | .of_match_table = of_match_ptr(stmmac_dt_ids), | 434 | .of_match_table = of_match_ptr(stmmac_dt_ids), |
436 | }, | 435 | }, |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 45c408ef67d0..d2835bf7b4fb 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) | |||
1201 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); | 1201 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); |
1202 | if (IS_ERR(segs)) { | 1202 | if (IS_ERR(segs)) { |
1203 | dev->stats.tx_dropped++; | 1203 | dev->stats.tx_dropped++; |
1204 | dev_kfree_skb_any(skb); | ||
1204 | return NETDEV_TX_OK; | 1205 | return NETDEV_TX_OK; |
1205 | } | 1206 | } |
1206 | 1207 | ||
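The added dev_kfree_skb_any() closes a leak: when skb_gso_segment() fails, the original skb was counted as dropped but never freed. A standalone sketch of the corrected error path; the struct and helpers are stand-ins for the kernel ones:

#include <stdio.h>
#include <stdlib.h>

struct sk_buff { char *data; };

/* Stand-ins for the kernel helpers used in the hunk above. */
static struct sk_buff *gso_segment(struct sk_buff *skb) { (void)skb; return NULL; }
static void kfree_skb_any(struct sk_buff *skb) { free(skb->data); free(skb); }

static int handle_offloads(struct sk_buff *skb, unsigned long *tx_dropped)
{
	struct sk_buff *segs = gso_segment(skb);

	if (!segs) {			/* segmentation failed           */
		(*tx_dropped)++;	/* account the drop ...          */
		kfree_skb_any(skb);	/* ... and free the original skb */
		return 0;		/* NETDEV_TX_OK equivalent       */
	}
	/* the normal transmit path would continue with 'segs' here */
	return 0;
}

int main(void)
{
	unsigned long tx_dropped = 0;
	struct sk_buff *skb = malloc(sizeof(*skb));

	skb->data = malloc(64);
	handle_offloads(skb, &tx_dropped);
	printf("tx_dropped=%lu\n", tx_dropped);
	return 0;
}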
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c560f9aeb55d..e61ee8351272 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -757,6 +757,14 @@ requeue: | |||
757 | static irqreturn_t cpsw_interrupt(int irq, void *dev_id) | 757 | static irqreturn_t cpsw_interrupt(int irq, void *dev_id) |
758 | { | 758 | { |
759 | struct cpsw_priv *priv = dev_id; | 759 | struct cpsw_priv *priv = dev_id; |
760 | int value = irq - priv->irqs_table[0]; | ||
761 | |||
762 | /* NOTICE: Ending IRQ here. The trick with the 'value' variable above | ||
763 | * is to make sure we will always write the correct value to the EOI | ||
764 | * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 | ||
765 | * for TX Interrupt and 3 for MISC Interrupt. | ||
766 | */ | ||
767 | cpdma_ctlr_eoi(priv->dma, value); | ||
760 | 768 | ||
761 | cpsw_intr_disable(priv); | 769 | cpsw_intr_disable(priv); |
762 | if (priv->irq_enabled == true) { | 770 | if (priv->irq_enabled == true) { |
@@ -786,8 +794,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) | |||
786 | int num_tx, num_rx; | 794 | int num_tx, num_rx; |
787 | 795 | ||
788 | num_tx = cpdma_chan_process(priv->txch, 128); | 796 | num_tx = cpdma_chan_process(priv->txch, 128); |
789 | if (num_tx) | ||
790 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
791 | 797 | ||
792 | num_rx = cpdma_chan_process(priv->rxch, budget); | 798 | num_rx = cpdma_chan_process(priv->rxch, budget); |
793 | if (num_rx < budget) { | 799 | if (num_rx < budget) { |
@@ -795,7 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) | |||
795 | 801 | ||
796 | napi_complete(napi); | 802 | napi_complete(napi); |
797 | cpsw_intr_enable(priv); | 803 | cpsw_intr_enable(priv); |
798 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
799 | prim_cpsw = cpsw_get_slave_priv(priv, 0); | 804 | prim_cpsw = cpsw_get_slave_priv(priv, 0); |
800 | if (prim_cpsw->irq_enabled == false) { | 805 | if (prim_cpsw->irq_enabled == false) { |
801 | prim_cpsw->irq_enabled = true; | 806 | prim_cpsw->irq_enabled = true; |
@@ -1310,8 +1315,6 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1310 | napi_enable(&priv->napi); | 1315 | napi_enable(&priv->napi); |
1311 | cpdma_ctlr_start(priv->dma); | 1316 | cpdma_ctlr_start(priv->dma); |
1312 | cpsw_intr_enable(priv); | 1317 | cpsw_intr_enable(priv); |
1313 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
1314 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
1315 | 1318 | ||
1316 | prim_cpsw = cpsw_get_slave_priv(priv, 0); | 1319 | prim_cpsw = cpsw_get_slave_priv(priv, 0); |
1317 | if (prim_cpsw->irq_enabled == false) { | 1320 | if (prim_cpsw->irq_enabled == false) { |
@@ -1578,9 +1581,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) | |||
1578 | cpdma_chan_start(priv->txch); | 1581 | cpdma_chan_start(priv->txch); |
1579 | cpdma_ctlr_int_ctrl(priv->dma, true); | 1582 | cpdma_ctlr_int_ctrl(priv->dma, true); |
1580 | cpsw_intr_enable(priv); | 1583 | cpsw_intr_enable(priv); |
1581 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
1582 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
1583 | |||
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) | 1586 | static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) |
@@ -1620,9 +1620,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) | |||
1620 | cpsw_interrupt(ndev->irq, priv); | 1620 | cpsw_interrupt(ndev->irq, priv); |
1621 | cpdma_ctlr_int_ctrl(priv->dma, true); | 1621 | cpdma_ctlr_int_ctrl(priv->dma, true); |
1622 | cpsw_intr_enable(priv); | 1622 | cpsw_intr_enable(priv); |
1623 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
1624 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
1625 | |||
1626 | } | 1623 | } |
1627 | #endif | 1624 | #endif |
1628 | 1625 | ||
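The handler now issues the end-of-interrupt write itself, deriving the EOI code from the offset of the firing IRQ within priv->irqs_table, which is why the per-path cpdma_ctlr_eoi() calls in poll, open, tx_timeout and poll_controller go away. A small standalone sketch of the offset-to-code mapping spelled out in the added comment; the IRQ numbers are hypothetical:

#include <stdio.h>

/* EOI codes as listed in the comment added to cpsw_interrupt():
 * 0 = RX_THRESH, 1 = RX, 2 = TX, 3 = MISC. */
static const char *const eoi_name[] = { "RX_THRESH", "RX", "TX", "MISC" };

int main(void)
{
	int irqs_table[] = { 40, 41, 42, 43 };	/* hypothetical IRQ numbers */
	int irq = 42;				/* the IRQ that fired       */

	int value = irq - irqs_table[0];	/* same offset trick as the hunk */
	printf("EOI value %d -> %s interrupt\n", value, eoi_name[value]);
	return 0;
}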
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9c2d91ea0af4..dbcbf0c5bcfa 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op) | |||
1043 | lp->regs = of_iomap(op->dev.of_node, 0); | 1043 | lp->regs = of_iomap(op->dev.of_node, 0); |
1044 | if (!lp->regs) { | 1044 | if (!lp->regs) { |
1045 | dev_err(&op->dev, "could not map temac regs.\n"); | 1045 | dev_err(&op->dev, "could not map temac regs.\n"); |
1046 | rc = -ENOMEM; | ||
1046 | goto nodev; | 1047 | goto nodev; |
1047 | } | 1048 | } |
1048 | 1049 | ||
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op) | |||
1062 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); | 1063 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); |
1063 | if (!np) { | 1064 | if (!np) { |
1064 | dev_err(&op->dev, "could not find DMA node\n"); | 1065 | dev_err(&op->dev, "could not find DMA node\n"); |
1066 | rc = -ENODEV; | ||
1065 | goto err_iounmap; | 1067 | goto err_iounmap; |
1066 | } | 1068 | } |
1067 | 1069 | ||
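This fix, and the matching ones in xilinx_axienet_main.c and xilinx_emaclite.c further down, make the probe routine return an explicit error code when a resource lookup fails instead of whatever rc happened to hold at that point. A minimal standalone sketch of the pattern; the helper names are invented for illustration:

#include <errno.h>
#include <stdio.h>

/* Invented stand-ins for the of_iomap()/of_parse_phandle() lookups. */
static void *map_regs(void)      { return NULL; }	/* pretend mapping fails */
static void *find_dma_node(void) { return (void *)1; }

static int probe(void)
{
	int rc;

	if (!map_regs()) {
		rc = -ENOMEM;		/* set an explicit error ...        */
		goto out;		/* ... before taking the error path */
	}
	if (!find_dma_node()) {
		rc = -ENODEV;
		goto out;
	}
	return 0;
out:
	return rc;			/* never reports success on failure */
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}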
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 44b8d2bad8c3..4c9b4fa1d3c1 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h | |||
@@ -388,7 +388,6 @@ struct axidma_bd { | |||
388 | * @dma_err_tasklet: Tasklet structure to process Axi DMA errors | 388 | * @dma_err_tasklet: Tasklet structure to process Axi DMA errors |
389 | * @tx_irq: Axidma TX IRQ number | 389 | * @tx_irq: Axidma TX IRQ number |
390 | * @rx_irq: Axidma RX IRQ number | 390 | * @rx_irq: Axidma RX IRQ number |
391 | * @temac_type: axienet type to identify between soft and hard temac | ||
392 | * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X | 391 | * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X |
393 | * @options: AxiEthernet option word | 392 | * @options: AxiEthernet option word |
394 | * @last_link: Phy link state in which the PHY was negotiated earlier | 393 | * @last_link: Phy link state in which the PHY was negotiated earlier |
@@ -431,7 +430,6 @@ struct axienet_local { | |||
431 | 430 | ||
432 | int tx_irq; | 431 | int tx_irq; |
433 | int rx_irq; | 432 | int rx_irq; |
434 | u32 temac_type; | ||
435 | u32 phy_type; | 433 | u32 phy_type; |
436 | 434 | ||
437 | u32 options; /* Current options word */ | 435 | u32 options; /* Current options word */ |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4ea2d4e6f1d1..a6d2860b712c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
1501 | lp->regs = of_iomap(op->dev.of_node, 0); | 1501 | lp->regs = of_iomap(op->dev.of_node, 0); |
1502 | if (!lp->regs) { | 1502 | if (!lp->regs) { |
1503 | dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); | 1503 | dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); |
1504 | ret = -ENOMEM; | ||
1504 | goto nodev; | 1505 | goto nodev; |
1505 | } | 1506 | } |
1506 | /* Setup checksum offload, but default to off if not specified */ | 1507 | /* Setup checksum offload, but default to off if not specified */ |
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op) | |||
1555 | if ((be32_to_cpup(p)) >= 0x4000) | 1556 | if ((be32_to_cpup(p)) >= 0x4000) |
1556 | lp->jumbo_support = 1; | 1557 | lp->jumbo_support = 1; |
1557 | } | 1558 | } |
1558 | p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type", | ||
1559 | NULL); | ||
1560 | if (p) | ||
1561 | lp->temac_type = be32_to_cpup(p); | ||
1562 | p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); | 1559 | p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); |
1563 | if (p) | 1560 | if (p) |
1564 | lp->phy_type = be32_to_cpup(p); | 1561 | lp->phy_type = be32_to_cpup(p); |
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
1567 | np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); | 1564 | np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); |
1568 | if (!np) { | 1565 | if (!np) { |
1569 | dev_err(&op->dev, "could not find DMA node\n"); | 1566 | dev_err(&op->dev, "could not find DMA node\n"); |
1567 | ret = -ENODEV; | ||
1570 | goto err_iounmap; | 1568 | goto err_iounmap; |
1571 | } | 1569 | } |
1572 | lp->dma_regs = of_iomap(np, 0); | 1570 | lp->dma_regs = of_iomap(np, 0); |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 24858799c204..9d4ce388510a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) | |||
1109 | res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); | 1109 | res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); |
1110 | if (!res) { | 1110 | if (!res) { |
1111 | dev_err(dev, "no IRQ found\n"); | 1111 | dev_err(dev, "no IRQ found\n"); |
1112 | rc = -ENXIO; | ||
1112 | goto error; | 1113 | goto error; |
1113 | } | 1114 | } |
1114 | 1115 | ||