Diffstat (limited to 'drivers/net/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/stmmac/stmmac_main.c	| 565
1 file changed, 267 insertions(+), 298 deletions(-)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index ea0461eb2dbe..e25e44a45c28 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -45,6 +45,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include "stmmac.h"
 
 #define STMMAC_RESOURCE_NAME	"stmmaceth"
@@ -116,9 +117,6 @@ static int tc = TC_DEFAULT;
 module_param(tc, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tc, "DMA threshold control value");
 
-#define RX_NO_COALESCE	1	/* Always interrupt on completion */
-#define TX_NO_COALESCE	-1	/* No moderation by default */
-
 /* Pay attention to tune this parameter; take care of both
  * hardware capability and network stabitily/performance impact.
  * Many tests showed that ~4ms latency seems to be good enough. */
@@ -134,19 +132,11 @@ static int buf_sz = DMA_BUFFER_SIZE;
 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
 
-/* In case of Giga ETH, we can enable/disable the COE for the
- * transmit HW checksum computation.
- * Note that, if tx csum is off in HW, SG will be still supported. */
-static int tx_coe = HW_CSUM;
-module_param(tx_coe, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
-
 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /**
  * stmmac_verify_args - verify the driver parameters.
@@ -193,6 +183,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
 }
 
+/* On some ST platforms, some HW system configuraton registers have to be
+ * set according to the link speed negotiated.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+
+	if (likely(priv->plat->fix_mac_speed))
+		priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+					  phydev->speed);
+}
+
 /**
  * stmmac_adjust_link
  * @dev: net device structure
@@ -202,7 +204,6 @@ static void stmmac_adjust_link(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
-	unsigned long ioaddr = dev->base_addr;
 	unsigned long flags;
 	int new_state = 0;
 	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -215,7 +216,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (phydev->link) {
-		u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 
 		/* Now we make sure that we can be in full duplex mode.
 		 * If not, we operate in half-duplex mode. */
@@ -229,19 +230,20 @@ static void stmmac_adjust_link(struct net_device *dev)
 		}
 		/* Flow Control operation */
 		if (phydev->pause)
-			priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
 						 fc, pause_time);
 
 		if (phydev->speed != priv->speed) {
 			new_state = 1;
 			switch (phydev->speed) {
 			case 1000:
-				if (likely(priv->is_gmac))
+				if (likely(priv->plat->has_gmac))
 					ctrl &= ~priv->hw->link.port;
+				stmmac_hw_fix_mac_speed(priv);
 				break;
 			case 100:
 			case 10:
-				if (priv->is_gmac) {
+				if (priv->plat->has_gmac) {
 					ctrl |= priv->hw->link.port;
 					if (phydev->speed == SPEED_100) {
 						ctrl |= priv->hw->link.speed;
@@ -251,9 +253,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 				} else {
 					ctrl &= ~priv->hw->link.port;
 				}
-				if (likely(priv->fix_mac_speed))
-					priv->fix_mac_speed(priv->bsp_priv,
-							    phydev->speed);
+				stmmac_hw_fix_mac_speed(priv);
 				break;
 			default:
 				if (netif_msg_link(priv))
@@ -265,7 +265,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 			priv->speed = phydev->speed;
 		}
 
-		writel(ctrl, ioaddr + MAC_CTRL_REG);
+		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 
 		if (!priv->oldlink) {
 			new_state = 1;
@@ -310,7 +310,7 @@ static int stmmac_init_phy(struct net_device *dev)
 		return 0;
 	}
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
 	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 		 priv->phy_addr);
 	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -342,33 +342,19 @@ static int stmmac_init_phy(struct net_device *dev)
 	return 0;
 }
 
-static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_RNABLE_RX;
-	/* Set the RE (receive enable bit into the MAC CTRL register). */
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_ENABLE_TX;
-	/* Set the TE (transmit enable bit into the MAC CTRL register). */
+	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_RNABLE_RX;
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_ENABLE_TX;
+	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
@@ -567,29 +553,22 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
  * stmmac_dma_operation_mode - HW DMA operation mode
  * @priv : pointer to the private device structure.
  * Description: it sets the DMA operation mode: tx/rx DMA thresholds
- * or Store-And-Forward capability. It also verifies the COE for the
- * transmission in case of Giga ETH.
+ * or Store-And-Forward capability.
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
-	if (!priv->is_gmac) {
-		/* MAC 10/100 */
-		priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
-		priv->tx_coe = NO_HW_CSUM;
-	} else {
-		if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
-			priv->hw->dma->dma_mode(priv->dev->base_addr,
-						SF_DMA_MODE, SF_DMA_MODE);
-			tc = SF_DMA_MODE;
-			priv->tx_coe = HW_CSUM;
-		} else {
-			/* Checksum computation is performed in software. */
-			priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
-						SF_DMA_MODE);
-			priv->tx_coe = NO_HW_CSUM;
-		}
-	}
-	tx_coe = priv->tx_coe;
+	if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
+		/* In case of GMAC, SF mode has to be enabled
+		 * to perform the TX COE. This depends on:
+		 * 1) TX COE if actually supported
+		 * 2) There is no bugged Jumbo frame support
+		 * that needs to not insert csum in the TDES.
+		 */
+		priv->hw->dma->dma_mode(priv->ioaddr,
+					SF_DMA_MODE, SF_DMA_MODE);
+		tc = SF_DMA_MODE;
+	} else
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
 }
 
 /**
@@ -600,7 +579,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 static void stmmac_tx(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
-	unsigned long ioaddr = priv->dev->base_addr;
 
 	while (priv->dirty_tx != priv->cur_tx) {
 		int last;
@@ -618,7 +596,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
 			int tx_error =
 				priv->hw->desc->tx_status(&priv->dev->stats,
 							  &priv->xstats, p,
-							  ioaddr);
+							  priv->ioaddr);
 			if (likely(tx_error == 0)) {
 				priv->dev->stats.tx_packets++;
 				priv->xstats.tx_pkt_n++;
@@ -674,7 +652,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
 		priv->tm->timer_start(tmrate);
 	else
 #endif
-		priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
+		priv->hw->dma->enable_dma_irq(priv->ioaddr);
 }
 
 static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -684,7 +662,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
 		priv->tm->timer_stop();
 	else
 #endif
-		priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
+		priv->hw->dma->disable_dma_irq(priv->ioaddr);
 }
 
 static int stmmac_has_work(struct stmmac_priv *priv)
@@ -739,14 +717,15 @@ static void stmmac_no_timer_stopped(void)
  */
 static void stmmac_tx_err(struct stmmac_priv *priv)
 {
+
 	netif_stop_queue(priv->dev);
 
-	priv->hw->dma->stop_tx(priv->dev->base_addr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
 	dma_free_tx_skbufs(priv);
 	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
-	priv->hw->dma->start_tx(priv->dev->base_addr);
+	priv->hw->dma->start_tx(priv->ioaddr);
 
 	priv->dev->stats.tx_errors++;
 	netif_wake_queue(priv->dev);
@@ -755,11 +734,9 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
-	unsigned long ioaddr = priv->dev->base_addr;
 	int status;
 
-	status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
-					      &priv->xstats);
+	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
 	if (likely(status == handle_tx_rx))
 		_stmmac_schedule(priv);
 
@@ -767,10 +744,9 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 		/* Try to bump up the dma threshold on this failure */
 		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
 			tc += 64;
-			priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
 			priv->xstats.threshold = tc;
 		}
-		stmmac_tx_err(priv);
 	} else if (unlikely(status == tx_hard_error))
 		stmmac_tx_err(priv);
 }
@@ -787,7 +763,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 static int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 	int ret;
 
 	/* Check that the MAC address is valid.  If its not, refuse
@@ -802,21 +777,6 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_verify_args();
 
-	ret = stmmac_init_phy(dev);
-	if (unlikely(ret)) {
-		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
-		return ret;
-	}
-
-	/* Request the IRQ lines */
-	ret = request_irq(dev->irq, stmmac_interrupt,
-			  IRQF_SHARED, dev->name, dev);
-	if (unlikely(ret < 0)) {
-		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-		       __func__, dev->irq, ret);
-		return ret;
-	}
-
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL)) {
@@ -835,6 +795,11 @@ static int stmmac_open(struct net_device *dev)
 	} else
 		priv->tm->enable = 1;
 #endif
+	ret = stmmac_init_phy(dev);
+	if (unlikely(ret)) {
+		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+		goto open_error;
+	}
 
 	/* Create and initialize the TX/RX descriptors chains. */
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -843,30 +808,43 @@ static int stmmac_open(struct net_device *dev)
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
-					 priv->dma_rx_phy) < 0)) {
-
+	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+				  priv->dma_tx_phy, priv->dma_rx_phy);
+	if (ret < 0) {
 		pr_err("%s: DMA initialization failed\n", __func__);
-		return -1;
+		goto open_error;
 	}
 
 	/* Copy the MAC addr into the HW */
-	priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
 	/* If required, perform hw setup of the bus. */
-	if (priv->bus_setup)
-		priv->bus_setup(ioaddr);
+	if (priv->plat->bus_setup)
+		priv->plat->bus_setup(priv->ioaddr);
 	/* Initialize the MAC Core */
-	priv->hw->mac->core_init(ioaddr);
+	priv->hw->mac->core_init(priv->ioaddr);
 
-	priv->shutdown = 0;
+	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+	if (priv->rx_coe)
+		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
+	if (priv->plat->tx_coe)
+		pr_info("\tTX Checksum insertion supported\n");
+	netdev_update_features(dev);
 
 	/* Initialise the MMC (if present) to disable all interrupts. */
-	writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
-	writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
+	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+
+	/* Request the IRQ lines */
+	ret = request_irq(dev->irq, stmmac_interrupt,
+			  IRQF_SHARED, dev->name, dev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+		       __func__, dev->irq, ret);
+		goto open_error;
+	}
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_mac_enable_rx(ioaddr);
-	stmmac_mac_enable_tx(ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -877,16 +855,16 @@ static int stmmac_open(struct net_device *dev)
 
 	/* Start the ball rolling... */
 	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
-	priv->hw->dma->start_tx(ioaddr);
-	priv->hw->dma->start_rx(ioaddr);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm->timer_start(tmrate);
 #endif
 	/* Dump DMA/MAC registers */
 	if (netif_msg_hw(priv)) {
-		priv->hw->mac->dump_regs(ioaddr);
-		priv->hw->dma->dump_regs(ioaddr);
+		priv->hw->mac->dump_regs(priv->ioaddr);
+		priv->hw->dma->dump_regs(priv->ioaddr);
 	}
 
 	if (priv->phydev)
@@ -895,7 +873,17 @@ static int stmmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	skb_queue_head_init(&priv->rx_recycle);
 	netif_start_queue(dev);
+
 	return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+	kfree(priv->tm);
+#endif
+	if (priv->phydev)
+		phy_disconnect(priv->phydev);
+
+	return ret;
 }
 
 /**
@@ -930,61 +918,20 @@ static int stmmac_release(struct net_device *dev)
 	free_irq(dev->irq, dev);
 
 	/* Stop TX/RX DMA and clear the descriptors */
-	priv->hw->dma->stop_tx(dev->base_addr);
-	priv->hw->dma->stop_rx(dev->base_addr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
 
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
 
-	/* Disable the MAC core */
-	stmmac_mac_disable_tx(dev->base_addr);
-	stmmac_mac_disable_rx(dev->base_addr);
+	/* Disable the MAC Rx/Tx */
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(dev);
 
 	return 0;
 }
 
-/*
- * To perform emulated hardware segmentation on skb.
- */
-static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
-{
-	struct sk_buff *segs, *curr_skb;
-	int gso_segs = skb_shinfo(skb)->gso_segs;
-
-	/* Estimate the number of fragments in the worst case */
-	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
-		netif_stop_queue(priv->dev);
-		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
-		       __func__);
-		if (stmmac_tx_avail(priv) < gso_segs)
-			return NETDEV_TX_BUSY;
-
-		netif_wake_queue(priv->dev);
-	}
-	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
-	       skb, skb->len);
-
-	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
-	if (unlikely(IS_ERR(segs)))
-		goto sw_tso_end;
-
-	do {
-		curr_skb = segs;
-		segs = segs->next;
-		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
-		       "*next %p\n", curr_skb->len, curr_skb, segs);
-		curr_skb->next = NULL;
-		stmmac_xmit(curr_skb, priv->dev);
-	} while (segs);
-
-sw_tso_end:
-	dev_kfree_skb(skb);
-
-	return NETDEV_TX_OK;
-}
-
 static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
 					       struct net_device *dev,
 					       int csum_insertion)
@@ -1062,15 +1009,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			       !skb_is_gso(skb) ? "isn't" : "is");
 #endif
 
-	if (unlikely(skb_is_gso(skb)))
-		return stmmac_sw_tso(priv, skb);
-
-	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
-		if (likely(priv->tx_coe == NO_HW_CSUM))
-			skb_checksum_help(skb);
-		else
-			csum_insertion = 1;
-	}
+	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
 	desc = priv->dma_tx + entry;
 	first = desc;
@@ -1140,7 +1079,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->stats.tx_bytes += skb->len;
 
-	priv->hw->dma->enable_dma_transmission(dev->base_addr);
+	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	return NETDEV_TX_OK;
 }
@@ -1170,7 +1109,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 					   DMA_FROM_DEVICE);
 
 		(p + entry)->des2 = priv->rx_skbuff_dma[entry];
-		if (unlikely(priv->is_gmac)) {
+		if (unlikely(priv->plat->has_gmac)) {
 			if (bfsize >= BUF_SIZE_8KiB)
 				(p + entry)->des3 =
 				    (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1256,7 +1195,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 
 			if (unlikely(status == csum_none)) {
 				/* always for the old mac 10/100 */
-				skb->ip_summed = CHECKSUM_NONE;
+				skb_checksum_none_assert(skb);
 				netif_receive_skb(skb);
 			} else {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1380,7 +1319,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 		return -EBUSY;
 	}
 
-	if (priv->is_gmac)
+	if (priv->plat->has_gmac)
 		max_mtu = JUMBO_LEN;
 	else
 		max_mtu = ETH_DATA_LEN;
@@ -1391,10 +1330,30 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 	}
 
 	dev->mtu = new_mtu;
+	netdev_update_features(dev);
 
 	return 0;
 }
 
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (!priv->rx_coe)
+		features &= ~NETIF_F_RXCSUM;
+	if (!priv->plat->tx_coe)
+		features &= ~NETIF_F_ALL_CSUM;
+
+	/* Some GMAC devices have a bugged Jumbo frame support that
+	 * needs to have the Tx COE disabled for oversized frames
+	 * (due to limited buffer sizes). In this case we disable
+	 * the TX csum insertionin the TDES and not use SF. */
+	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+		features &= ~NETIF_F_ALL_CSUM;
+
+	return features;
+}
+
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
@@ -1405,11 +1364,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	if (priv->is_gmac) {
-		unsigned long ioaddr = dev->base_addr;
+	if (priv->plat->has_gmac)
 		/* To handle GMAC own interrupts */
-		priv->hw->mac->host_irq_status(ioaddr);
-	}
+		priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
 
 	stmmac_dma_interrupt(priv);
 
@@ -1474,6 +1431,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_start_xmit = stmmac_xmit,
 	.ndo_stop = stmmac_release,
 	.ndo_change_mtu = stmmac_change_mtu,
+	.ndo_fix_features = stmmac_fix_features,
 	.ndo_set_multicast_list = stmmac_multicast_list,
 	.ndo_tx_timeout = stmmac_tx_timeout,
 	.ndo_do_ioctl = stmmac_ioctl,
@@ -1504,7 +1462,8 @@ static int stmmac_probe(struct net_device *dev)
 	dev->netdev_ops = &stmmac_netdev_ops;
 	stmmac_set_ethtool_ops(dev);
 
-	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
 	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
@@ -1512,9 +1471,6 @@ static int stmmac_probe(struct net_device *dev)
 #endif
 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
 
-	if (priv->is_gmac)
-		priv->rx_csum = 1;
-
 	if (flow_ctrl)
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
@@ -1522,12 +1478,15 @@ static int stmmac_probe(struct net_device *dev)
 	netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
 
 	/* Get the MAC address */
-	priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+	priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+				     dev->dev_addr, 0);
 
 	if (!is_valid_ether_addr(dev->dev_addr))
 		pr_warning("\tno valid MAC address;"
			"please, use ifconfig or nwhwconfig!\n");
 
+	spin_lock_init(&priv->lock);
+
 	ret = register_netdev(dev);
 	if (ret) {
 		pr_err("%s: ERROR %i registering the device\n",
@@ -1537,9 +1496,7 @@ static int stmmac_probe(struct net_device *dev)
 
 	DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
 	    dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-	    (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
-
-	spin_lock_init(&priv->lock);
+	    (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
 	return ret;
 }
@@ -1552,19 +1509,18 @@ static int stmmac_probe(struct net_device *dev)
 static int stmmac_mac_device_setup(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 
 	struct mac_device_info *device;
 
-	if (priv->is_gmac)
-		device = dwmac1000_setup(ioaddr);
+	if (priv->plat->has_gmac)
+		device = dwmac1000_setup(priv->ioaddr);
 	else
-		device = dwmac100_setup(ioaddr);
+		device = dwmac100_setup(priv->ioaddr);
 
 	if (!device)
 		return -ENOMEM;
 
-	if (priv->enh_desc) {
+	if (priv->plat->enh_desc) {
 		device->desc = &enh_desc_ops;
 		pr_info("\tEnhanced descriptor structure\n");
 	} else
@@ -1572,9 +1528,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
 	priv->hw = device;
 
-	priv->wolenabled = priv->hw->pmt;	/* PMT supported */
-	if (priv->wolenabled == PMT_SUPPORTED)
-		priv->wolopts = WAKE_MAGIC;	/* Magic Frame */
+	if (device_can_wakeup(priv->device)) {
+		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
@@ -1619,7 +1576,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
 			 plat_dat->bus_id);
 
 	/* Check that this phy is for the MAC being initialised */
-	if (priv->bus_id != plat_dat->bus_id)
+	if (priv->plat->bus_id != plat_dat->bus_id)
 		return 0;
 
 	/* OK, this PHY is connected to the MAC.
@@ -1653,40 +1610,37 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct resource *res;
-	unsigned int *addr = NULL;
+	void __iomem *addr = NULL;
 	struct net_device *ndev = NULL;
-	struct stmmac_priv *priv;
+	struct stmmac_priv *priv = NULL;
 	struct plat_stmmacenet_data *plat_dat;
 
 	pr_info("STMMAC driver:\n\tplatform registration... ");
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENODEV;
-		goto out;
-	}
-	pr_info("done!\n");
+	if (!res)
+		return -ENODEV;
+	pr_info("\tdone!\n");
 
 	if (!request_mem_region(res->start, resource_size(res),
 				pdev->name)) {
 		pr_err("%s: ERROR: memory allocation failed"
 		       "cannot get the I/O addr 0x%x\n",
 		       __func__, (unsigned int)res->start);
-		ret = -EBUSY;
-		goto out;
+		return -EBUSY;
 	}
 
 	addr = ioremap(res->start, resource_size(res));
 	if (!addr) {
 		pr_err("%s: ERROR: memory mapping failed\n", __func__);
 		ret = -ENOMEM;
-		goto out;
+		goto out_release_region;
 	}
 
 	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
 	if (!ndev) {
 		pr_err("%s: ERROR: allocating the device\n", __func__);
 		ret = -ENOMEM;
-		goto out;
+		goto out_unmap;
 	}
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1696,38 +1650,46 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 	if (ndev->irq == -ENXIO) {
 		pr_err("%s: ERROR: MAC IRQ configuration "
 		       "information not found\n", __func__);
-		ret = -ENODEV;
-		goto out;
+		ret = -ENXIO;
+		goto out_free_ndev;
 	}
 
 	priv = netdev_priv(ndev);
 	priv->device = &(pdev->dev);
 	priv->dev = ndev;
 	plat_dat = pdev->dev.platform_data;
-	priv->bus_id = plat_dat->bus_id;
-	priv->pbl = plat_dat->pbl;	/* TLI */
-	priv->is_gmac = plat_dat->has_gmac;	/* GMAC is on board */
-	priv->enh_desc = plat_dat->enh_desc;
+
+	priv->plat = plat_dat;
+
+	priv->ioaddr = addr;
+
+	/* PMT module is not integrated in all the MAC devices. */
+	if (plat_dat->pmt) {
+		pr_info("\tPMT module supported\n");
+		device_set_wakeup_capable(&pdev->dev, 1);
+	}
 
 	platform_set_drvdata(pdev, ndev);
 
 	/* Set the I/O base addr */
 	ndev->base_addr = (unsigned long)addr;
 
-	/* Verify embedded resource for the platform */
-	ret = stmmac_claim_resource(pdev);
-	if (ret < 0)
-		goto out;
+	/* Custom initialisation */
+	if (priv->plat->init) {
+		ret = priv->plat->init(pdev);
+		if (unlikely(ret))
+			goto out_free_ndev;
+	}
 
 	/* MAC HW revice detection */
 	ret = stmmac_mac_device_setup(ndev);
 	if (ret < 0)
-		goto out;
+		goto out_plat_exit;
 
 	/* Network Device Registration */
 	ret = stmmac_probe(ndev);
 	if (ret < 0)
-		goto out;
+		goto out_plat_exit;
 
 	/* associate a PHY - it is provided by another platform bus */
 	if (!driver_for_each_device
@@ -1735,31 +1697,33 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 	     stmmac_associate_phy)) {
 		pr_err("No PHY device is associated with this MAC!\n");
 		ret = -ENODEV;
-		goto out;
+		goto out_unregister;
 	}
 
-	priv->fix_mac_speed = plat_dat->fix_mac_speed;
-	priv->bus_setup = plat_dat->bus_setup;
-	priv->bsp_priv = plat_dat->bsp_priv;
-
 	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
-	       "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
-	       pdev->id, ndev->irq, (unsigned int)addr);
+	       "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+	       pdev->id, ndev->irq, addr);
 
 	/* MDIO bus Registration */
-	pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+	pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
 	ret = stmmac_mdio_register(ndev);
 	if (ret < 0)
-		goto out;
+		goto out_unregister;
 	pr_debug("registered!\n");
+	return 0;
 
-out:
-	if (ret < 0) {
-		platform_set_drvdata(pdev, NULL);
-		release_mem_region(res->start, resource_size(res));
-		if (addr != NULL)
-			iounmap(addr);
-	}
+out_unregister:
+	unregister_netdev(ndev);
+out_plat_exit:
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+out_free_ndev:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+out_unmap:
+	iounmap(addr);
+out_release_region:
+	release_mem_region(res->start, resource_size(res));
 
 	return ret;
 }
@@ -1779,20 +1743,22 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 
 	pr_info("%s:\n\tremoving driver", __func__);
 
-	priv->hw->dma->stop_rx(ndev->base_addr);
-	priv->hw->dma->stop_tx(ndev->base_addr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_mac_disable_rx(ndev->base_addr);
-	stmmac_mac_disable_tx(ndev->base_addr);
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(ndev);
 
 	stmmac_mdio_unregister(ndev);
 
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+
 	platform_set_drvdata(pdev, NULL);
 	unregister_netdev(ndev);
 
-	iounmap((void *)ndev->base_addr);
+	iounmap((void *)priv->ioaddr);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
@@ -1802,75 +1768,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+static int stmmac_suspend(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct stmmac_priv *priv = netdev_priv(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	int dis_ic = 0;
 
-	if (!dev || !netif_running(dev))
+	if (!ndev || !netif_running(ndev))
 		return 0;
 
 	spin_lock(&priv->lock);
 
-	if (state.event == PM_EVENT_SUSPEND) {
-		netif_device_detach(dev);
-		netif_stop_queue(dev);
-		if (priv->phydev)
-			phy_stop(priv->phydev);
+	netif_device_detach(ndev);
+	netif_stop_queue(ndev);
+	if (priv->phydev)
+		phy_stop(priv->phydev);
 
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm->timer_stop();
 	if (likely(priv->tm->enable))
 		dis_ic = 1;
 #endif
 	napi_disable(&priv->napi);
 
 	/* Stop TX/RX DMA */
-	priv->hw->dma->stop_tx(dev->base_addr);
-	priv->hw->dma->stop_rx(dev->base_addr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
 	/* Clear the Rx/Tx descriptors */
 	priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
 				     dis_ic);
 	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-	stmmac_mac_disable_tx(dev->base_addr);
-
-	if (device_may_wakeup(&(pdev->dev))) {
-		/* Enable Power down mode by programming the PMT regs */
-		if (priv->wolenabled == PMT_SUPPORTED)
-			priv->hw->mac->pmt(dev->base_addr,
-					   priv->wolopts);
-	} else {
-		stmmac_mac_disable_rx(dev->base_addr);
-	}
-	} else {
-		priv->shutdown = 1;
-		/* Although this can appear slightly redundant it actually
-		 * makes fast the standby operation and guarantees the driver
-		 * working if hibernation is on media. */
-		stmmac_release(dev);
-	}
+	/* Enable Power down mode by programming the PMT regs */
+	if (device_may_wakeup(priv->device))
+		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+	else
+		stmmac_disable_mac(priv->ioaddr);
 
 	spin_unlock(&priv->lock);
 	return 0;
 }
 
-static int stmmac_resume(struct platform_device *pdev)
+static int stmmac_resume(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
-
-	if (!netif_running(dev))
-		return 0;
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 
-	if (priv->shutdown) {
-		/* Re-open the interface and re-init the MAC/DMA
-		   and the rings (i.e. on hibernation stage) */
-		stmmac_open(dev);
+	if (!netif_running(ndev))
 		return 0;
-	}
 
 	spin_lock(&priv->lock);
 
@@ -1879,44 +1824,70 @@ static int stmmac_resume(struct platform_device *pdev)
 	 * is received. Anyway, it's better to manually clear
 	 * this bit because it can generate problems while resuming
 	 * from another devices (e.g. serial console). */
-	if (device_may_wakeup(&(pdev->dev)))
-		if (priv->wolenabled == PMT_SUPPORTED)
-			priv->hw->mac->pmt(dev->base_addr, 0);
+	if (device_may_wakeup(priv->device))
+		priv->hw->mac->pmt(priv->ioaddr, 0);
 
-	netif_device_attach(dev);
+	netif_device_attach(ndev);
 
 	/* Enable the MAC and DMA */
-	stmmac_mac_enable_rx(ioaddr);
-	stmmac_mac_enable_tx(ioaddr);
-	priv->hw->dma->start_tx(ioaddr);
-	priv->hw->dma->start_rx(ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
-	priv->tm->timer_start(tmrate);
+	if (likely(priv->tm->enable))
+		priv->tm->timer_start(tmrate);
 #endif
 	napi_enable(&priv->napi);
 
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
-	netif_start_queue(dev);
+	netif_start_queue(ndev);
 
 	spin_unlock(&priv->lock);
 	return 0;
 }
-#endif
 
-static struct platform_driver stmmac_driver = {
-	.driver = {
-		.name = STMMAC_RESOURCE_NAME,
-	},
-	.probe = stmmac_dvr_probe,
-	.remove = stmmac_dvr_remove,
-#ifdef CONFIG_PM
+static int stmmac_freeze(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
 	.suspend = stmmac_suspend,
 	.resume = stmmac_resume,
-#endif
+	.freeze = stmmac_freeze,
+	.thaw = stmmac_restore,
+	.restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
 
+static struct platform_driver stmmac_driver = {
+	.probe = stmmac_dvr_probe,
+	.remove = stmmac_dvr_remove,
+	.driver = {
+		.name = STMMAC_RESOURCE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &stmmac_pm_ops,
+	},
 };
 
 /**
@@ -1968,8 +1939,6 @@ static int __init stmmac_cmdline_opt(char *str)
 		strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
 	else if (!strncmp(opt, "tc:", 3))
 		strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
-	else if (!strncmp(opt, "tx_coe:", 7))
-		strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
 	else if (!strncmp(opt, "watchdog:", 9))
 		strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
 	else if (!strncmp(opt, "flow_ctrl:", 10))