aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/stmmac/stmmac_main.c
diff options
context:
space:
mode:
authorGiuseppe CAVALLARO <peppe.cavallaro@st.com>2010-01-06 18:07:17 -0500
committerDavid S. Miller <davem@davemloft.net>2010-01-07 20:06:08 -0500
commitdb98a0b001df79ffcdd4f231c3516411786a1113 (patch)
tree0a21ac92a40c2c6cefc3e442b1dfa40982da446a /drivers/net/stmmac/stmmac_main.c
parent65818fa744e70a58d230083dda1f1cd8e5c5e2ee (diff)
stmmac: reorganise class operations.
This patch reorganises the internal stmmac ops structure. The stmmac_ops has been split into three other structures named: stmmac_ops stmmac_dma_ops stmmac_desc_ops This makes the code clearer and also helps the next work to make the driver more generic. Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/stmmac/stmmac_main.c')
-rw-r--r--drivers/net/stmmac/stmmac_main.c138
1 file changed, 66 insertions, 72 deletions
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 82ebbc0c8839..86e910300969 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -225,38 +225,34 @@ static void stmmac_adjust_link(struct net_device *dev)
225 if (phydev->duplex != priv->oldduplex) { 225 if (phydev->duplex != priv->oldduplex) {
226 new_state = 1; 226 new_state = 1;
227 if (!(phydev->duplex)) 227 if (!(phydev->duplex))
228 ctrl &= ~priv->mac_type->hw.link.duplex; 228 ctrl &= ~priv->hw->link.duplex;
229 else 229 else
230 ctrl |= priv->mac_type->hw.link.duplex; 230 ctrl |= priv->hw->link.duplex;
231 priv->oldduplex = phydev->duplex; 231 priv->oldduplex = phydev->duplex;
232 } 232 }
233 /* Flow Control operation */ 233 /* Flow Control operation */
234 if (phydev->pause) 234 if (phydev->pause)
235 priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, 235 priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
236 fc, pause_time); 236 fc, pause_time);
237 237
238 if (phydev->speed != priv->speed) { 238 if (phydev->speed != priv->speed) {
239 new_state = 1; 239 new_state = 1;
240 switch (phydev->speed) { 240 switch (phydev->speed) {
241 case 1000: 241 case 1000:
242 if (likely(priv->is_gmac)) 242 if (likely(priv->is_gmac))
243 ctrl &= ~priv->mac_type->hw.link.port; 243 ctrl &= ~priv->hw->link.port;
244 break; 244 break;
245 case 100: 245 case 100:
246 case 10: 246 case 10:
247 if (priv->is_gmac) { 247 if (priv->is_gmac) {
248 ctrl |= priv->mac_type->hw.link.port; 248 ctrl |= priv->hw->link.port;
249 if (phydev->speed == SPEED_100) { 249 if (phydev->speed == SPEED_100) {
250 ctrl |= 250 ctrl |= priv->hw->link.speed;
251 priv->mac_type->hw.link.
252 speed;
253 } else { 251 } else {
254 ctrl &= 252 ctrl &= ~(priv->hw->link.speed);
255 ~(priv->mac_type->hw.
256 link.speed);
257 } 253 }
258 } else { 254 } else {
259 ctrl &= ~priv->mac_type->hw.link.port; 255 ctrl &= ~priv->hw->link.port;
260 } 256 }
261 if (likely(priv->fix_mac_speed)) 257 if (likely(priv->fix_mac_speed))
262 priv->fix_mac_speed(priv->bsp_priv, 258 priv->fix_mac_speed(priv->bsp_priv,
@@ -509,8 +505,8 @@ static void init_dma_desc_rings(struct net_device *dev)
509 priv->cur_tx = 0; 505 priv->cur_tx = 0;
510 506
511 /* Clear the Rx/Tx descriptors */ 507 /* Clear the Rx/Tx descriptors */
512 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); 508 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
513 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); 509 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
514 510
515 if (netif_msg_hw(priv)) { 511 if (netif_msg_hw(priv)) {
516 pr_info("RX descriptor ring:\n"); 512 pr_info("RX descriptor ring:\n");
@@ -545,8 +541,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
545 struct dma_desc *p = priv->dma_tx + i; 541 struct dma_desc *p = priv->dma_tx + i;
546 if (p->des2) 542 if (p->des2)
547 dma_unmap_single(priv->device, p->des2, 543 dma_unmap_single(priv->device, p->des2,
548 priv->mac_type->ops->get_tx_len(p), 544 priv->hw->desc->get_tx_len(p),
549 DMA_TO_DEVICE); 545 DMA_TO_DEVICE);
550 dev_kfree_skb_any(priv->tx_skbuff[i]); 546 dev_kfree_skb_any(priv->tx_skbuff[i]);
551 priv->tx_skbuff[i] = NULL; 547 priv->tx_skbuff[i] = NULL;
552 } 548 }
@@ -630,18 +626,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
630{ 626{
631 if (!priv->is_gmac) { 627 if (!priv->is_gmac) {
632 /* MAC 10/100 */ 628 /* MAC 10/100 */
633 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); 629 priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
634 priv->tx_coe = NO_HW_CSUM; 630 priv->tx_coe = NO_HW_CSUM;
635 } else { 631 } else {
636 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 632 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
637 priv->mac_type->ops->dma_mode(priv->dev->base_addr, 633 priv->hw->dma->dma_mode(priv->dev->base_addr,
638 SF_DMA_MODE, SF_DMA_MODE); 634 SF_DMA_MODE, SF_DMA_MODE);
639 tc = SF_DMA_MODE; 635 tc = SF_DMA_MODE;
640 priv->tx_coe = HW_CSUM; 636 priv->tx_coe = HW_CSUM;
641 } else { 637 } else {
642 /* Checksum computation is performed in software. */ 638 /* Checksum computation is performed in software. */
643 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 639 priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
644 SF_DMA_MODE); 640 SF_DMA_MODE);
645 priv->tx_coe = NO_HW_CSUM; 641 priv->tx_coe = NO_HW_CSUM;
646 } 642 }
647 } 643 }
@@ -749,16 +745,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
749 struct dma_desc *p = priv->dma_tx + entry; 745 struct dma_desc *p = priv->dma_tx + entry;
750 746
751 /* Check if the descriptor is owned by the DMA. */ 747 /* Check if the descriptor is owned by the DMA. */
752 if (priv->mac_type->ops->get_tx_owner(p)) 748 if (priv->hw->desc->get_tx_owner(p))
753 break; 749 break;
754 750
755 /* Verify tx error by looking at the last segment */ 751 /* Verify tx error by looking at the last segment */
756 last = priv->mac_type->ops->get_tx_ls(p); 752 last = priv->hw->desc->get_tx_ls(p);
757 if (likely(last)) { 753 if (likely(last)) {
758 int tx_error = 754 int tx_error =
759 priv->mac_type->ops->tx_status(&priv->dev->stats, 755 priv->hw->desc->tx_status(&priv->dev->stats,
760 &priv->xstats, 756 &priv->xstats, p,
761 p, ioaddr); 757 ioaddr);
762 if (likely(tx_error == 0)) { 758 if (likely(tx_error == 0)) {
763 priv->dev->stats.tx_packets++; 759 priv->dev->stats.tx_packets++;
764 priv->xstats.tx_pkt_n++; 760 priv->xstats.tx_pkt_n++;
@@ -770,7 +766,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
770 766
771 if (likely(p->des2)) 767 if (likely(p->des2))
772 dma_unmap_single(priv->device, p->des2, 768 dma_unmap_single(priv->device, p->des2,
773 priv->mac_type->ops->get_tx_len(p), 769 priv->hw->desc->get_tx_len(p),
774 DMA_TO_DEVICE); 770 DMA_TO_DEVICE);
775 if (unlikely(p->des3)) 771 if (unlikely(p->des3))
776 p->des3 = 0; 772 p->des3 = 0;
@@ -791,7 +787,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
791 priv->tx_skbuff[entry] = NULL; 787 priv->tx_skbuff[entry] = NULL;
792 } 788 }
793 789
794 priv->mac_type->ops->release_tx_desc(p); 790 priv->hw->desc->release_tx_desc(p);
795 791
796 entry = (++priv->dirty_tx) % txsize; 792 entry = (++priv->dirty_tx) % txsize;
797 } 793 }
@@ -833,7 +829,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
833 unsigned int has_work = 0; 829 unsigned int has_work = 0;
834 int rxret, tx_work = 0; 830 int rxret, tx_work = 0;
835 831
836 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + 832 rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
837 (priv->cur_rx % priv->dma_rx_size)); 833 (priv->cur_rx % priv->dma_rx_size));
838 834
839 if (priv->dirty_tx != priv->cur_tx) 835 if (priv->dirty_tx != priv->cur_tx)
@@ -886,7 +882,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
886 882
887 stmmac_dma_stop_tx(priv->dev->base_addr); 883 stmmac_dma_stop_tx(priv->dev->base_addr);
888 dma_free_tx_skbufs(priv); 884 dma_free_tx_skbufs(priv);
889 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 885 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
890 priv->dirty_tx = 0; 886 priv->dirty_tx = 0;
891 priv->cur_tx = 0; 887 priv->cur_tx = 0;
892 stmmac_dma_start_tx(priv->dev->base_addr); 888 stmmac_dma_start_tx(priv->dev->base_addr);
@@ -926,8 +922,8 @@ static void stmmac_dma_interrupt(struct net_device *dev)
926 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 922 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
927 /* Try to bump up the threshold */ 923 /* Try to bump up the threshold */
928 tc += 64; 924 tc += 64;
929 priv->mac_type->ops->dma_mode(ioaddr, tc, 925 priv->hw->dma->dma_mode(ioaddr, tc,
930 SF_DMA_MODE); 926 SF_DMA_MODE);
931 priv->xstats.threshold = tc; 927 priv->xstats.threshold = tc;
932 } 928 }
933 stmmac_tx_err(priv); 929 stmmac_tx_err(priv);
@@ -1059,20 +1055,20 @@ static int stmmac_open(struct net_device *dev)
1059 init_dma_desc_rings(dev); 1055 init_dma_desc_rings(dev);
1060 1056
1061 /* DMA initialization and SW reset */ 1057 /* DMA initialization and SW reset */
1062 if (unlikely(priv->mac_type->ops->dma_init(ioaddr, 1058 if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
1063 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { 1059 priv->dma_rx_phy) < 0)) {
1064 1060
1065 pr_err("%s: DMA initialization failed\n", __func__); 1061 pr_err("%s: DMA initialization failed\n", __func__);
1066 return -1; 1062 return -1;
1067 } 1063 }
1068 1064
1069 /* Copy the MAC addr into the HW */ 1065 /* Copy the MAC addr into the HW */
1070 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); 1066 priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
1071 /* If required, perform hw setup of the bus. */ 1067 /* If required, perform hw setup of the bus. */
1072 if (priv->bus_setup) 1068 if (priv->bus_setup)
1073 priv->bus_setup(ioaddr); 1069 priv->bus_setup(ioaddr);
1074 /* Initialize the MAC Core */ 1070 /* Initialize the MAC Core */
1075 priv->mac_type->ops->core_init(ioaddr); 1071 priv->hw->mac->core_init(ioaddr);
1076 1072
1077 priv->shutdown = 0; 1073 priv->shutdown = 0;
1078 1074
@@ -1101,8 +1097,8 @@ static int stmmac_open(struct net_device *dev)
1101#endif 1097#endif
1102 /* Dump DMA/MAC registers */ 1098 /* Dump DMA/MAC registers */
1103 if (netif_msg_hw(priv)) { 1099 if (netif_msg_hw(priv)) {
1104 priv->mac_type->ops->dump_mac_regs(ioaddr); 1100 priv->hw->mac->dump_regs(ioaddr);
1105 priv->mac_type->ops->dump_dma_regs(ioaddr); 1101 priv->hw->dma->dump_regs(ioaddr);
1106 } 1102 }
1107 1103
1108 if (priv->phydev) 1104 if (priv->phydev)
@@ -1218,8 +1214,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1218 desc->des2 = dma_map_single(priv->device, skb->data, 1214 desc->des2 = dma_map_single(priv->device, skb->data,
1219 BUF_SIZE_8KiB, DMA_TO_DEVICE); 1215 BUF_SIZE_8KiB, DMA_TO_DEVICE);
1220 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1216 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1221 priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, 1217 priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
1222 csum_insertion); 1218 csum_insertion);
1223 1219
1224 entry = (++priv->cur_tx) % txsize; 1220 entry = (++priv->cur_tx) % txsize;
1225 desc = priv->dma_tx + entry; 1221 desc = priv->dma_tx + entry;
@@ -1228,16 +1224,17 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1228 skb->data + BUF_SIZE_8KiB, 1224 skb->data + BUF_SIZE_8KiB,
1229 buf2_size, DMA_TO_DEVICE); 1225 buf2_size, DMA_TO_DEVICE);
1230 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1226 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1231 priv->mac_type->ops->prepare_tx_desc(desc, 0, 1227 priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
1232 buf2_size, csum_insertion); 1228 csum_insertion);
1233 priv->mac_type->ops->set_tx_owner(desc); 1229 priv->hw->desc->set_tx_owner(desc);
1230
1234 priv->tx_skbuff[entry] = NULL; 1231 priv->tx_skbuff[entry] = NULL;
1235 } else { 1232 } else {
1236 desc->des2 = dma_map_single(priv->device, skb->data, 1233 desc->des2 = dma_map_single(priv->device, skb->data,
1237 nopaged_len, DMA_TO_DEVICE); 1234 nopaged_len, DMA_TO_DEVICE);
1238 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1235 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1239 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1236 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1240 csum_insertion); 1237 csum_insertion);
1241 } 1238 }
1242 return entry; 1239 return entry;
1243} 1240}
@@ -1305,8 +1302,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1305 unsigned int nopaged_len = skb_headlen(skb); 1302 unsigned int nopaged_len = skb_headlen(skb);
1306 desc->des2 = dma_map_single(priv->device, skb->data, 1303 desc->des2 = dma_map_single(priv->device, skb->data,
1307 nopaged_len, DMA_TO_DEVICE); 1304 nopaged_len, DMA_TO_DEVICE);
1308 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1305 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1309 csum_insertion); 1306 csum_insertion);
1310 } 1307 }
1311 1308
1312 for (i = 0; i < nfrags; i++) { 1309 for (i = 0; i < nfrags; i++) {
@@ -1321,21 +1318,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1321 frag->page_offset, 1318 frag->page_offset,
1322 len, DMA_TO_DEVICE); 1319 len, DMA_TO_DEVICE);
1323 priv->tx_skbuff[entry] = NULL; 1320 priv->tx_skbuff[entry] = NULL;
1324 priv->mac_type->ops->prepare_tx_desc(desc, 0, len, 1321 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1325 csum_insertion); 1322 priv->hw->desc->set_tx_owner(desc);
1326 priv->mac_type->ops->set_tx_owner(desc);
1327 } 1323 }
1328 1324
1329 /* Interrupt on completition only for the latest segment */ 1325 /* Interrupt on completition only for the latest segment */
1330 priv->mac_type->ops->close_tx_desc(desc); 1326 priv->hw->desc->close_tx_desc(desc);
1331 1327
1332#ifdef CONFIG_STMMAC_TIMER 1328#ifdef CONFIG_STMMAC_TIMER
1333 /* Clean IC while using timer */ 1329 /* Clean IC while using timer */
1334 if (likely(priv->tm->enable)) 1330 if (likely(priv->tm->enable))
1335 priv->mac_type->ops->clear_tx_ic(desc); 1331 priv->hw->desc->clear_tx_ic(desc);
1336#endif 1332#endif
1337 /* To avoid raise condition */ 1333 /* To avoid raise condition */
1338 priv->mac_type->ops->set_tx_owner(first); 1334 priv->hw->desc->set_tx_owner(first);
1339 1335
1340 priv->cur_tx++; 1336 priv->cur_tx++;
1341 1337
@@ -1395,7 +1391,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1395 } 1391 }
1396 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1392 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1397 } 1393 }
1398 priv->mac_type->ops->set_rx_owner(p + entry); 1394 priv->hw->desc->set_rx_owner(p + entry);
1399 } 1395 }
1400 return; 1396 return;
1401} 1397}
@@ -1416,7 +1412,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1416 } 1412 }
1417#endif 1413#endif
1418 count = 0; 1414 count = 0;
1419 while (!priv->mac_type->ops->get_rx_owner(p)) { 1415 while (!priv->hw->desc->get_rx_owner(p)) {
1420 int status; 1416 int status;
1421 1417
1422 if (count >= limit) 1418 if (count >= limit)
@@ -1429,15 +1425,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1429 prefetch(p_next); 1425 prefetch(p_next);
1430 1426
1431 /* read the status of the incoming frame */ 1427 /* read the status of the incoming frame */
1432 status = (priv->mac_type->ops->rx_status(&priv->dev->stats, 1428 status = (priv->hw->desc->rx_status(&priv->dev->stats,
1433 &priv->xstats, p)); 1429 &priv->xstats, p));
1434 if (unlikely(status == discard_frame)) 1430 if (unlikely(status == discard_frame))
1435 priv->dev->stats.rx_errors++; 1431 priv->dev->stats.rx_errors++;
1436 else { 1432 else {
1437 struct sk_buff *skb; 1433 struct sk_buff *skb;
1438 /* Length should omit the CRC */ 1434 /* Length should omit the CRC */
1439 int frame_len = 1435 int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
1440 priv->mac_type->ops->get_rx_frame_len(p) - 4;
1441 1436
1442#ifdef STMMAC_RX_DEBUG 1437#ifdef STMMAC_RX_DEBUG
1443 if (frame_len > ETH_FRAME_LEN) 1438 if (frame_len > ETH_FRAME_LEN)
@@ -1573,7 +1568,7 @@ static void stmmac_multicast_list(struct net_device *dev)
1573 struct stmmac_priv *priv = netdev_priv(dev); 1568 struct stmmac_priv *priv = netdev_priv(dev);
1574 1569
1575 spin_lock(&priv->lock); 1570 spin_lock(&priv->lock);
1576 priv->mac_type->ops->set_filter(dev); 1571 priv->hw->mac->set_filter(dev);
1577 spin_unlock(&priv->lock); 1572 spin_unlock(&priv->lock);
1578 return; 1573 return;
1579} 1574}
@@ -1627,7 +1622,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1627 if (priv->is_gmac) { 1622 if (priv->is_gmac) {
1628 unsigned long ioaddr = dev->base_addr; 1623 unsigned long ioaddr = dev->base_addr;
1629 /* To handle GMAC own interrupts */ 1624 /* To handle GMAC own interrupts */
1630 priv->mac_type->ops->host_irq_status(ioaddr); 1625 priv->hw->mac->host_irq_status(ioaddr);
1631 } 1626 }
1632 stmmac_dma_interrupt(dev); 1627 stmmac_dma_interrupt(dev);
1633 1628
@@ -1748,7 +1743,7 @@ static int stmmac_probe(struct net_device *dev)
1748 netif_napi_add(dev, &priv->napi, stmmac_poll, 64); 1743 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1749 1744
1750 /* Get the MAC address */ 1745 /* Get the MAC address */
1751 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); 1746 priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1752 1747
1753 if (!is_valid_ether_addr(dev->dev_addr)) 1748 if (!is_valid_ether_addr(dev->dev_addr))
1754 pr_warning("\tno valid MAC address;" 1749 pr_warning("\tno valid MAC address;"
@@ -1790,9 +1785,9 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1790 if (!device) 1785 if (!device)
1791 return -ENOMEM; 1786 return -ENOMEM;
1792 1787
1793 priv->mac_type = device; 1788 priv->hw = device;
1794 1789
1795 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ 1790 priv->wolenabled = priv->hw->pmt; /* PMT supported */
1796 if (priv->wolenabled == PMT_SUPPORTED) 1791 if (priv->wolenabled == PMT_SUPPORTED)
1797 priv->wolopts = WAKE_MAGIC; /* Magic Frame */ 1792 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1798 1793
@@ -2048,18 +2043,17 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2048 stmmac_dma_stop_tx(dev->base_addr); 2043 stmmac_dma_stop_tx(dev->base_addr);
2049 stmmac_dma_stop_rx(dev->base_addr); 2044 stmmac_dma_stop_rx(dev->base_addr);
2050 /* Clear the Rx/Tx descriptors */ 2045 /* Clear the Rx/Tx descriptors */
2051 priv->mac_type->ops->init_rx_desc(priv->dma_rx, 2046 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
2052 priv->dma_rx_size, dis_ic); 2047 dis_ic);
2053 priv->mac_type->ops->init_tx_desc(priv->dma_tx, 2048 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2054 priv->dma_tx_size);
2055 2049
2056 stmmac_mac_disable_tx(dev->base_addr); 2050 stmmac_mac_disable_tx(dev->base_addr);
2057 2051
2058 if (device_may_wakeup(&(pdev->dev))) { 2052 if (device_may_wakeup(&(pdev->dev))) {
2059 /* Enable Power down mode by programming the PMT regs */ 2053 /* Enable Power down mode by programming the PMT regs */
2060 if (priv->wolenabled == PMT_SUPPORTED) 2054 if (priv->wolenabled == PMT_SUPPORTED)
2061 priv->mac_type->ops->pmt(dev->base_addr, 2055 priv->hw->mac->pmt(dev->base_addr,
2062 priv->wolopts); 2056 priv->wolopts);
2063 } else { 2057 } else {
2064 stmmac_mac_disable_rx(dev->base_addr); 2058 stmmac_mac_disable_rx(dev->base_addr);
2065 } 2059 }
@@ -2100,7 +2094,7 @@ static int stmmac_resume(struct platform_device *pdev)
2100 * from another devices (e.g. serial console). */ 2094 * from another devices (e.g. serial console). */
2101 if (device_may_wakeup(&(pdev->dev))) 2095 if (device_may_wakeup(&(pdev->dev)))
2102 if (priv->wolenabled == PMT_SUPPORTED) 2096 if (priv->wolenabled == PMT_SUPPORTED)
2103 priv->mac_type->ops->pmt(dev->base_addr, 0); 2097 priv->hw->mac->pmt(dev->base_addr, 0);
2104 2098
2105 netif_device_attach(dev); 2099 netif_device_attach(dev);
2106 2100