Diffstat (limited to 'drivers/net/davinci_emac.c')
-rw-r--r--	drivers/net/davinci_emac.c	74
1 file changed, 43 insertions(+), 31 deletions(-)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 13f9869927e3..2b8edd2efbf6 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -29,10 +29,6 @@
  * PHY layer usage
  */
 
-/** Pending Items in this driver:
- * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions)
- */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -504,12 +500,6 @@ static unsigned long mdio_max_freq;
 
 /* Cache macros - Packet buffers would be from skb pool which is cached */
 #define EMAC_VIRT_NOCACHE(addr) (addr)
-#define EMAC_CACHE_INVALIDATE(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE)
-#define EMAC_CACHE_WRITEBACK(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_TO_DEVICE)
-#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL)
 
 /* DM644x does not have BD's in cached memory - so no cache functions */
 #define BD_CACHE_INVALIDATE(addr, size)
@@ -1235,6 +1225,10 @@ static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
 	if (1 == txch->queue_active) {
 		curr_bd = txch->active_queue_head;
 		while (curr_bd != NULL) {
+			dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+				curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+				DMA_TO_DEVICE);
+
 			emac_net_tx_complete(priv, (void __force *)
 					&curr_bd->buf_token, 1, ch);
 			if (curr_bd != txch->active_queue_tail)
@@ -1327,6 +1321,11 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
 				txch->queue_active = 0; /* end of queue */
 			}
 		}
+
+		dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+				curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+				DMA_TO_DEVICE);
+
 		*tx_complete_ptr = (u32) curr_bd->buf_token;
 		++tx_complete_ptr;
 		++tx_complete_cnt;
@@ -1387,8 +1386,8 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
 
 	txch->bd_pool_head = curr_bd->next;
 	curr_bd->buf_token = buf_list->buf_token;
-	/* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
-	curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
+	curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
+			buf_list->length, DMA_TO_DEVICE);
 	curr_bd->off_b_len = buf_list->length;
 	curr_bd->h_next = 0;
 	curr_bd->next = NULL;
@@ -1468,7 +1467,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tx_buf.length = skb->len;
 	tx_buf.buf_token = (void *)skb;
 	tx_buf.data_ptr = skb->data;
-	EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len);
 	ndev->trans_start = jiffies;
 	ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
 	if (unlikely(ret_code != 0)) {
@@ -1543,7 +1541,6 @@ static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
 	p_skb->dev = ndev;
 	skb_reserve(p_skb, NET_IP_ALIGN);
 	*data_token = (void *) p_skb;
-	EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size);
 	return p_skb->data;
 }
 
@@ -1612,8 +1609,8 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
 		/* populate the hardware descriptor */
 		curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
 				priv);
-		/* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
-		curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
+		curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
+				rxch->buf_size, DMA_FROM_DEVICE);
 		curr_bd->off_b_len = rxch->buf_size;
 		curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
 
@@ -1697,6 +1694,12 @@ static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
 		curr_bd = rxch->active_queue_head;
 		while (curr_bd) {
 			if (curr_bd->buf_token) {
+				dma_unmap_single(&priv->ndev->dev,
+						curr_bd->buff_ptr,
+						curr_bd->off_b_len
+							& EMAC_RX_BD_BUF_SIZE,
+						DMA_FROM_DEVICE);
+
 				dev_kfree_skb_any((struct sk_buff *)\
 						curr_bd->buf_token);
 			}
@@ -1871,8 +1874,8 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
 
 	/* populate the hardware descriptor */
 	curr_bd->h_next = 0;
-	/* FIXME buff_ptr = dma_map_single(... buffer ...) */
-	curr_bd->buff_ptr = virt_to_phys(buffer);
+	curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
+				rxch->buf_size, DMA_FROM_DEVICE);
 	curr_bd->off_b_len = rxch->buf_size;
 	curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
 	curr_bd->next = NULL;
@@ -1927,7 +1930,6 @@ static int emac_net_rx_cb(struct emac_priv *priv,
 	p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
 	/* set length of packet */
 	skb_put(p_skb, net_pkt_list->pkt_length);
-	EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len);
 	p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
 	netif_receive_skb(p_skb);
 	priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
@@ -1990,6 +1992,11 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
 		rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
 		rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
 		rx_buf_obj->buf_token = curr_bd->buf_token;
+
+		dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
+				curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+				DMA_FROM_DEVICE);
+
 		curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
 		curr_pkt->num_bufs = 1;
 		curr_pkt->pkt_length =
@@ -2385,7 +2392,7 @@ static int emac_dev_open(struct net_device *ndev)
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	netif_carrier_off(ndev);
-	for (cnt = 0; cnt <= ETH_ALEN; cnt++)
+	for (cnt = 0; cnt < ETH_ALEN; cnt++)
 		ndev->dev_addr[cnt] = priv->mac_addr[cnt];
 
 	/* Configuration items */
@@ -2820,31 +2827,37 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static
-int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
+static int davinci_emac_suspend(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 
-	if (netif_running(dev))
-		emac_dev_stop(dev);
+	if (netif_running(ndev))
+		emac_dev_stop(ndev);
 
 	clk_disable(emac_clk);
 
 	return 0;
 }
 
-static int davinci_emac_resume(struct platform_device *pdev)
+static int davinci_emac_resume(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 
 	clk_enable(emac_clk);
 
-	if (netif_running(dev))
-		emac_dev_open(dev);
+	if (netif_running(ndev))
+		emac_dev_open(ndev);
 
 	return 0;
 }
 
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+	.suspend = davinci_emac_suspend,
+	.resume = davinci_emac_resume,
+};
+
 /**
  * davinci_emac_driver: EMAC platform driver structure
  */
@@ -2852,11 +2865,10 @@ static struct platform_driver davinci_emac_driver = {
 	.driver = {
 		.name = "davinci_emac",
 		.owner = THIS_MODULE,
+		.pm = &davinci_emac_pm_ops,
 	},
 	.probe = davinci_emac_probe,
 	.remove = __devexit_p(davinci_emac_remove),
-	.suspend = davinci_emac_suspend,
-	.resume = davinci_emac_resume,
 };
 
 /**
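
Note: the patch above replaces the driver's open-coded cache maintenance (dma_cache_maint() via the EMAC_CACHE_* macros) with the streaming DMA API. For reference, a minimal sketch of that map/unmap pairing follows. It is not part of the patch; the function and parameter names are placeholders, and the dma_mapping_error() check shown is general streaming-DMA practice rather than something this diff adds.

/*
 * Illustrative sketch only: map a buffer before handing it to the device,
 * unmap it once the hardware is done with it.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int example_map_for_tx(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *handle)
{
	/* Ownership passes to the device; caches are written back as needed. */
	*handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

static void example_unmap_after_tx(struct device *dev, dma_addr_t handle,
				   size_t len)
{
	/* Ownership returns to the CPU after TX completion. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}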