author		Sekhar Nori <nsekhar@ti.com>	2010-03-08 20:20:37 -0500
committer	David S. Miller <davem@davemloft.net>	2010-03-15 18:46:59 -0400
commit		be5bce2bf5cfe021bc6bdff4d49fa18776bc293d (patch)
tree		b5c545837570c42b79e10e603fcdec506e93cae3 /drivers/net
parent		211a0d941b1924e667483f822a55e2cc694cd212 (diff)
net: davinci emac: use dma_{map, unmap}_single API for cache coherency
The davinci emac driver uses some ARM-specific DMA APIs for cache coherency which have been removed from the kernel with the 2.6.34 merge. Modify the driver to use the dma_{map, unmap}_single() APIs defined in dma-mapping.h instead.

Without this fix, the driver fails to compile against Linus's tree.

Tested on DM365 and OMAP-L138 EVMs.

Signed-off-by: Sekhar Nori <nsekhar@ti.com>
Acked-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
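For context, dma_map_single() hands a CPU buffer over to the device, performing whatever cache write-back or invalidation the architecture requires, and returns a bus address to program into the DMA descriptor; dma_unmap_single() later returns ownership of the buffer to the CPU. Below is a minimal sketch of that pattern under the same DMA_TO_DEVICE direction the patch uses for TX buffers; the helper names and error-handling convention are illustrative only and are not taken from the davinci_emac driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical helpers showing the streaming DMA pattern for a TX skb. */
static int example_tx_map(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *busaddr)
{
	/* Map for device access: write back CPU caches as needed and
	 * obtain the address to place in the TX buffer descriptor. */
	*busaddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *busaddr))
		return -ENOMEM;
	return 0;
}

static void example_tx_unmap(struct device *dev, dma_addr_t busaddr,
			     size_t len)
{
	/* Unmap after TX completion; the CPU must not touch the buffer
	 * while it remains mapped to the device. */
	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
}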
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/davinci_emac.c	45
1 file changed, 26 insertions, 19 deletions
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 32960b9b02ae..491e64cbd2a2 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -29,10 +29,6 @@
  * PHY layer usage
  */
 
-/** Pending Items in this driver:
- * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions)
- */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -504,12 +500,6 @@ static unsigned long mdio_max_freq;
 
 /* Cache macros - Packet buffers would be from skb pool which is cached */
 #define EMAC_VIRT_NOCACHE(addr) (addr)
-#define EMAC_CACHE_INVALIDATE(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE)
-#define EMAC_CACHE_WRITEBACK(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_TO_DEVICE)
-#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
-	dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL)
 
 /* DM644x does not have BD's in cached memory - so no cache functions */
 #define BD_CACHE_INVALIDATE(addr, size)
@@ -1235,6 +1225,10 @@ static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
 	if (1 == txch->queue_active) {
 		curr_bd = txch->active_queue_head;
 		while (curr_bd != NULL) {
+			dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+				curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+				DMA_TO_DEVICE);
+
 			emac_net_tx_complete(priv, (void __force *)
 					     &curr_bd->buf_token, 1, ch);
 			if (curr_bd != txch->active_queue_tail)
@@ -1327,6 +1321,11 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
 			txch->queue_active = 0; /* end of queue */
 		}
 	}
+
+	dma_unmap_single(emac_dev, curr_bd->buff_ptr,
+		curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+		DMA_TO_DEVICE);
+
 	*tx_complete_ptr = (u32) curr_bd->buf_token;
 	++tx_complete_ptr;
 	++tx_complete_cnt;
@@ -1387,8 +1386,8 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
 
 	txch->bd_pool_head = curr_bd->next;
 	curr_bd->buf_token = buf_list->buf_token;
-	/* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
-	curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
+	curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
+			buf_list->length, DMA_TO_DEVICE);
 	curr_bd->off_b_len = buf_list->length;
 	curr_bd->h_next = 0;
 	curr_bd->next = NULL;
@@ -1468,7 +1467,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tx_buf.length = skb->len;
 	tx_buf.buf_token = (void *)skb;
 	tx_buf.data_ptr = skb->data;
-	EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len);
 	ndev->trans_start = jiffies;
 	ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
 	if (unlikely(ret_code != 0)) {
@@ -1543,7 +1541,6 @@ static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
 	p_skb->dev = ndev;
 	skb_reserve(p_skb, NET_IP_ALIGN);
 	*data_token = (void *) p_skb;
-	EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size);
 	return p_skb->data;
 }
 
@@ -1612,8 +1609,8 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
 		/* populate the hardware descriptor */
 		curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
 				priv);
-		/* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
-		curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
+		curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
+				rxch->buf_size, DMA_FROM_DEVICE);
 		curr_bd->off_b_len = rxch->buf_size;
 		curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
 
@@ -1697,6 +1694,12 @@ static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
 		curr_bd = rxch->active_queue_head;
 		while (curr_bd) {
 			if (curr_bd->buf_token) {
+				dma_unmap_single(&priv->ndev->dev,
+					curr_bd->buff_ptr,
+					curr_bd->off_b_len
+					& EMAC_RX_BD_BUF_SIZE,
+					DMA_FROM_DEVICE);
+
 				dev_kfree_skb_any((struct sk_buff *)\
 						  curr_bd->buf_token);
 			}
@@ -1871,8 +1874,8 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
 
 	/* populate the hardware descriptor */
 	curr_bd->h_next = 0;
-	/* FIXME buff_ptr = dma_map_single(... buffer ...) */
-	curr_bd->buff_ptr = virt_to_phys(buffer);
+	curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
+			rxch->buf_size, DMA_FROM_DEVICE);
 	curr_bd->off_b_len = rxch->buf_size;
 	curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
 	curr_bd->next = NULL;
@@ -1927,7 +1930,6 @@ static int emac_net_rx_cb(struct emac_priv *priv,
 	p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
 	/* set length of packet */
 	skb_put(p_skb, net_pkt_list->pkt_length);
-	EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len);
 	p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
 	netif_receive_skb(p_skb);
 	priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
@@ -1990,6 +1992,11 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
 		rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
 		rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
 		rx_buf_obj->buf_token = curr_bd->buf_token;
+
+		dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
+				curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
+				DMA_FROM_DEVICE);
+
 		curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
 		curr_pkt->num_bufs = 1;
 		curr_pkt->pkt_length =