diff options
author | Kumar Gala <galak@kernel.crashing.org> | 2009-03-19 02:28:22 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-03-19 02:28:22 -0400 |
commit | 4826857f1bf07f9c0f1495e9b05d125552c88a85 (patch) | |
tree | 247387255d4dc1939e88fb640da7cd9ebbae8a73 /drivers/net/gianfar.c | |
parent | 4b704d59d6fb152bcd0883b84af5936a29067f12 (diff) |
gianfar: pass the proper dev to DMA ops
We need to be passing the of_platform device struct into the DMA ops as
it's the one that has the archdata set up to know which low-level DMA ops we
should be using (not the net_device one). This isn't an issue until we
expect the archdata to be set up correctly.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 34 |
1 files changed, 18 insertions, 16 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index bed30ef43797..8659833f28eb 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -365,8 +365,10 @@ static int gfar_probe(struct of_device *ofdev, | |||
365 | return -ENOMEM; | 365 | return -ENOMEM; |
366 | 366 | ||
367 | priv = netdev_priv(dev); | 367 | priv = netdev_priv(dev); |
368 | priv->dev = dev; | 368 | priv->ndev = dev; |
369 | priv->ofdev = ofdev; | ||
369 | priv->node = ofdev->node; | 370 | priv->node = ofdev->node; |
371 | SET_NETDEV_DEV(dev, &ofdev->dev); | ||
370 | 372 | ||
371 | err = gfar_of_init(dev); | 373 | err = gfar_of_init(dev); |
372 | 374 | ||
@@ -538,7 +540,7 @@ static int gfar_remove(struct of_device *ofdev) | |||
538 | dev_set_drvdata(&ofdev->dev, NULL); | 540 | dev_set_drvdata(&ofdev->dev, NULL); |
539 | 541 | ||
540 | iounmap(priv->regs); | 542 | iounmap(priv->regs); |
541 | free_netdev(priv->dev); | 543 | free_netdev(priv->ndev); |
542 | 544 | ||
543 | return 0; | 545 | return 0; |
544 | } | 546 | } |
@@ -870,7 +872,7 @@ void stop_gfar(struct net_device *dev) | |||
870 | 872 | ||
871 | free_skb_resources(priv); | 873 | free_skb_resources(priv); |
872 | 874 | ||
873 | dma_free_coherent(&dev->dev, | 875 | dma_free_coherent(&priv->ofdev->dev, |
874 | sizeof(struct txbd8)*priv->tx_ring_size | 876 | sizeof(struct txbd8)*priv->tx_ring_size |
875 | + sizeof(struct rxbd8)*priv->rx_ring_size, | 877 | + sizeof(struct rxbd8)*priv->rx_ring_size, |
876 | priv->tx_bd_base, | 878 | priv->tx_bd_base, |
@@ -892,12 +894,12 @@ static void free_skb_resources(struct gfar_private *priv) | |||
892 | if (!priv->tx_skbuff[i]) | 894 | if (!priv->tx_skbuff[i]) |
893 | continue; | 895 | continue; |
894 | 896 | ||
895 | dma_unmap_single(&priv->dev->dev, txbdp->bufPtr, | 897 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
896 | txbdp->length, DMA_TO_DEVICE); | 898 | txbdp->length, DMA_TO_DEVICE); |
897 | txbdp->lstatus = 0; | 899 | txbdp->lstatus = 0; |
898 | for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { | 900 | for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { |
899 | txbdp++; | 901 | txbdp++; |
900 | dma_unmap_page(&priv->dev->dev, txbdp->bufPtr, | 902 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
901 | txbdp->length, DMA_TO_DEVICE); | 903 | txbdp->length, DMA_TO_DEVICE); |
902 | } | 904 | } |
903 | txbdp++; | 905 | txbdp++; |
@@ -914,7 +916,7 @@ static void free_skb_resources(struct gfar_private *priv) | |||
914 | if(priv->rx_skbuff != NULL) { | 916 | if(priv->rx_skbuff != NULL) { |
915 | for (i = 0; i < priv->rx_ring_size; i++) { | 917 | for (i = 0; i < priv->rx_ring_size; i++) { |
916 | if (priv->rx_skbuff[i]) { | 918 | if (priv->rx_skbuff[i]) { |
917 | dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr, | 919 | dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, |
918 | priv->rx_buffer_size, | 920 | priv->rx_buffer_size, |
919 | DMA_FROM_DEVICE); | 921 | DMA_FROM_DEVICE); |
920 | 922 | ||
@@ -980,7 +982,7 @@ int startup_gfar(struct net_device *dev) | |||
980 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | 982 | gfar_write(®s->imask, IMASK_INIT_CLEAR); |
981 | 983 | ||
982 | /* Allocate memory for the buffer descriptors */ | 984 | /* Allocate memory for the buffer descriptors */ |
983 | vaddr = (unsigned long) dma_alloc_coherent(&dev->dev, | 985 | vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, |
984 | sizeof (struct txbd8) * priv->tx_ring_size + | 986 | sizeof (struct txbd8) * priv->tx_ring_size + |
985 | sizeof (struct rxbd8) * priv->rx_ring_size, | 987 | sizeof (struct rxbd8) * priv->rx_ring_size, |
986 | &addr, GFP_KERNEL); | 988 | &addr, GFP_KERNEL); |
@@ -1192,7 +1194,7 @@ err_rxalloc_fail: | |||
1192 | rx_skb_fail: | 1194 | rx_skb_fail: |
1193 | free_skb_resources(priv); | 1195 | free_skb_resources(priv); |
1194 | tx_skb_fail: | 1196 | tx_skb_fail: |
1195 | dma_free_coherent(&dev->dev, | 1197 | dma_free_coherent(&priv->ofdev->dev, |
1196 | sizeof(struct txbd8)*priv->tx_ring_size | 1198 | sizeof(struct txbd8)*priv->tx_ring_size |
1197 | + sizeof(struct rxbd8)*priv->rx_ring_size, | 1199 | + sizeof(struct rxbd8)*priv->rx_ring_size, |
1198 | priv->tx_bd_base, | 1200 | priv->tx_bd_base, |
@@ -1345,7 +1347,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1345 | if (i == nr_frags - 1) | 1347 | if (i == nr_frags - 1) |
1346 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 1348 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
1347 | 1349 | ||
1348 | bufaddr = dma_map_page(&dev->dev, | 1350 | bufaddr = dma_map_page(&priv->ofdev->dev, |
1349 | skb_shinfo(skb)->frags[i].page, | 1351 | skb_shinfo(skb)->frags[i].page, |
1350 | skb_shinfo(skb)->frags[i].page_offset, | 1352 | skb_shinfo(skb)->frags[i].page_offset, |
1351 | length, | 1353 | length, |
@@ -1377,7 +1379,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1377 | 1379 | ||
1378 | /* setup the TxBD length and buffer pointer for the first BD */ | 1380 | /* setup the TxBD length and buffer pointer for the first BD */ |
1379 | priv->tx_skbuff[priv->skb_curtx] = skb; | 1381 | priv->tx_skbuff[priv->skb_curtx] = skb; |
1380 | txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data, | 1382 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
1381 | skb_headlen(skb), DMA_TO_DEVICE); | 1383 | skb_headlen(skb), DMA_TO_DEVICE); |
1382 | 1384 | ||
1383 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | 1385 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
@@ -1563,7 +1565,7 @@ static void gfar_reset_task(struct work_struct *work) | |||
1563 | { | 1565 | { |
1564 | struct gfar_private *priv = container_of(work, struct gfar_private, | 1566 | struct gfar_private *priv = container_of(work, struct gfar_private, |
1565 | reset_task); | 1567 | reset_task); |
1566 | struct net_device *dev = priv->dev; | 1568 | struct net_device *dev = priv->ndev; |
1567 | 1569 | ||
1568 | if (dev->flags & IFF_UP) { | 1570 | if (dev->flags & IFF_UP) { |
1569 | stop_gfar(dev); | 1571 | stop_gfar(dev); |
@@ -1610,7 +1612,7 @@ static int gfar_clean_tx_ring(struct net_device *dev) | |||
1610 | (lstatus & BD_LENGTH_MASK)) | 1612 | (lstatus & BD_LENGTH_MASK)) |
1611 | break; | 1613 | break; |
1612 | 1614 | ||
1613 | dma_unmap_single(&dev->dev, | 1615 | dma_unmap_single(&priv->ofdev->dev, |
1614 | bdp->bufPtr, | 1616 | bdp->bufPtr, |
1615 | bdp->length, | 1617 | bdp->length, |
1616 | DMA_TO_DEVICE); | 1618 | DMA_TO_DEVICE); |
@@ -1619,7 +1621,7 @@ static int gfar_clean_tx_ring(struct net_device *dev) | |||
1619 | bdp = next_txbd(bdp, base, tx_ring_size); | 1621 | bdp = next_txbd(bdp, base, tx_ring_size); |
1620 | 1622 | ||
1621 | for (i = 0; i < frags; i++) { | 1623 | for (i = 0; i < frags; i++) { |
1622 | dma_unmap_page(&dev->dev, | 1624 | dma_unmap_page(&priv->ofdev->dev, |
1623 | bdp->bufPtr, | 1625 | bdp->bufPtr, |
1624 | bdp->length, | 1626 | bdp->length, |
1625 | DMA_TO_DEVICE); | 1627 | DMA_TO_DEVICE); |
@@ -1696,7 +1698,7 @@ static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, | |||
1696 | struct gfar_private *priv = netdev_priv(dev); | 1698 | struct gfar_private *priv = netdev_priv(dev); |
1697 | u32 lstatus; | 1699 | u32 lstatus; |
1698 | 1700 | ||
1699 | bdp->bufPtr = dma_map_single(&dev->dev, skb->data, | 1701 | bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
1700 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 1702 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
1701 | 1703 | ||
1702 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | 1704 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); |
@@ -1856,7 +1858,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1856 | 1858 | ||
1857 | skb = priv->rx_skbuff[priv->skb_currx]; | 1859 | skb = priv->rx_skbuff[priv->skb_currx]; |
1858 | 1860 | ||
1859 | dma_unmap_single(&priv->dev->dev, bdp->bufPtr, | 1861 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
1860 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 1862 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
1861 | 1863 | ||
1862 | /* We drop the frame if we failed to allocate a new buffer */ | 1864 | /* We drop the frame if we failed to allocate a new buffer */ |
@@ -1916,7 +1918,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1916 | static int gfar_poll(struct napi_struct *napi, int budget) | 1918 | static int gfar_poll(struct napi_struct *napi, int budget) |
1917 | { | 1919 | { |
1918 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); | 1920 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); |
1919 | struct net_device *dev = priv->dev; | 1921 | struct net_device *dev = priv->ndev; |
1920 | int tx_cleaned = 0; | 1922 | int tx_cleaned = 0; |
1921 | int rx_cleaned = 0; | 1923 | int rx_cleaned = 0; |
1922 | unsigned long flags; | 1924 | unsigned long flags; |