aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMika Westerberg <mika.westerberg@iki.fi>2011-06-11 04:39:55 -0400
committerDavid S. Miller <davem@davemloft.net>2011-06-11 19:25:15 -0400
commitfc9b4910b00039da054f221e2821be0519261101 (patch)
treec4042a8324555eab6e6448620fc4c5514fa44d01
parentfa70cf472c0bc3a0d7e613a418cfc1117b796c6c (diff)
net: ep93xx_eth: pass struct device to DMA API functions
We shouldn't use NULL for any DMA API functions, unless we are dealing with an ISA or EISA device. So pass the correct struct device pointer to these functions. Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/arm/ep93xx_eth.c23
1 file changed, 13 insertions, 10 deletions
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001b6d10..f65dfb6e5705 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -284,7 +284,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 			skb = dev_alloc_skb(length + 2);
 			if (likely(skb != NULL)) {
 				skb_reserve(skb, 2);
-				dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+				dma_sync_single_for_cpu(dev->dev.parent, ep->descs->rdesc[entry].buf_addr,
 							length, DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
 				skb_put(skb, length);
@@ -362,7 +362,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	ep->descs->tdesc[entry].tdesc1 =
 		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
+	dma_sync_single_for_cpu(dev->dev.parent, ep->descs->tdesc[entry].buf_addr,
 				skb->len, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
@@ -457,6 +457,7 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
 	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
@@ -464,7 +465,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
 			free_page((unsigned long)ep->rx_buf[i]);
@@ -475,13 +476,13 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PAGE_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
 			free_page((unsigned long)ep->tx_buf[i]);
 	}
 
-	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 			  ep->descs_dma_addr);
 }
 
@@ -491,9 +492,10 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
  */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
+	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
 				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
 	if (ep->descs == NULL)
 		return 1;
@@ -506,8 +508,8 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 		if (page == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
+		d = dma_map_single(dev, page, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, d)) {
 			free_page((unsigned long)page);
 			goto err;
 		}
@@ -529,8 +531,8 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 		if (page == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
+		d = dma_map_single(dev, page, PAGE_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, d)) {
 			free_page((unsigned long)page);
 			goto err;
 		}
@@ -829,6 +831,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	}
 	ep = netdev_priv(dev);
 	ep->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
 	platform_set_drvdata(pdev, dev);