Diffstat (limited to 'drivers/net/arm/ep93xx_eth.c')
-rw-r--r-- | drivers/net/arm/ep93xx_eth.c | 82
1 file changed, 41 insertions(+), 41 deletions(-)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001b6d10..0b46b8ea0e80 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
+			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);
-			dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
 						length, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+			dma_sync_single_for_device(dev->dev.parent,
+						   rxd->buf_addr, length,
+						   DMA_FROM_DEVICE);
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, dev);
 
@@ -348,6 +352,7 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	struct ep93xx_tdesc *txd;
 	int entry;
 
 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = ep->tx_pointer;
 	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-	ep->descs->tdesc[entry].tdesc1 =
-		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	txd = &ep->descs->tdesc[entry];
+
+	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+				DMA_TO_DEVICE);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
-				skb->len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+				   DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	spin_lock_irq(&ep->tx_pending_lock);
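Note: the two hunks above both adopt the usual streaming-DMA discipline for a buffer the CPU copies into or out of while it stays mapped: sync it to the CPU before touching it, copy, then hand ownership back to the device. A minimal sketch of that pattern follows; the names dev, buf, buf_phys and len are hypothetical, and the mapping is assumed to have been created earlier with dma_map_single(), as the driver does for its ring buffers.

/* Sketch only: the sync-around-CPU-access pattern used in the hunks above. */
#include <linux/dma-mapping.h>
#include <linux/string.h>

static void copy_out_of_rx_buffer(struct device *dev, void *dst,
				  const void *buf, dma_addr_t buf_phys,
				  size_t len)
{
	/* Hand the buffer to the CPU so the copy sees the device's data. */
	dma_sync_single_for_cpu(dev, buf_phys, len, DMA_FROM_DEVICE);
	memcpy(dst, buf, len);
	/* Give ownership back to the device so it may reuse the buffer. */
	dma_sync_single_for_device(dev, buf_phys, len, DMA_FROM_DEVICE);
}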
@@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
-			free_page((unsigned long)ep->rx_buf[i]);
+			kfree(ep->rx_buf[i]);
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
-			free_page((unsigned long)ep->tx_buf[i]);
+			kfree(ep->tx_buf[i]);
 	}
 
-	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 			  ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
-				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+				&ep->descs_dma_addr, GFP_KERNEL);
 	if (ep->descs == NULL)
 		return 1;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->rx_buf[i] = page;
+		ep->rx_buf[i] = buf;
 		ep->descs->rdesc[i].buf_addr = d;
 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->tx_buf[i] = page;
+		ep->tx_buf[i] = buf;
 		ep->descs->tdesc[i].buf_addr = d;
-
-		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
 	}
 
 	return 0;
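Note: after this hunk, each ring entry owns a kmalloc()'d buffer of PKT_BUF_SIZE that is mapped with a streaming DMA mapping against the real struct device, with dma_mapping_error() checked before the address is written into a descriptor. A reduced per-entry sketch of that setup is below; alloc_and_map_buffer() is a hypothetical helper, not a function from the driver.

/* Sketch only: allocate and map one RX-style buffer the way the patched
 * ep93xx_alloc_buffers() does for each ring entry.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int alloc_and_map_buffer(struct device *dev, size_t size,
				void **bufp, dma_addr_t *dmap)
{
	void *buf;
	dma_addr_t d;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	d = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, d)) {
		kfree(buf);	/* do not leave an unmapped buffer behind */
		return -ENOMEM;
	}

	*bufp = buf;
	*dmap = d;
	return 0;
}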
@@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	}
 	ep = netdev_priv(dev);
 	ep->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
 	platform_set_drvdata(pdev, dev);
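Note: this one-line probe change is what makes the DMA calls above legal; they all pass dev->dev.parent, and without SET_NETDEV_DEV() that parent pointer would remain NULL. A hedged sketch of the relevant probe fragment, with private-data size, register setup and error paths omitted:

/* Sketch only: minimal probe fragment showing the SET_NETDEV_DEV() wiring. */
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int example_eth_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(0);	/* priv size elided */

	if (!dev)
		return -ENOMEM;

	/* Make dev->dev.parent point at the platform device; the DMA API
	 * calls above rely on this being a real struct device.
	 */
	SET_NETDEV_DEV(dev, &pdev->dev);

	platform_set_drvdata(pdev, dev);
	return 0;
}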