author	Mika Westerberg <mika.westerberg@iki.fi>	2011-06-11 04:39:56 -0400
committer	David S. Miller <davem@davemloft.net>	2011-06-11 19:25:15 -0400
commit	3247a1fcee49b571b40c4bd723439ce5c64f56ad (patch)
tree	a84736c6e3522d42597f8501675fa845d586f473 /drivers/net
parent	fc9b4910b00039da054f221e2821be0519261101 (diff)
net: ep93xx_eth: allocate buffers using kmalloc()
We can simply use kmalloc() to allocate the buffers. This also simplifies
the code and allows us to perform DMA sync operations more easily. Memory
is allocated with plain GFP_KERNEL, since there are no DMA allocation
restrictions on this platform.

Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Tested-by: Petr Stetiar <ynezz@true.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
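For context, a minimal sketch of the allocation pattern this patch moves to: one kmalloc()'ed buffer per ring entry, streaming-mapped with dma_map_single(). PKT_BUF_SIZE and the mapping calls are taken from the patch below (the removed "sub-2K packet, two buffers per page" comment implies 2048 bytes); the helper name ep93xx_alloc_rx_buf() is hypothetical, introduced only for illustration.

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#define PKT_BUF_SIZE	2048	/* sub-2K hardware packet limit, per the removed comment */

/* Hypothetical helper showing the new per-entry allocation. */
static void *ep93xx_alloc_rx_buf(struct device *dev, dma_addr_t *dma)
{
	void *buf;

	/* Plain GFP_KERNEL: no DMA addressing restrictions on this platform. */
	buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	/* The streaming mapping covers exactly the buffer, not a whole page. */
	*dma = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(buf);
		return NULL;
	}

	return buf;
}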
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/arm/ep93xx_eth.c	51
1 file changed, 20 insertions(+), 31 deletions(-)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index f65dfb6e5705..97bf6b10ce61 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -460,36 +460,32 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
-			free_page((unsigned long)ep->rx_buf[i]);
+			kfree(ep->rx_buf[i]);
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(dev, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
-			free_page((unsigned long)ep->tx_buf[i]);
+			kfree(ep->tx_buf[i]);
 	}
 
 	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 			  ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
 	struct device *dev = ep->dev->dev.parent;
@@ -500,48 +496,41 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 	if (ep->descs == NULL)
 		return 1;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(dev, page, PAGE_SIZE, DMA_FROM_DEVICE);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(dev, d)) {
-			free_page((unsigned long)page);
+			kfree(buf);
 			goto err;
 		}
 
-		ep->rx_buf[i] = page;
+		ep->rx_buf[i] = buf;
 		ep->descs->rdesc[i].buf_addr = d;
 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(dev, page, PAGE_SIZE, DMA_TO_DEVICE);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, d)) {
-			free_page((unsigned long)page);
+			kfree(buf);
 			goto err;
 		}
 
-		ep->tx_buf[i] = page;
+		ep->tx_buf[i] = buf;
 		ep->descs->tdesc[i].buf_addr = d;
-
-		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
 	}
 
 	return 0;
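The commit message's point about DMA sync becoming easier can be illustrated with a short, hedged sketch: with a one-to-one buffer/mapping relationship, the RX path can sync exactly the received bytes before the CPU reads them, then hand the buffer back to the device. The function below is illustrative only, not part of this patch; the length parameter stands in for whatever size the RX descriptor reports.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: sync a kmalloc()'ed RX buffer around a CPU copy. */
static void ep93xx_rx_copy_example(struct device *dev, dma_addr_t d,
				   void *buf, int length, struct sk_buff *skb)
{
	/* Give the received bytes back to the CPU... */
	dma_sync_single_for_cpu(dev, d, length, DMA_FROM_DEVICE);

	skb_copy_to_linear_data(skb, buf, length);

	/* ...then return ownership of the buffer to the device. */
	dma_sync_single_for_device(dev, d, length, DMA_FROM_DEVICE);
}

With the old page-pair scheme, a sync would have had to reason about which half of a shared page a descriptor pointed at; after this patch each descriptor owns its whole mapping.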