Diffstat (limited to 'drivers/net/arm/ep93xx_eth.c')

 drivers/net/arm/ep93xx_eth.c | 121 ++++++++++++++++++-------------------------
 1 file changed, 56 insertions(+), 65 deletions(-)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 4a5ec9470aa1..0b46b8ea0e80 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -175,8 +175,6 @@ struct ep93xx_priv
 	struct net_device *dev;
 	struct napi_struct napi;
 
-	struct net_device_stats stats;
-
 	struct mii_if_info mii;
 	u8 mdc_divisor;
 };
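
Why the private counters can simply be deleted: every struct net_device already embeds a net_device_stats field, and the core returns it whenever a driver registers no stats hook of its own, which is also why the ep93xx_get_stats() wrapper and its .ndo_get_stats entry disappear in the hunks below. A simplified sketch of that fallback (paraphrasing the net/core/dev.c logic of this era, for illustration only):

#include <linux/netdevice.h>

/* Simplified fallback sketch, not driver code: with no .ndo_get_stats in
 * ep93xx_netdev_ops, the core hands out &dev->stats directly, so bumping
 * dev->stats.* counters in the driver is all that is needed.
 */
static const struct net_device_stats *get_stats_fallback(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	return &dev->stats;
}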
@@ -230,12 +228,6 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
 		pr_info("mdio write timed out\n");
 }
 
-static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
-{
-	struct ep93xx_priv *ep = netdev_priv(dev);
-	return &(ep->stats);
-}
-
 static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
@@ -267,15 +259,15 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
 
 		if (!(rstat0 & RSTAT0_RWE)) {
-			ep->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (rstat0 & RSTAT0_OE)
-				ep->stats.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 			if (rstat0 & RSTAT0_FE)
-				ep->stats.rx_frame_errors++;
+				dev->stats.rx_frame_errors++;
 			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
-				ep->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			if (rstat0 & RSTAT0_CRCE)
-				ep->stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 			goto err;
 		}
 
@@ -291,19 +283,23 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
+			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);
-			dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
 						length, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+			dma_sync_single_for_device(dev->dev.parent,
+						   rxd->buf_addr, length,
+						   DMA_FROM_DEVICE);
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, dev);
 
 			netif_receive_skb(skb);
 
-			ep->stats.rx_packets++;
-			ep->stats.rx_bytes += length;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += length;
 		} else {
-			ep->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 		}
 
 err:
@@ -356,10 +352,11 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	struct ep93xx_tdesc *txd;
 	int entry;
 
 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
-		ep->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -367,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = ep->tx_pointer;
 	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-	ep->descs->tdesc[entry].tdesc1 =
-		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	txd = &ep->descs->tdesc[entry];
+
+	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+				DMA_TO_DEVICE);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
-				skb->len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+				   DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	spin_lock_irq(&ep->tx_pending_lock);
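
The reworked transmit path makes the streaming-DMA ownership rule explicit: a mapped buffer belongs to the device except between dma_sync_single_for_cpu() and dma_sync_single_for_device(), so the copy done by skb_copy_and_csum_dev() is now bracketed by the two sync calls, and both receive the real struct device (dev->dev.parent, wired up by SET_NETDEV_DEV() in the probe hunk at the end) instead of NULL so the DMA layer can apply the correct cache maintenance for this platform. A hedged sketch of the bare pattern, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustration only; fill_mapped_buffer() and its arguments are made up.
 * The CPU may touch a streaming mapping only between the two syncs.
 */
static void fill_mapped_buffer(struct device *dev, dma_addr_t handle,
			       void *buf, const void *data, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);	/* CPU owns the buffer */
	memcpy(buf, data, len);
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);	/* device owns it again */
}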
@@ -415,17 +415,17 @@ static void ep93xx_tx_complete(struct net_device *dev)
 		if (tstat0 & TSTAT0_TXWE) {
 			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
 
-			ep->stats.tx_packets++;
-			ep->stats.tx_bytes += length;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += length;
 		} else {
-			ep->stats.tx_errors++;
+			dev->stats.tx_errors++;
 		}
 
 		if (tstat0 & TSTAT0_OW)
-			ep->stats.tx_window_errors++;
+			dev->stats.tx_window_errors++;
 		if (tstat0 & TSTAT0_TXU)
-			ep->stats.tx_fifo_errors++;
-		ep->stats.collisions += (tstat0 >> 16) & 0x1f;
+			dev->stats.tx_fifo_errors++;
+		dev->stats.collisions += (tstat0 >> 16) & 0x1f;
 
 		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
 		if (ep->tx_pending == TX_QUEUE_ENTRIES)
@@ -465,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
-			free_page((unsigned long)ep->rx_buf[i]);
+			kfree(ep->rx_buf[i]);
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
-			free_page((unsigned long)ep->tx_buf[i]);
+			kfree(ep->tx_buf[i]);
 	}
 
-	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 			  ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
-		&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+				       &ep->descs_dma_addr, GFP_KERNEL);
 	if (ep->descs == NULL)
 		return 1;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->rx_buf[i] = page;
+		ep->rx_buf[i] = buf;
 		ep->descs->rdesc[i].buf_addr = d;
 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->tx_buf[i] = page;
+		ep->tx_buf[i] = buf;
 		ep->descs->tdesc[i].buf_addr = d;
-
-		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
 	}
 
 	return 0;
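
The buffer scheme also changes shape here: the deleted comment describes the old trick of splitting one GFP_DMA page into two sub-2K halves, while the new code gives every descriptor its own kmalloc'd PKT_BUF_SIZE buffer and its own mapping, so the loops walk every entry (i++ instead of i += 2) and each dma_map_single() is matched by a dma_unmap_single() with the same device, size and direction, which is exactly what the old NULL/PAGE_SIZE unmaps got wrong. A hedged sketch of one RX slot's lifetime under those assumptions, error paths trimmed:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustration only, not driver code; PKT_BUF_SIZE and the rdesc layout
 * are taken from the hunk above.
 */
static int rx_slot_init(struct device *dev, struct ep93xx_priv *ep, int i)
{
	void *buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
	dma_addr_t d;

	if (buf == NULL)
		return -ENOMEM;

	d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, d)) {
		kfree(buf);
		return -ENOMEM;
	}

	ep->rx_buf[i] = buf;
	ep->descs->rdesc[i].buf_addr = d;
	ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	return 0;
}

static void rx_slot_free(struct device *dev, struct ep93xx_priv *ep, int i)
{
	/* Unmap with the identical device, size and direction used to map. */
	dma_unmap_single(dev, ep->descs->rdesc[i].buf_addr,
			 PKT_BUF_SIZE, DMA_FROM_DEVICE);
	kfree(ep->rx_buf[i]);
}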
@@ -758,7 +749,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
 	.ndo_open		= ep93xx_open,
 	.ndo_stop		= ep93xx_close,
 	.ndo_start_xmit		= ep93xx_xmit,
-	.ndo_get_stats		= ep93xx_get_stats,
 	.ndo_do_ioctl		= ep93xx_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
@@ -838,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	}
 	ep = netdev_priv(dev);
 	ep->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
 	platform_set_drvdata(pdev, dev);
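
The one-line probe addition is what makes every dev->dev.parent reference above resolve to something real: SET_NETDEV_DEV() parents the net_device's embedded struct device to the platform device. In this kernel generation the macro is, give or take, a single assignment:

/* From include/linux/netdevice.h (near-verbatim for this era): */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

With that in place, dma_map_single(dev->dev.parent, ...) and friends see the EP93xx platform device and can select the correct DMA operations, where the old NULL device relied on unspecified fallback behaviour.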