 drivers/net/mv643xx_eth.c | 521 ++++++++++++++++++++++++++------------------------
 1 file changed, 275 insertions(+), 246 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1ceed8798618..3db422b6666b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -57,7 +57,6 @@
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.3";
 
-#define MV643XX_ETH_TX_FAST_REFILL
 
 /*
  * Registers shared between all ports.
@@ -103,7 +102,6 @@ static char mv643xx_eth_driver_version[] = "1.3";
 #define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
 #define TX_BW_BURST(p)			(0x045c + ((p) << 10))
 #define INT_CAUSE(p)			(0x0460 + ((p) << 10))
-#define  INT_TX_END_0			0x00080000
 #define  INT_TX_END			0x07f80000
 #define  INT_RX				0x000003fc
 #define  INT_EXT			0x00000002
@@ -355,6 +353,14 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;
 	struct mii_if_info mii;
 
+	struct napi_struct napi;
+	u8 work_link;
+	u8 work_tx;
+	u8 work_tx_end;
+	u8 work_rx;
+	u8 work_rx_refill;
+	u8 work_rx_oom;
+
 	/*
 	 * RX state.
 	 */
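The hunk above replaces the old per-port flags with one u8 of "work pending" bits per event type, one bit per queue. The stand-alone sketch below (illustrative only, not driver code; the helper names are made up) models that bookkeeping: the interrupt path sets a queue's bit and the poll loop clears it once the queue has been drained.

#include <stdio.h>

static unsigned char work_rx;			/* one bit per RX queue, as in mp->work_rx */

static void mark_rx_work(int queue)   { work_rx |= 1 << queue; }
static void clear_rx_work(int queue)  { work_rx &= ~(1 << queue); }
static int rx_work_pending(int queue) { return (work_rx >> queue) & 1; }

int main(void)
{
	mark_rx_work(3);
	mark_rx_work(0);
	printf("pending on q3: %d, q5: %d\n", rx_work_pending(3), rx_work_pending(5));
	clear_rx_work(3);
	printf("after clear, q3: %d\n", rx_work_pending(3));
	return 0;
}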
@@ -362,7 +368,6 @@ struct mv643xx_eth_private {
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
 	int rxq_count;
-	struct napi_struct napi;
 	struct timer_list rx_oom;
 	struct rx_queue rxq[8];
 
@@ -374,9 +379,6 @@ struct mv643xx_eth_private {
 	int tx_desc_sram_size;
 	int txq_count;
 	struct tx_queue txq[8];
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	int tx_clean_threshold;
-#endif
 };
 
 
@@ -446,82 +448,19 @@ static void txq_disable(struct tx_queue *txq)
 		udelay(10);
 }
 
-static void __txq_maybe_wake(struct tx_queue *txq)
+static void txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
+	spin_lock(&mp->lock);
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 		netif_tx_wake_queue(nq);
+	spin_unlock(&mp->lock);
 }
 
 
-/* rx ***********************************************************************/
-static void txq_reclaim(struct tx_queue *txq, int force);
-
-static int rxq_refill(struct rx_queue *rxq, int budget, int *oom)
-{
-	int skb_size;
-	int refilled;
-
-	/*
-	 * Reserve 2+14 bytes for an ethernet header (the hardware
-	 * automatically prepends 2 bytes of dummy data to each
-	 * received packet), 16 bytes for up to four VLAN tags, and
-	 * 4 bytes for the trailing FCS -- 36 bytes total.
-	 */
-	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
-
-	/*
-	 * Make sure that the skb size is a multiple of 8 bytes, as
-	 * the lower three bits of the receive descriptor's buffer
-	 * size field are ignored by the hardware.
-	 */
-	skb_size = (skb_size + 7) & ~7;
-
-	refilled = 0;
-	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
-		struct sk_buff *skb;
-		int unaligned;
-		int rx;
-
-		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
-		if (skb == NULL) {
-			*oom = 1;
-			break;
-		}
-
-		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-		if (unaligned)
-			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
-
-		refilled++;
-		rxq->rx_desc_count++;
-
-		rx = rxq->rx_used_desc++;
-		if (rxq->rx_used_desc == rxq->rx_ring_size)
-			rxq->rx_used_desc = 0;
-
-		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
-						skb_size, DMA_FROM_DEVICE);
-		rxq->rx_desc_area[rx].buf_size = skb_size;
-		rxq->rx_skb[rx] = skb;
-		wmb();
-		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
-						RX_ENABLE_INTERRUPT;
-		wmb();
-
-		/*
-		 * The hardware automatically prepends 2 bytes of
-		 * dummy data to each received packet, so that the
-		 * IP header ends up 16-byte aligned.
-		 */
-		skb_reserve(skb, 2);
-	}
-
-	return refilled;
-}
-
+/* rx napi ******************************************************************/
 static int rxq_process(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
@@ -553,6 +492,8 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		rxq->rx_desc_count--;
 		rx++;
 
+		mp->work_rx_refill |= 1 << rxq->index;
+
 		/*
 		 * Update statistics.
 		 *
@@ -605,54 +546,78 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		mp->dev->last_rx = jiffies;
 	}
 
+	if (rx < budget)
+		mp->work_rx &= ~(1 << rxq->index);
+
 	return rx;
 }
 
-static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+static int rxq_refill(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp;
-	int work_done;
-	int oom;
-	int i;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int skb_size;
+	int refilled;
 
-	mp = container_of(napi, struct mv643xx_eth_private, napi);
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
 
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	if (++mp->tx_clean_threshold > 5) {
-		mp->tx_clean_threshold = 0;
-		for (i = 0; i < mp->txq_count; i++)
-			txq_reclaim(mp->txq + i, 0);
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	skb_size = (skb_size + 7) & ~7;
 
-		spin_lock_irq(&mp->lock);
-		__txq_maybe_wake(mp->txq);
-		spin_unlock_irq(&mp->lock);
-	}
-#endif
+	refilled = 0;
+	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+		struct sk_buff *skb;
+		int unaligned;
+		int rx;
 
-	work_done = 0;
-	oom = 0;
-	for (i = mp->rxq_count - 1; work_done < budget && i >= 0; i--) {
-		struct rx_queue *rxq = mp->rxq + i;
+		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
+		if (skb == NULL) {
+			mp->work_rx_oom |= 1 << rxq->index;
+			goto oom;
+		}
 
-		work_done += rxq_process(rxq, budget - work_done);
-		work_done += rxq_refill(rxq, budget - work_done, &oom);
-	}
+		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
+		if (unaligned)
+			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
-	if (work_done < budget) {
-		if (oom)
-			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
-		netif_rx_complete(mp->dev, napi);
-		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
-	}
+		refilled++;
+		rxq->rx_desc_count++;
 
-	return work_done;
-}
+		rx = rxq->rx_used_desc++;
+		if (rxq->rx_used_desc == rxq->rx_ring_size)
+			rxq->rx_used_desc = 0;
 
-static inline void oom_timer_wrapper(unsigned long data)
-{
-	struct mv643xx_eth_private *mp = (void *)data;
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = skb_size;
+		rxq->rx_skb[rx] = skb;
+		wmb();
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+						RX_ENABLE_INTERRUPT;
+		wmb();
 
-	napi_schedule(&mp->napi);
+		/*
+		 * The hardware automatically prepends 2 bytes of
+		 * dummy data to each received packet, so that the
+		 * IP header ends up 16-byte aligned.
+		 */
+		skb_reserve(skb, 2);
+	}
+
+	if (refilled < budget)
+		mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+	return refilled;
 }
 
 
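The receive buffer sizing that rxq_refill() keeps in the hunk above is a small, self-contained calculation: MTU plus 36 bytes (2 bytes of hardware dummy data, 14 bytes of ethernet header, 16 bytes for VLAN tags, 4 bytes FCS), rounded up to a multiple of 8 because the descriptor's buffer size field ignores the low three bits. A minimal user-space sketch of just that arithmetic:

#include <stdio.h>

static int rx_buf_size(int mtu)
{
	int skb_size = mtu + 36;		/* 2 + 14 + 16 + 4 bytes of overhead */

	return (skb_size + 7) & ~7;		/* round up to a multiple of 8 */
}

int main(void)
{
	printf("mtu 1500 -> %d bytes\n", rx_buf_size(1500));	/* 1536 */
	printf("mtu 1522 -> %d bytes\n", rx_buf_size(1522));	/* 1560 */
	return 0;
}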
@@ -807,9 +772,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	wmb();
 	desc->cmd_sts = cmd_sts;
 
-	/* clear TX_END interrupt status */
-	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
-	rdl(mp, INT_CAUSE(mp->port_num));
+	/* clear TX_END status */
+	mp->work_tx_end &= ~(1 << txq->index);
 
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
@@ -825,7 +789,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	int queue;
 	struct tx_queue *txq;
 	struct netdev_queue *nq;
-	unsigned long flags;
 	int entries_left;
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
@@ -840,10 +803,10 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq = mp->txq + queue;
 	nq = netdev_get_tx_queue(dev, queue);
 
-	spin_lock_irqsave(&mp->lock, flags);
+	spin_lock(&mp->lock);
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-		spin_unlock_irqrestore(&mp->lock, flags);
+		spin_unlock(&mp->lock);
 		if (net_ratelimit())
 			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
@@ -859,12 +822,105 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (entries_left < MAX_SKB_FRAGS + 1)
 		netif_tx_stop_queue(nq);
 
-	spin_unlock_irqrestore(&mp->lock, flags);
+	spin_unlock(&mp->lock);
 
 	return NETDEV_TX_OK;
 }
 
 
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	u32 hw_desc_ptr;
+	u32 expected_ptr;
+
+	spin_lock(&mp->lock);
+
+	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
+		goto out;
+
+	hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
+	expected_ptr = (u32)txq->tx_desc_dma +
+			txq->tx_curr_desc * sizeof(struct tx_desc);
+
+	if (hw_desc_ptr != expected_ptr)
+		txq_enable(txq);
+
+out:
+	spin_unlock(&mp->lock);
+
+	mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int reclaimed;
+
+	spin_lock(&mp->lock);
+
+	reclaimed = 0;
+	while (reclaimed < budget && txq->tx_desc_count > 0) {
+		int tx_index;
+		struct tx_desc *desc;
+		u32 cmd_sts;
+		struct sk_buff *skb;
+		dma_addr_t addr;
+		int count;
+
+		tx_index = txq->tx_used_desc;
+		desc = &txq->tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+
+		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+			if (!force)
+				break;
+			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+		}
+
+		txq->tx_used_desc = tx_index + 1;
+		if (txq->tx_used_desc == txq->tx_ring_size)
+			txq->tx_used_desc = 0;
+
+		reclaimed++;
+		txq->tx_desc_count--;
+
+		addr = desc->buf_ptr;
+		count = desc->byte_cnt;
+		skb = txq->tx_skb[tx_index];
+		txq->tx_skb[tx_index] = NULL;
+
+		if (cmd_sts & ERROR_SUMMARY) {
+			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+			mp->dev->stats.tx_errors++;
+		}
+
+		/*
+		 * Drop mp->lock while we free the skb.
+		 */
+		spin_unlock(&mp->lock);
+
+		if (cmd_sts & TX_FIRST_DESC)
+			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
+
+		if (skb)
+			dev_kfree_skb(skb);
+
+		spin_lock(&mp->lock);
+	}
+
+	if (reclaimed < budget)
+		mp->work_tx &= ~(1 << txq->index);
+
+	spin_unlock(&mp->lock);
+
+	return reclaimed;
+}
+
+
 /* tx rate control **********************************************************/
 /*
  * Set total maximum TX rate (shared by all TX queues for this port)
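The new txq_reclaim() above is budgeted: it walks tx_used_desc forward with wrap-around and stops either when the budget is spent or when the ring is empty, clearing the queue's work_tx bit only in the latter case. A stand-alone model of just the ring-index bookkeeping (not driver code; the struct and values are made up for illustration):

#include <stdio.h>

struct ring {
	int size;	/* number of descriptors in the ring */
	int used;	/* next descriptor to reclaim */
	int count;	/* descriptors currently in flight */
};

static int reclaim(struct ring *r, int budget)
{
	int reclaimed = 0;

	while (reclaimed < budget && r->count > 0) {
		r->used = (r->used + 1 == r->size) ? 0 : r->used + 1;	/* wrap */
		r->count--;
		reclaimed++;
	}

	return reclaimed;
}

int main(void)
{
	struct ring r = { .size = 8, .used = 6, .count = 5 };
	int n = reclaim(&r, 3);

	printf("reclaimed %d, used now %d, left %d\n", n, r.used, r.count);
	return 0;
}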
@@ -1648,7 +1704,6 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
 	return 0;
 
-
 out_free:
 	if (index == 0 && size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
@@ -1661,84 +1716,74 @@ out:
 	return -ENOMEM;
 }
 
-static void txq_reclaim(struct tx_queue *txq, int force)
+static void txq_deinit(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	unsigned long flags;
 
-	spin_lock_irqsave(&mp->lock, flags);
-	while (txq->tx_desc_count > 0) {
-		int tx_index;
-		struct tx_desc *desc;
-		u32 cmd_sts;
-		struct sk_buff *skb;
-		dma_addr_t addr;
-		int count;
+	txq_disable(txq);
+	txq_reclaim(txq, txq->tx_ring_size, 1);
 
-		tx_index = txq->tx_used_desc;
-		desc = &txq->tx_desc_area[tx_index];
-		cmd_sts = desc->cmd_sts;
+	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
-			if (!force)
-				break;
-			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
-		}
+	if (txq->index == 0 &&
+	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(NULL, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
 
-		txq->tx_used_desc = tx_index + 1;
-		if (txq->tx_used_desc == txq->tx_ring_size)
-			txq->tx_used_desc = 0;
-		txq->tx_desc_count--;
+	kfree(txq->tx_skb);
+}
 
-		addr = desc->buf_ptr;
-		count = desc->byte_cnt;
-		skb = txq->tx_skb[tx_index];
-		txq->tx_skb[tx_index] = NULL;
 
-		if (cmd_sts & ERROR_SUMMARY) {
-			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
-			mp->dev->stats.tx_errors++;
-		}
+/* netdev ops and related ***************************************************/
+static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
+{
+	u32 int_cause;
+	u32 int_cause_ext;
 
-		/*
-		 * Drop mp->lock while we free the skb.
-		 */
-		spin_unlock_irqrestore(&mp->lock, flags);
+	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
+			(INT_TX_END | INT_RX | INT_EXT);
+	if (int_cause == 0)
+		return 0;
 
-		if (cmd_sts & TX_FIRST_DESC)
-			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
-		else
-			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
+	int_cause_ext = 0;
+	if (int_cause & INT_EXT)
+		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));
 
-		if (skb)
-			dev_kfree_skb_irq(skb);
+	int_cause &= INT_TX_END | INT_RX;
+	if (int_cause) {
+		wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
+		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
+				~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
+		mp->work_rx |= (int_cause & INT_RX) >> 2;
+	}
 
-		spin_lock_irqsave(&mp->lock, flags);
+	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
+	if (int_cause_ext) {
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
+		if (int_cause_ext & INT_EXT_LINK_PHY)
+			mp->work_link = 1;
+		mp->work_tx |= int_cause_ext & INT_EXT_TX;
 	}
-	spin_unlock_irqrestore(&mp->lock, flags);
+
+	return 1;
 }
 
-static void txq_deinit(struct tx_queue *txq)
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 {
-	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-
-	txq_disable(txq);
-	txq_reclaim(txq, 1);
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
+	if (unlikely(!mv643xx_eth_collect_events(mp)))
+		return IRQ_NONE;
 
-	if (txq->index == 0 &&
-	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
-		iounmap(txq->tx_desc_area);
-	else
-		dma_free_coherent(NULL, txq->tx_desc_area_size,
-				  txq->tx_desc_area, txq->tx_desc_dma);
+	wrl(mp, INT_MASK(mp->port_num), 0);
+	napi_schedule(&mp->napi);
 
-	kfree(txq->tx_skb);
+	return IRQ_HANDLED;
 }
 
-
-/* netdev ops and related ***************************************************/
 static void handle_link_event(struct mv643xx_eth_private *mp)
 {
 	struct net_device *dev = mp->dev;
1759 | for (i = 0; i < mp->txq_count; i++) { | 1804 | for (i = 0; i < mp->txq_count; i++) { |
1760 | struct tx_queue *txq = mp->txq + i; | 1805 | struct tx_queue *txq = mp->txq + i; |
1761 | 1806 | ||
1762 | txq_reclaim(txq, 1); | 1807 | txq_reclaim(txq, txq->tx_ring_size, 1); |
1763 | txq_reset_hw_ptr(txq); | 1808 | txq_reset_hw_ptr(txq); |
1764 | } | 1809 | } |
1765 | } | 1810 | } |
@@ -1792,86 +1837,72 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
 	netif_carrier_on(dev);
 }
 
-static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
-	struct net_device *dev = (struct net_device *)dev_id;
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	u32 int_cause;
-	u32 int_cause_ext;
+	struct mv643xx_eth_private *mp;
+	int work_done;
 
-	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
-			(INT_TX_END | INT_RX | INT_EXT);
-	if (int_cause == 0)
-		return IRQ_NONE;
+	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
-	int_cause_ext = 0;
-	if (int_cause & INT_EXT) {
-		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
-				& (INT_EXT_LINK_PHY | INT_EXT_TX);
-		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
-	}
+	mp->work_rx_refill |= mp->work_rx_oom;
+	mp->work_rx_oom = 0;
 
-	if (int_cause_ext & INT_EXT_LINK_PHY)
-		handle_link_event(mp);
+	work_done = 0;
+	while (work_done < budget) {
+		u8 queue_mask;
+		int queue;
+		int work_tbd;
+
+		if (mp->work_link) {
+			mp->work_link = 0;
+			handle_link_event(mp);
+			continue;
+		}
 
-	/*
-	 * RxBuffer or RxError set for any of the 8 queues?
-	 */
-	if (int_cause & INT_RX) {
-		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
-		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
-		rdl(mp, INT_MASK(mp->port_num));
+		queue_mask = mp->work_tx | mp->work_tx_end |
+				mp->work_rx | mp->work_rx_refill;
+		if (!queue_mask) {
+			if (mv643xx_eth_collect_events(mp))
+				continue;
+			break;
+		}
 
-		napi_schedule(&mp->napi);
+		queue = fls(queue_mask) - 1;
+		queue_mask = 1 << queue;
+
+		work_tbd = budget - work_done;
+		if (work_tbd > 16)
+			work_tbd = 16;
+
+		if (mp->work_tx_end & queue_mask) {
+			txq_kick(mp->txq + queue);
+		} else if (mp->work_tx & queue_mask) {
+			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
+			txq_maybe_wake(mp->txq + queue);
+		} else if (mp->work_rx & queue_mask) {
+			work_done += rxq_process(mp->rxq + queue, work_tbd);
+		} else if (mp->work_rx_refill & queue_mask) {
+			work_done += rxq_refill(mp->rxq + queue, work_tbd);
+		} else {
+			BUG();
+		}
 	}
 
-	/*
-	 * TxBuffer or TxError set for any of the 8 queues?
-	 */
-	if (int_cause_ext & INT_EXT_TX) {
-		int i;
-
-		for (i = 0; i < mp->txq_count; i++)
-			txq_reclaim(mp->txq + i, 0);
-
-		/*
-		 * Enough space again in the primary TX queue for a
-		 * full packet?
-		 */
-		spin_lock(&mp->lock);
-		__txq_maybe_wake(mp->txq);
-		spin_unlock(&mp->lock);
+	if (work_done < budget) {
+		if (mp->work_rx_oom)
+			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+		napi_complete(napi);
+		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
 	}
 
-	/*
-	 * Any TxEnd interrupts?
-	 */
-	if (int_cause & INT_TX_END) {
-		int i;
-
-		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
-
-		spin_lock(&mp->lock);
-		for (i = 0; i < 8; i++) {
-			struct tx_queue *txq = mp->txq + i;
-			u32 hw_desc_ptr;
-			u32 expected_ptr;
-
-			if ((int_cause & (INT_TX_END_0 << i)) == 0)
-				continue;
-
-			hw_desc_ptr =
-				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
-			expected_ptr = (u32)txq->tx_desc_dma +
-				txq->tx_curr_desc * sizeof(struct tx_desc);
+	return work_done;
+}
 
-			if (hw_desc_ptr != expected_ptr)
-				txq_enable(txq);
-		}
-		spin_unlock(&mp->lock);
-	}
+static inline void oom_timer_wrapper(unsigned long data)
+{
+	struct mv643xx_eth_private *mp = (void *)data;
 
-	return IRQ_HANDLED;
+	napi_schedule(&mp->napi);
 }
 
 static void phy_reset(struct mv643xx_eth_private *mp)
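The new mv643xx_eth_poll() above services whichever pending queue has the highest set bit first, via fls(), and caps each pass at 16 descriptors so no single queue can starve the others within one NAPI budget. The stand-alone sketch below models only that selection loop; fls() here is a portable stand-in for the kernel helper of the same name, and the "work" accounting is simulated.

#include <stdio.h>

static int fls(unsigned int x)		/* 1-based index of the highest set bit, 0 if none */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned char queue_mask = 0x15;	/* pretend work is pending on queues 0, 2 and 4 */
	int budget = 64, work_done = 0;

	while (work_done < budget && queue_mask) {
		int queue = fls(queue_mask) - 1;	/* highest-numbered pending queue */
		int work_tbd = budget - work_done;

		if (work_tbd > 16)
			work_tbd = 16;			/* at most 16 descriptors per pass */

		printf("servicing queue %d, up to %d descriptors\n", queue, work_tbd);
		work_done += work_tbd;			/* pretend the whole chunk was used */
		queue_mask &= ~(1 << queue);		/* pretend that queue is now idle */
	}
	return 0;
}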
@@ -2000,7 +2031,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
-	int oom;
 	int i;
 
 	wrl(mp, INT_CAUSE(mp->port_num), 0);
@@ -2018,7 +2048,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	napi_enable(&mp->napi);
 
-	oom = 0;
 	for (i = 0; i < mp->rxq_count; i++) {
 		err = rxq_init(mp, i);
 		if (err) {
@@ -2027,10 +2056,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 			goto out;
 		}
 
-		rxq_refill(mp->rxq + i, INT_MAX, &oom);
+		rxq_refill(mp->rxq + i, INT_MAX);
 	}
 
-	if (oom) {
+	if (mp->work_rx_oom) {
 		mp->rx_oom.expires = jiffies + (HZ / 10);
 		add_timer(&mp->rx_oom);
 	}