Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_eth_soc.c')
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4763252bbf85..d1cdc2d76151 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -481,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-        dma_addr_t phy_ring_head, phy_ring_tail;
+        dma_addr_t phy_ring_tail;
         int cnt = MTK_DMA_SIZE;
         dma_addr_t dma_addr;
         int i;
 
         eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                                cnt * sizeof(struct mtk_tx_dma),
-                                               &phy_ring_head,
+                                               &eth->phy_scratch_ring,
                                                GFP_ATOMIC | __GFP_ZERO);
         if (unlikely(!eth->scratch_ring))
                 return -ENOMEM;
 
         eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
                                     GFP_KERNEL);
+        if (unlikely(!eth->scratch_head))
+                return -ENOMEM;
+
         dma_addr = dma_map_single(eth->dev,
                                   eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                   DMA_FROM_DEVICE);
@@ -502,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                 return -ENOMEM;
 
         memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-        phy_ring_tail = phy_ring_head +
+        phy_ring_tail = eth->phy_scratch_ring +
                         (sizeof(struct mtk_tx_dma) * (cnt - 1));
 
         for (i = 0; i < cnt; i++) {
                 eth->scratch_ring[i].txd1 =
                         (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
                 if (i < cnt - 1)
-                        eth->scratch_ring[i].txd2 = (phy_ring_head +
+                        eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                                 ((i + 1) * sizeof(struct mtk_tx_dma)));
                 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
         }
 
-        mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+        mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
         mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
         mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
         mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
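Note: the two hunks above change mtk_init_fq_dma() in two ways. A missing -ENOMEM check after the kcalloc() is added, and the bus address of the coherent scratch ring moves from the local phy_ring_head into eth->phy_scratch_ring so it survives until teardown: dma_free_coherent() must be handed back the same dma_handle that dma_alloc_coherent() produced, and the free now happens in a different function, mtk_dma_free() (see the hunks near the end of this patch). A minimal sketch of that alloc/free pairing, with hypothetical names (ring_ctx stands in for struct mtk_eth):

    #include <linux/dma-mapping.h>

    struct ring_ctx {                /* hypothetical, like struct mtk_eth */
            struct device *dev;
            void *cpu_addr;          /* CPU view, like eth->scratch_ring */
            dma_addr_t bus_addr;     /* device view, like eth->phy_scratch_ring */
            size_t size;
    };

    static int ring_alloc(struct ring_ctx *c)
    {
            c->cpu_addr = dma_alloc_coherent(c->dev, c->size, &c->bus_addr,
                                             GFP_KERNEL);
            return c->cpu_addr ? 0 : -ENOMEM;
    }

    static void ring_free(struct ring_ctx *c)
    {
            if (!c->cpu_addr)
                    return;
            /* both addresses from the original allocation are required */
            dma_free_coherent(c->dev, c->size, c->cpu_addr, c->bus_addr);
            c->cpu_addr = NULL;
            c->bus_addr = 0;
    }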
@@ -671,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
         do {
-                tx_buf = mtk_desc_to_tx_buf(ring, txd);
+                tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
                 /* unmap dma */
                 mtk_tx_unmap(&dev->dev, tx_buf);
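Note: mtk_tx_map()'s error unwind walks forward from itxd, the first descriptor consumed by this skb, towards txd. Looking the tx_buf up via txd resolved the same final slot on every iteration, unmapping the wrong buffer repeatedly and leaking the mappings of the earlier descriptors; resolving it from itxd keeps the buffer lookup in step with the descriptor actually being unwound.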
@@ -701,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
         return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+        int i;
+
+        for (i = 0; i < MTK_MAC_COUNT; i++) {
+                if (!eth->netdev[i])
+                        continue;
+                if (netif_queue_stopped(eth->netdev[i]))
+                        return 1;
+        }
+
+        return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
         int i;
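Note: the MACs on this SoC are served by one shared TX ring, so the completion code cannot ask a single netdev whether its queue is stopped; the new mtk_queue_stopped() helper scans all of them. It is used in the mtk_poll_tx() hunk below so that mtk_wake_queue() is only called when some queue is actually stopped, rather than on every completion pass that finds free descriptors above the threshold.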
@@ -766,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
         if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
                 goto drop;
 
-        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
                 mtk_stop_queue(eth);
-                if (unlikely(atomic_read(&ring->free_count) >
-                             ring->thresh))
-                        mtk_wake_queue(eth);
-        }
+
         spin_unlock_irqrestore(&eth->page_lock, flags);
 
         return NETDEV_TX_OK;
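Note: the removed block stopped the queue and then immediately re-checked free_count from inside mtk_start_xmit(), re-waking the queue from the transmit path itself. That duplicated, and could race with, the wake logic in the TX completion path; after this hunk the transmit path only ever stops the queue and mtk_poll_tx() is the sole waker. A simplified sketch of the resulting protocol (reduced to one netdev, hypothetical helper names; the driver loops over all MACs via mtk_stop_queue()/mtk_wake_queue()):

    #include <linux/atomic.h>
    #include <linux/netdevice.h>

    /* transmit path: may only ever stop the queue */
    static void tx_side(struct net_device *dev, atomic_t *free, int thresh)
    {
            if (unlikely(atomic_read(free) <= thresh))
                    netif_stop_queue(dev);
    }

    /* completion path: the single place a stopped queue is woken again */
    static void completion_side(struct net_device *dev, atomic_t *free, int thresh)
    {
            if (netif_queue_stopped(dev) && atomic_read(free) > thresh)
                    netif_wake_queue(dev);
    }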
@@ -826,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                                           DMA_FROM_DEVICE);
                 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
                         skb_free_frag(new_data);
+                        netdev->stats.rx_dropped++;
                         goto release_desc;
                 }
 
@@ -833,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                 skb = build_skb(data, ring->frag_size);
                 if (unlikely(!skb)) {
                         put_page(virt_to_head_page(new_data));
+                        netdev->stats.rx_dropped++;
                         goto release_desc;
                 }
                 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
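Note: both RX error paths above, a failed DMA mapping of the replacement buffer and a failed build_skb(), recycle the descriptor without ever delivering a packet. Bumping netdev->stats.rx_dropped in each makes these previously silent drops visible in the interface statistics.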
@@ -921,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
                 }
                 mtk_tx_unmap(eth->dev, tx_buf);
 
-                ring->last_free->txd2 = next_cpu;
                 ring->last_free = desc;
                 atomic_inc(&ring->free_count);
 
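Note: the TX completion path now only advances the ring->last_free bookkeeping pointer; it no longer rewrites the freed descriptor's next pointer (txd2). The chain linkage written when the ring was set up is left in place rather than being patched on every reclaimed descriptor.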
@@ -946,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
         if (!total)
                 return 0;
 
-        if (atomic_read(&ring->free_count) > ring->thresh)
+        if (mtk_queue_stopped(eth) &&
+            (atomic_read(&ring->free_count) > ring->thresh))
                 mtk_wake_queue(eth);
 
         return total;
@@ -1027,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 
         atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
         ring->next_free = &ring->dma[0];
-        ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-        ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-                           MAX_SKB_FRAGS);
+        ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+        ring->thresh = MAX_SKB_FRAGS;
 
         /* make sure that all changes to the dma ring are flushed before we
          * continue
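Note: two fixes in mtk_tx_alloc(). ring->last_free pointed one slot short of the end of the ring, an off-by-one that wasted a descriptor. And the stop threshold drops from a quarter of the ring to MAX_SKB_FRAGS, roughly the worst-case descriptor demand of one fully fragmented skb. Assuming MTK_DMA_SIZE is 256 and 4 KiB pages (MAX_SKB_FRAGS = 17), the old threshold was max(256 >> 2, 17) = 64 free descriptors while the new one is 17, so the ring can fill about 47 descriptors further before the queue is stopped.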
@@ -1207,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
         for (i = 0; i < MTK_MAC_COUNT; i++)
                 if (eth->netdev[i])
                         netdev_reset_queue(eth->netdev[i]);
+        if (eth->scratch_ring) {
+                dma_free_coherent(eth->dev,
+                                  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+                                  eth->scratch_ring,
+                                  eth->phy_scratch_ring);
+                eth->scratch_ring = NULL;
+                eth->phy_scratch_ring = 0;
+        }
         mtk_tx_clean(eth);
         mtk_rx_clean(eth);
         kfree(eth->scratch_head);
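Note: this is the consumer of eth->phy_scratch_ring saved in the first hunk. The coherent scratch ring allocated by mtk_init_fq_dma() was previously never freed and leaked on every down/up cycle; it is now returned with the matching size and DMA handle, and both fields are cleared so the block is safe if mtk_dma_free() runs again.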
@@ -1269,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
         mtk_w32(eth,
                 MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
                 MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-                MTK_RX_BT_32DWORDS,
+                MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
                 MTK_QDMA_GLO_CFG);
 
         return 0;
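Note: MTK_NDP_CO_PRO is an additional QDMA global configuration bit set when the DMA engine is started; going by the name, it enables the engine's next-descriptor-pointer coherence protection.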
@@ -1383,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
 
         /* disable delay and normal interrupt */
         mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-        mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+        mtk_irq_disable(eth, ~0);
         mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
         mtk_w32(eth, 0, MTK_RST_GL);
 
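Note: masking with ~0 disables every interrupt source during hardware init instead of only the TX/RX done bits, so no source the driver is not yet prepared for can fire before the handlers are set up.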
@@ -1697,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
         mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
         SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-        eth->netdev[id]->watchdog_timeo = HZ;
+        eth->netdev[id]->watchdog_timeo = 5 * HZ;
         eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
         eth->netdev[id]->base_addr = (unsigned long)eth->base;
         eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
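Note: watchdog_timeo is how long a TX queue may sit stalled before the netdev watchdog invokes ndo_tx_timeout(). One second (HZ) is easily exceeded under load; 5 * HZ matches the 5-second fallback the net core applies when a driver leaves the field unset.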