Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4ebc42e0271a..7b760752d2c1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;
 
-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
@@ -661,8 +653,6 @@ err_dma:
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }
 
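After these four hunks, mtk_tx_map() no longer touches eth->page_lock at all: both the success path and the err_dma unwind now return with the lock still held by the caller. A minimal sketch of the resulting contract follows; the body is elided and the trailing parameters are assumed from context, since the hunk headers above only show the first two:

/* Sketch, not the driver source: after this patch, mtk_tx_map() is
 * entered and exited with eth->page_lock held by mtk_start_xmit();
 * it neither takes nor releases the lock itself, even on the
 * -ENOMEM unwind.
 */
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	/* ... descriptor setup as before, minus the lock/unlock pairs ... */
	return 0;	/* success: caller releases eth->page_lock */

err_dma:	/* the elided body jumps here on a DMA mapping error */
	/* ... unmap the descriptors filled so far ... */
	return -ENOMEM;	/* failure: caller still releases the lock */
}

The remaining two hunks take the lock in mtk_start_xmit() instead: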
@@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;
 
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
 		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			     ring->thresh))
 			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
 
 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
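Taken together, the mtk_start_xmit() hunks leave a single critical section that covers the free-count check, the descriptor mapping and the queue bookkeeping, with an unlock on each of the three exits. A condensed sketch of the post-patch control flow; the netdev_priv() line is inferred from the mac->hw context above, and the elisions and exit-number comments are mine, not the driver's:

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);	/* assumed from mac->hw above */
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned long flags;
	int tx_num;

	/* one lock for the two queues sharing this ring (see moved comment) */
	spin_lock_irqsave(&eth->page_lock, flags);

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		spin_unlock_irqrestore(&eth->page_lock, flags);	/* exit 1: busy */
		return NETDEV_TX_BUSY;
	}

	/* ... GSO handling and the mtk_tx_map() call elided; failures goto drop ... */

	spin_unlock_irqrestore(&eth->page_lock, flags);	/* exit 2: sent */
	return NETDEV_TX_OK;

drop:
	spin_unlock_irqrestore(&eth->page_lock, flags);	/* exit 3: drop */
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}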