author		sean.wang@mediatek.com <sean.wang@mediatek.com>	2016-08-16 01:55:15 -0400
committer	David S. Miller <davem@davemloft.net>	2016-08-16 02:02:44 -0400
commit		55a4e778191cfcf675aa1f9716edb71a3014d5fb (patch)
tree		08cd98a5025a8072d082e26ba02a2e74452d7f03
parent		b2025c7cc92d5bfc8c5ce756c8d8a6f57c776fbd (diff)
net: ethernet: mediatek: fix runtime warning raised by inconsistent struct device pointers passed to DMA API
With the DMA-API debug feature enabled, a runtime warning is raised because the pointers passed to the DMA API refer to inconsistent struct device objects. This patch makes the struct device usage consistent across paired DMA operations such as dma_map_*() and dma_unmap_*() to eliminate the warning.

Signed-off-by: Sean Wang <sean.wang@mediatek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mediatek/mtk_eth_soc.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
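The core of the fix is that CONFIG_DMA_API_DEBUG tracks every mapping by the struct device it was created with, so mapping a buffer against one device (e.g. &netdev->dev) and unmapping it against another (eth->dev) triggers a DMA-API debug warning. Below is a minimal sketch of the pattern the patch enforces; it is not taken from the driver, and my_priv, my_map_buf and my_unmap_buf are hypothetical names. It only illustrates the rule: map, check, and unmap a given buffer through the same struct device.

/*
 * Hedged sketch, not from the patch: names are hypothetical. It shows
 * the discipline the change enforces in mtk_eth_soc.c, namely that the
 * same struct device is used for dma_map_*(), dma_mapping_error() and
 * dma_unmap_*() on a given buffer.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct my_priv {
	struct device *dev;	/* single canonical DMA device, like eth->dev */
	dma_addr_t dma_addr;
	size_t len;
};

static int my_map_buf(struct my_priv *p, void *buf, size_t len)
{
	p->dma_addr = dma_map_single(p->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(p->dev, p->dma_addr))	/* checked on the same device */
		return -ENOMEM;
	p->len = len;
	return 0;
}

static void my_unmap_buf(struct my_priv *p)
{
	/*
	 * Unmapped with the same device that mapped it, so the DMA-API
	 * debug code finds a matching entry and stays quiet.
	 */
	dma_unmap_single(p->dev, p->dma_addr, p->len, DMA_TO_DEVICE);
}

The patch applies this discipline by passing struct mtk_eth * (and thus eth->dev) into mtk_tx_unmap() and the rx path, instead of letting each call site pick whichever struct device it had at hand.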
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9901527a39df..f1609542adf1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -558,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -611,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -639,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -695,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -852,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -871,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -953,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1108,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}