author    David S. Miller <davem@davemloft.net>  2016-08-16 02:02:45 -0400
committer David S. Miller <davem@davemloft.net>  2016-08-16 02:02:45 -0400
commit    a1560dd7a47f983419760aa7f6a481e3b910b54b (patch)
tree      08cd98a5025a8072d082e26ba02a2e74452d7f03
parent    d2fbdf76b85bcdfe57b8ef2ba09d20e8ada79abd (diff)
parent    55a4e778191cfcf675aa1f9716edb71a3014d5fb (diff)
Merge branch 'mediatek-fixes'
Sean Wang says:

====================
mediatek: Fix warning and issues

This patch set fixes the following warning and issues.

v1 -> v2: Fix message typos and add cover letter
v2 -> v3: Split from the previous series, submitting the bug fixes as a
          series targeting 'net'
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
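The series does two things visible in the diff below: it reworks the PHY-mode selection in mtk_phy_connect() (REVMII/RMII handling plus a proper error path), and it makes every DMA map, unmap, and error check in the driver use the ethernet controller's own device, eth->dev, instead of the struct device embedded in a net_device. A hedged sketch of that second rule (mtk_map_tx_buf() is a hypothetical helper, not part of the patch; struct mtk_eth is assumed to be the driver context from mtk_eth_soc.h):

	#include <linux/dma-mapping.h>

	/* Map a TX buffer against the controller's DMA-capable device.
	 * The platform device behind eth->dev carries the dma_mask and
	 * DMA ops the architecture configured for the hardware; the
	 * struct device embedded in a net_device does not, so mapping
	 * against it is unreliable.
	 */
	static int mtk_map_tx_buf(struct mtk_eth *eth, void *data,
				  size_t len, dma_addr_t *addr)
	{
		*addr = dma_map_single(eth->dev, data, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, *addr)))
			return -ENOMEM;		/* caller drops the packet */
		return 0;
	}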
-rw-r--r--	drivers/net/ethernet/mediatek/mtk_eth_soc.c	46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 003036114a1c..f1609542adf1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}
 
 	/* put the gmac into the right mode */
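Reassembled for readability, the selection logic reads as follows after the patch (taken directly from the hunk above; the controlling expression, the PHY interface mode parsed from the device tree, is assumed context):

	switch (phy_mode) {			/* assumed: mode from DT */
	case PHY_INTERFACE_MODE_MII:
		ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:		/* replaces the old RMII case */
		ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:		/* new: rejected on GMAC0 */
		if (!mac->id)
			goto err_phy;
		ge_mode = 3;
		break;
	default:
		goto err_phy;			/* was: dev_err(); return -1; */
	}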
@@ -263,6 +267,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+		SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
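With a fixed link (a MAC-to-MAC connection described by a "fixed-link" device-tree property rather than a real PHY), the emulated PHY does not report pause capability on its own, so the patch ORs the pause bits into supported before the mask derives the advertised set. A hedged sketch of the resulting capability flow (the continuation of the advertising assignment is cut off by the hunk boundary and left elided):

	/* Pause bits are OR'd in first, survive the feature mask below
	 * because the mask itself includes them, and so end up in what
	 * the link advertises.
	 */
	mac->phy_dev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				   SUPPORTED_Asym_Pause;
	mac->phy_dev->advertising = mac->phy_dev->supported | /* ... */;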
@@ -272,6 +281,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	of_node_put(np);
 
 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
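The new err_phy label gives mtk_phy_connect() a single exit that releases the device-node reference and returns a real errno instead of the bare -1 the old default case returned (-1 would read back as -EPERM to any caller that propagates it). A hedged, generic illustration of the pattern (names are hypothetical, not from the driver):

	#include <linux/of.h>

	/* Centralized-exit error handling: every failure path funnels
	 * through one label, so the of_node_put() cannot be forgotten.
	 */
	static int example_connect(struct device_node *np)
	{
		if (!example_mode_valid(np))	/* hypothetical check */
			goto err_phy;

		of_node_put(np);
		return 0;

	err_phy:
		of_node_put(np);		/* single place to undo */
		return -EINVAL;
	}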
@@ -544,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
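Passing struct mtk_eth * instead of struct device * pins the unmap to the same device the buffer was mapped with, which the streaming DMA API requires. The dma_unmap_addr()/dma_unmap_len() accessors used above pair with fields declared through DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN(); a hedged sketch of how such a buffer record is typically declared (field names mirror the driver's, the struct itself is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct example_tx_buf {
		struct sk_buff *skb;
		u32 flags;
		/* These expand to nothing on architectures where unmap
		 * is a no-op, which is why the accessor macros are used
		 * instead of plain struct members.
		 */
		DEFINE_DMA_UNMAP_ADDR(dma_addr0);
		DEFINE_DMA_UNMAP_LEN(dma_len0);
	};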
@@ -597,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -625,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -681,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -838,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -857,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -939,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1094,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
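A side benefit of the mtk_tx_unmap() signature change running through every call site above: any leftover caller still passing eth->dev fails to compile, since struct mtk_eth * and struct device * are incompatible pointer types.

	/* Illustrative, not from the patch: */
	mtk_tx_unmap(eth->dev, tx_buf);	/* error: incompatible pointer type */
	mtk_tx_unmap(eth, tx_buf);	/* correct after this series */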