aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-05-23 15:32:15 -0400
committerDavid S. Miller <davem@davemloft.net>2014-05-23 15:32:15 -0400
commitebb0531ba22042db78348906b7b39d194d5d927f (patch)
tree0421ee0814f5db0888d761b7398356447c9c2779
parentece80490e2c1cefda018b2e5b96d4f39083d9096 (diff)
parentdc03e21a54d9aa77751dd545a8d3aefa80d43833 (diff)
Merge branch 'mvneta-next'
Ezequiel Garcia says: ==================== net: ethernet: marvell: Assorted fixes New round of this assorted fixes and clean-up series. There is more room for clean-ups, and I'll start preparing more patches once these are accepted. This series consists of cleanups and minor improvements on the mvneta, mv643xx_eth and mvmdio drivers. None of the patches imply any functionality change, except for patch six, "Change the number of default rx queues to one". This patch reduces the driver's allocated resources and prevents the multiqueue path in the poll function from being taken. The previous patchset contains more details: http://permalink.gmane.org/gmane.linux.network/315015 As usual, any feedback on this will be well received! Changes from v2: * Rebased on today's net-next and dropped patch "net: mvneta: Factorize feature setting", merged in the recent TSO series. * As per Sergei's suggestion, used devm_kcalloc or devm_kmalloc_array when suitable. Changes from v1: * Added two more clean-up patches to the series. * Added Sebastian's Acked-by's. * Fixed extra empty line in "net: mv643xx_eth: Simplify mv643xx_eth_adjust_link()" as pointed out by David Miller. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c14
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c18
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c91
3 files changed, 45 insertions, 78 deletions
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3b0f818a4f5c..c68ff5deba8c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1167,8 +1167,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1167 1167
1168 1168
1169/* mii management interface *************************************************/ 1169/* mii management interface *************************************************/
1170static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) 1170static void mv643xx_eth_adjust_link(struct net_device *dev)
1171{ 1171{
1172 struct mv643xx_eth_private *mp = netdev_priv(dev);
1172 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 1173 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1173 u32 autoneg_disable = FORCE_LINK_PASS | 1174 u32 autoneg_disable = FORCE_LINK_PASS |
1174 DISABLE_AUTO_NEG_SPEED_GMII | 1175 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1544,7 +1545,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1544 1545
1545 ret = phy_ethtool_sset(mp->phy, cmd); 1546 ret = phy_ethtool_sset(mp->phy, cmd);
1546 if (!ret) 1547 if (!ret)
1547 mv643xx_adjust_pscr(mp); 1548 mv643xx_eth_adjust_link(dev);
1548 return ret; 1549 return ret;
1549} 1550}
1550 1551
@@ -2473,7 +2474,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2473 2474
2474 ret = phy_mii_ioctl(mp->phy, ifr, cmd); 2475 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2475 if (!ret) 2476 if (!ret)
2476 mv643xx_adjust_pscr(mp); 2477 mv643xx_eth_adjust_link(dev);
2477 return ret; 2478 return ret;
2478} 2479}
2479 2480
@@ -2871,13 +2872,6 @@ static void set_params(struct mv643xx_eth_private *mp,
2871 mp->txq_count = pd->tx_queue_count ? : 1; 2872 mp->txq_count = pd->tx_queue_count ? : 1;
2872} 2873}
2873 2874
2874static void mv643xx_eth_adjust_link(struct net_device *dev)
2875{
2876 struct mv643xx_eth_private *mp = netdev_priv(dev);
2877
2878 mv643xx_adjust_pscr(mp);
2879}
2880
2881static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2875static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2882 int phy_addr) 2876 int phy_addr)
2883{ 2877{
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
195 return -ENODEV; 195 return -ENODEV;
196 } 196 }
197 197
198 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); 198 bus = devm_mdiobus_alloc_size(&pdev->dev,
199 if (!bus) { 199 sizeof(struct orion_mdio_dev));
200 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); 200 if (!bus)
201 return -ENOMEM; 201 return -ENOMEM;
202 }
203 202
204 bus->name = "orion_mdio_bus"; 203 bus->name = "orion_mdio_bus";
205 bus->read = orion_mdio_read; 204 bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
208 dev_name(&pdev->dev)); 207 dev_name(&pdev->dev));
209 bus->parent = &pdev->dev; 208 bus->parent = &pdev->dev;
210 209
211 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 210 bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
212 if (!bus->irq) { 211 GFP_KERNEL);
213 mdiobus_free(bus); 212 if (!bus->irq)
214 return -ENOMEM; 213 return -ENOMEM;
215 }
216 214
217 for (i = 0; i < PHY_MAX_ADDR; i++) 215 for (i = 0; i < PHY_MAX_ADDR; i++)
218 bus->irq[i] = PHY_POLL; 216 bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
264out_mdio: 262out_mdio:
265 if (!IS_ERR(dev->clk)) 263 if (!IS_ERR(dev->clk))
266 clk_disable_unprepare(dev->clk); 264 clk_disable_unprepare(dev->clk);
267 kfree(bus->irq);
268 mdiobus_free(bus);
269 return ret; 265 return ret;
270} 266}
271 267
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
276 272
277 writel(0, dev->regs + MVMDIO_ERR_INT_MASK); 273 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
278 mdiobus_unregister(bus); 274 mdiobus_unregister(bus);
279 kfree(bus->irq);
280 mdiobus_free(bus);
281 if (!IS_ERR(dev->clk)) 275 if (!IS_ERR(dev->clk))
282 clk_disable_unprepare(dev->clk); 276 clk_disable_unprepare(dev->clk);
283 277
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 18c698d9ef9b..b8919fa6ed27 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -283,9 +283,6 @@ struct mvneta_port {
283 u32 cause_rx_tx; 283 u32 cause_rx_tx;
284 struct napi_struct napi; 284 struct napi_struct napi;
285 285
286 /* Napi weight */
287 int weight;
288
289 /* Core clock */ 286 /* Core clock */
290 struct clk *clk; 287 struct clk *clk;
291 u8 mcast_count[256]; 288 u8 mcast_count[256];
@@ -451,7 +448,10 @@ struct mvneta_rx_queue {
451 int next_desc_to_proc; 448 int next_desc_to_proc;
452}; 449};
453 450
454static int rxq_number = 8; 451/* The hardware supports eight (8) rx queues, but we are only allowing
452 * the first one to be used. Therefore, let's just allocate one queue.
453 */
454static int rxq_number = 1;
455static int txq_number = 8; 455static int txq_number = 8;
456 456
457static int rxq_def; 457static int rxq_def;
@@ -1654,9 +1654,9 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1654 struct mvneta_tx_queue *txq) 1654 struct mvneta_tx_queue *txq)
1655{ 1655{
1656 struct mvneta_tx_desc *tx_desc; 1656 struct mvneta_tx_desc *tx_desc;
1657 int i; 1657 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1658 1658
1659 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1659 for (i = 0; i < nr_frags; i++) {
1660 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1660 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1661 void *addr = page_address(frag->page.p) + frag->page_offset; 1661 void *addr = page_address(frag->page.p) + frag->page_offset;
1662 1662
@@ -1673,20 +1673,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1673 goto error; 1673 goto error;
1674 } 1674 }
1675 1675
1676 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 1676 if (i == nr_frags - 1) {
1677 /* Last descriptor */ 1677 /* Last descriptor */
1678 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 1678 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1679
1680 txq->tx_skb[txq->txq_put_index] = skb; 1679 txq->tx_skb[txq->txq_put_index] = skb;
1681
1682 mvneta_txq_inc_put(txq);
1683 } else { 1680 } else {
1684 /* Descriptor in the middle: Not First, Not Last */ 1681 /* Descriptor in the middle: Not First, Not Last */
1685 tx_desc->command = 0; 1682 tx_desc->command = 0;
1686
1687 txq->tx_skb[txq->txq_put_index] = NULL; 1683 txq->tx_skb[txq->txq_put_index] = NULL;
1688 mvneta_txq_inc_put(txq);
1689 } 1684 }
1685 mvneta_txq_inc_put(txq);
1690 } 1686 }
1691 1687
1692 return 0; 1688 return 0;
@@ -2137,7 +2133,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
2137{ 2133{
2138 int queue; 2134 int queue;
2139 2135
2140 /* free the skb's in the hal tx ring */ 2136 /* free the skb's in the tx ring */
2141 for (queue = 0; queue < txq_number; queue++) 2137 for (queue = 0; queue < txq_number; queue++)
2142 mvneta_txq_done_force(pp, &pp->txqs[queue]); 2138 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2143 2139
@@ -2429,24 +2425,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2429 return 0; 2425 return 0;
2430 2426
2431 /* The interface is running, so we have to force a 2427 /* The interface is running, so we have to force a
2432 * reallocation of the RXQs 2428 * reallocation of the queues
2433 */ 2429 */
2434 mvneta_stop_dev(pp); 2430 mvneta_stop_dev(pp);
2435 2431
2436 mvneta_cleanup_txqs(pp); 2432 mvneta_cleanup_txqs(pp);
2437 mvneta_cleanup_rxqs(pp); 2433 mvneta_cleanup_rxqs(pp);
2438 2434
2439 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2435 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2440 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2436 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2441 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2437 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2442 2438
2443 ret = mvneta_setup_rxqs(pp); 2439 ret = mvneta_setup_rxqs(pp);
2444 if (ret) { 2440 if (ret) {
2445 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n"); 2441 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2446 return ret; 2442 return ret;
2447 } 2443 }
2448 2444
2449 mvneta_setup_txqs(pp); 2445 ret = mvneta_setup_txqs(pp);
2446 if (ret) {
2447 netdev_err(dev, "unable to setup txqs after MTU change\n");
2448 return ret;
2449 }
2450 2450
2451 mvneta_start_dev(pp); 2451 mvneta_start_dev(pp);
2452 mvneta_port_up(pp); 2452 mvneta_port_up(pp);
@@ -2473,22 +2473,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2473static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2473static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2474{ 2474{
2475 struct mvneta_port *pp = netdev_priv(dev); 2475 struct mvneta_port *pp = netdev_priv(dev);
2476 u8 *mac = addr + 2; 2476 struct sockaddr *sockaddr = addr;
2477 int i; 2477 int ret;
2478
2479 if (netif_running(dev))
2480 return -EBUSY;
2481 2478
2479 ret = eth_prepare_mac_addr_change(dev, addr);
2480 if (ret < 0)
2481 return ret;
2482 /* Remove previous address table entry */ 2482 /* Remove previous address table entry */
2483 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2483 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2484 2484
2485 /* Set new addr in hw */ 2485 /* Set new addr in hw */
2486 mvneta_mac_addr_set(pp, mac, rxq_def); 2486 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2487
2488 /* Set addr in the device */
2489 for (i = 0; i < ETH_ALEN; i++)
2490 dev->dev_addr[i] = mac[i];
2491 2487
2488 eth_commit_mac_addr_change(dev, addr);
2492 return 0; 2489 return 0;
2493} 2490}
2494 2491
@@ -2583,8 +2580,6 @@ static int mvneta_open(struct net_device *dev)
2583 struct mvneta_port *pp = netdev_priv(dev); 2580 struct mvneta_port *pp = netdev_priv(dev);
2584 int ret; 2581 int ret;
2585 2582
2586 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2587
2588 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2583 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2589 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2584 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2590 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2585 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2788,7 +2783,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
2788}; 2783};
2789 2784
2790/* Initialize hw */ 2785/* Initialize hw */
2791static int mvneta_init(struct mvneta_port *pp, int phy_addr) 2786static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2792{ 2787{
2793 int queue; 2788 int queue;
2794 2789
@@ -2798,8 +2793,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2798 /* Set port default values */ 2793 /* Set port default values */
2799 mvneta_defaults_set(pp); 2794 mvneta_defaults_set(pp);
2800 2795
2801 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue), 2796 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2802 GFP_KERNEL); 2797 GFP_KERNEL);
2803 if (!pp->txqs) 2798 if (!pp->txqs)
2804 return -ENOMEM; 2799 return -ENOMEM;
2805 2800
@@ -2811,12 +2806,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2811 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 2806 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2812 } 2807 }
2813 2808
2814 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue), 2809 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2815 GFP_KERNEL); 2810 GFP_KERNEL);
2816 if (!pp->rxqs) { 2811 if (!pp->rxqs)
2817 kfree(pp->txqs);
2818 return -ENOMEM; 2812 return -ENOMEM;
2819 }
2820 2813
2821 /* Create Rx descriptor rings */ 2814 /* Create Rx descriptor rings */
2822 for (queue = 0; queue < rxq_number; queue++) { 2815 for (queue = 0; queue < rxq_number; queue++) {
@@ -2830,12 +2823,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2830 return 0; 2823 return 0;
2831} 2824}
2832 2825
2833static void mvneta_deinit(struct mvneta_port *pp)
2834{
2835 kfree(pp->txqs);
2836 kfree(pp->rxqs);
2837}
2838
2839/* platform glue : initialize decoding windows */ 2826/* platform glue : initialize decoding windows */
2840static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 2827static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2841 const struct mbus_dram_target_info *dram) 2828 const struct mbus_dram_target_info *dram)
@@ -2918,7 +2905,6 @@ static int mvneta_probe(struct platform_device *pdev)
2918 struct resource *res; 2905 struct resource *res;
2919 struct device_node *dn = pdev->dev.of_node; 2906 struct device_node *dn = pdev->dev.of_node;
2920 struct device_node *phy_node; 2907 struct device_node *phy_node;
2921 u32 phy_addr;
2922 struct mvneta_port *pp; 2908 struct mvneta_port *pp;
2923 struct net_device *dev; 2909 struct net_device *dev;
2924 const char *dt_mac_addr; 2910 const char *dt_mac_addr;
@@ -2979,8 +2965,6 @@ static int mvneta_probe(struct platform_device *pdev)
2979 dev->ethtool_ops = &mvneta_eth_tool_ops; 2965 dev->ethtool_ops = &mvneta_eth_tool_ops;
2980 2966
2981 pp = netdev_priv(dev); 2967 pp = netdev_priv(dev);
2982
2983 pp->weight = MVNETA_RX_POLL_WEIGHT;
2984 pp->phy_node = phy_node; 2968 pp->phy_node = phy_node;
2985 pp->phy_interface = phy_mode; 2969 pp->phy_interface = phy_mode;
2986 2970
@@ -3027,23 +3011,21 @@ static int mvneta_probe(struct platform_device *pdev)
3027 pp->dev = dev; 3011 pp->dev = dev;
3028 SET_NETDEV_DEV(dev, &pdev->dev); 3012 SET_NETDEV_DEV(dev, &pdev->dev);
3029 3013
3030 err = mvneta_init(pp, phy_addr); 3014 err = mvneta_init(&pdev->dev, pp);
3031 if (err < 0) { 3015 if (err < 0)
3032 dev_err(&pdev->dev, "can't init eth hal\n");
3033 goto err_free_stats; 3016 goto err_free_stats;
3034 }
3035 3017
3036 err = mvneta_port_power_up(pp, phy_mode); 3018 err = mvneta_port_power_up(pp, phy_mode);
3037 if (err < 0) { 3019 if (err < 0) {
3038 dev_err(&pdev->dev, "can't power up port\n"); 3020 dev_err(&pdev->dev, "can't power up port\n");
3039 goto err_deinit; 3021 goto err_free_stats;
3040 } 3022 }
3041 3023
3042 dram_target_info = mv_mbus_dram_info(); 3024 dram_target_info = mv_mbus_dram_info();
3043 if (dram_target_info) 3025 if (dram_target_info)
3044 mvneta_conf_mbus_windows(pp, dram_target_info); 3026 mvneta_conf_mbus_windows(pp, dram_target_info);
3045 3027
3046 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 3028 netif_napi_add(dev, &pp->napi, mvneta_poll, MVNETA_RX_POLL_WEIGHT);
3047 3029
3048 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 3030 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3049 dev->hw_features |= dev->features; 3031 dev->hw_features |= dev->features;
@@ -3053,7 +3035,7 @@ static int mvneta_probe(struct platform_device *pdev)
3053 err = register_netdev(dev); 3035 err = register_netdev(dev);
3054 if (err < 0) { 3036 if (err < 0) {
3055 dev_err(&pdev->dev, "failed to register\n"); 3037 dev_err(&pdev->dev, "failed to register\n");
3056 goto err_deinit; 3038 goto err_free_stats;
3057 } 3039 }
3058 3040
3059 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 3041 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -3063,8 +3045,6 @@ static int mvneta_probe(struct platform_device *pdev)
3063 3045
3064 return 0; 3046 return 0;
3065 3047
3066err_deinit:
3067 mvneta_deinit(pp);
3068err_free_stats: 3048err_free_stats:
3069 free_percpu(pp->stats); 3049 free_percpu(pp->stats);
3070err_clk: 3050err_clk:
@@ -3083,7 +3063,6 @@ static int mvneta_remove(struct platform_device *pdev)
3083 struct mvneta_port *pp = netdev_priv(dev); 3063 struct mvneta_port *pp = netdev_priv(dev);
3084 3064
3085 unregister_netdev(dev); 3065 unregister_netdev(dev);
3086 mvneta_deinit(pp);
3087 clk_disable_unprepare(pp->clk); 3066 clk_disable_unprepare(pp->clk);
3088 free_percpu(pp->stats); 3067 free_percpu(pp->stats);
3089 irq_dispose_mapping(dev->irq); 3068 irq_dispose_mapping(dev->irq);