author		Dave Airlie <airlied@redhat.com>	2015-08-17 00:13:53 -0400
committer	Dave Airlie <airlied@redhat.com>	2015-08-17 00:13:53 -0400
commit		4eebf60b7452fbd551fd7dece855ba7825a49cbc (patch)
tree		490b4d194ba09c90e10201ab7fc084a0bda0ed27 /drivers/net
parent		8f9cb50789e76f3e224e8861adf650e55c747af4 (diff)
parent		2c6625cd545bdd66acff14f3394865d43920a5c7 (diff)
Merge tag 'v4.2-rc7' into drm-next

Linux 4.2-rc7

Backmerge master for i915 fixes
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                            35
-rw-r--r--  drivers/net/can/at91_can.c                                  8
-rw-r--r--  drivers/net/can/bfin_can.c                                  6
-rw-r--r--  drivers/net/can/cc770/cc770.c                               4
-rw-r--r--  drivers/net/can/flexcan.c                                   7
-rw-r--r--  drivers/net/can/grcan.c                                     3
-rw-r--r--  drivers/net/can/sja1000/sja1000.c                           6
-rw-r--r--  drivers/net/can/slcan.c                                     2
-rw-r--r--  drivers/net/can/spi/mcp251x.c                              17
-rw-r--r--  drivers/net/can/ti_hecc.c                                   2
-rw-r--r--  drivers/net/can/usb/ems_usb.c                               6
-rw-r--r--  drivers/net/can/usb/esd_usb2.c                              6
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c                     7
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c                 4
-rw-r--r--  drivers/net/can/usb/usb_8dev.c                              6
-rw-r--r--  drivers/net/dsa/bcm_sf2.c                                  15
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c                                 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c                          17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c             2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c        16
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c                     2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c                       125
-rw-r--r--  drivers/net/ethernet/cadence/macb.h                        34
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig                         3
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h                  12
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c        26
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c           55
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c         17
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h         14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c           5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c          3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h                 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c               187
-rw-r--r--  drivers/net/ethernet/freescale/fec.h                        1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c                  19
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c                   10
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c       3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c            2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c                  104
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h                    3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c          350
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c                      22
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c                      244
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c                   17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c                  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c                     4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c                   5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c              2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c       2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                        4
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c                   74
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c                        1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c         4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c           2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c       4
-rw-r--r--  drivers/net/ethernet/sun/niu.c                              4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                              9
-rw-r--r--  drivers/net/ethernet/ti/netcp.h                             2
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c                       51
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c                      67
-rw-r--r--  drivers/net/ethernet/ti/netcp_sgmii.c                      30
-rw-r--r--  drivers/net/hamradio/mkiss.c                                7
-rw-r--r--  drivers/net/ipvlan/ipvlan.h                                 9
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c                            6
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c                           42
-rw-r--r--  drivers/net/macvtap.c                                       7
-rw-r--r--  drivers/net/ntb_netdev.c                                    9
-rw-r--r--  drivers/net/phy/dp83867.c                                   2
-rw-r--r--  drivers/net/phy/mdio_bus.c                                 19
-rw-r--r--  drivers/net/usb/qmi_wwan.c                                  1
-rw-r--r--  drivers/net/usb/r8152.c                                   189
-rw-r--r--  drivers/net/virtio_net.c                                    7
-rw-r--r--  drivers/net/wan/cosa.c                                      3
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c                         1
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.c                      2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h                       6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c               12
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h              3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c                     5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c                      3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c               2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c                       2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c                     5
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h               51
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c                    414
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c                  74
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c                     15
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio_ops.c                 8
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb_ops.c                  4
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c                         7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/sw.c                 1
-rw-r--r--  drivers/net/xen-netback/interface.c                         6
-rw-r--r--  drivers/net/xen-netback/netback.c                          68
93 files changed, 1276 insertions(+), 1414 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 317a49480475..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 }
 
+static struct slave *bond_get_old_active(struct bonding *bond,
+					 struct slave *new_active)
+{
+	struct slave *slave;
+	struct list_head *iter;
+
+	bond_for_each_slave(bond, slave, iter) {
+		if (slave == new_active)
+			continue;
+
+		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+			return slave;
+	}
+
+	return NULL;
+}
+
 /* bond_do_fail_over_mac
  *
  * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 	if (!new_active)
 		return;
 
+	if (!old_active)
+		old_active = bond_get_old_active(bond, new_active);
+
 	if (old_active) {
 		ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
 		ether_addr_copy(saddr.sa_data,
@@ -766,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
 		   slave ? slave->dev->name : "NULL");
 
 	if (!slave || !bond->send_peer_notif ||
+	    !netif_carrier_ok(bond->dev) ||
 	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
 		return false;
 
@@ -1725,9 +1746,16 @@ err_free:
 
 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
-	if (!bond_has_slaves(bond) &&
-	    ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
-		eth_hw_addr_random(bond_dev);
+	if (!bond_has_slaves(bond)) {
+		if (ether_addr_equal_64bits(bond_dev->dev_addr,
+					    slave_dev->dev_addr))
+			eth_hw_addr_random(bond_dev);
+		if (bond_dev->type != ARPHRD_ETHER) {
+			ether_setup(bond_dev);
+			bond_dev->flags |= IFF_MASTER;
+			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+		}
+	}
 
 	return res;
 }
@@ -1916,6 +1944,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
 		netdev_info(bond_dev, "Destroying bond %s\n",
 			    bond_dev->name);
+		bond_remove_proc_entry(bond);
 		unregister_netdevice(bond_dev);
 	}
 	return ret;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
 
 	cf->can_id |= CAN_ERR_CRTL;
 	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-	netif_receive_skb(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 }
 
 /**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
 	}
 
 	at91_read_mb(dev, mb, cf);
-	netif_receive_skb(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
 		return 0;
 
 	at91_poll_err_frame(dev, cf, reg_sr);
-	netif_receive_skb(skb);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	return 1;
 }
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
 		return;
 
 	at91_irq_err_state(dev, cf, new_state);
-	netif_rx(skb);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	priv->can.state = new_state;
 }
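The same reordering repeats in every CAN driver touched by this merge: netif_rx()/netif_receive_skb() hand the skb to the network stack, which may free it immediately, and cf points into that skb's data, so reading cf->can_dlc afterwards is a potential use-after-free. Updating the statistics first, while the driver still owns the buffer, closes the window. A minimal userspace model of the corrected ordering — deliver() is a hypothetical stand-in for netif_rx(), not kernel code:

	#include <stdio.h>
	#include <stdlib.h>

	struct can_frame { unsigned char can_dlc; };		/* trimmed-down model */
	struct net_stats { unsigned long rx_packets, rx_bytes; };

	/* stands in for netif_rx(): after this call the frame is no longer ours */
	static void deliver(struct can_frame *cf) { free(cf); }

	static void rx_handler(struct net_stats *stats)
	{
		struct can_frame *cf = malloc(sizeof(*cf));

		if (!cf)
			return;
		cf->can_dlc = 8;

		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;	/* read while we still own cf */
		deliver(cf);			/* ownership gone; cf must not be touched */
	}

	int main(void)
	{
		struct net_stats stats = { 0, 0 };

		rx_handler(&stats);
		printf("packets=%lu bytes=%lu\n", stats.rx_packets, stats.rx_bytes);
		return 0;
	}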
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
 		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
 	}
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 }
 
 static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 
 	priv->can.state = state;
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
 		for (i = 0; i < cf->can_dlc; i++)
 			cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
 	}
-	netif_rx(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 }
 
 static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
 		}
 	}
 
-	netif_rx(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
 		return 0;
 
 	do_bus_err(dev, cf, reg_esr);
-	netif_receive_skb(skb);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	return 1;
 }
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
 	if (unlikely(new_state == CAN_STATE_BUS_OFF))
 		can_bus_off(dev);
 
-	netif_receive_skb(skb);
-
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	return 1;
 }
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
 	}
 
 	flexcan_read_fifo(dev, cf);
-	netif_receive_skb(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	can_led_event(dev, CAN_LED_EVENT_RX);
 
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
 				cf->data[i] = (u8)(slot[j] >> shift);
 			}
 		}
-		netif_receive_skb(skb);
 
 		/* Update statistics and read pointer */
 		stats->rx_packets++;
 		stats->rx_bytes += cf->can_dlc;
+		netif_receive_skb(skb);
+
 		rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
 	}
 
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
 	/* release receive buffer */
 	sja1000_write_cmdreg(priv, CMD_RRB);
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
 		can_bus_off(dev);
 	}
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a23a7af8eb9a..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
 
 	memcpy(skb_put(skb, sizeof(struct can_frame)),
 	       &cf, sizeof(struct can_frame));
-	netif_rx_ni(skb);
 
 	sl->dev->stats.rx_packets++;
 	sl->dev->stats.rx_bytes += cf.can_dlc;
+	netif_rx_ni(skb);
 }
 
 /* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
 	if (ret)
 		goto out_clk;
 
-	priv->power = devm_regulator_get(&spi->dev, "vdd");
-	priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+	priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+	priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
 	if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
 	    (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
 		ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
 	struct spi_device *spi = to_spi_device(dev);
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
-	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+	if (priv->after_suspend & AFTER_SUSPEND_POWER)
 		mcp251x_power_enable(priv->power, 1);
+
+	if (priv->after_suspend & AFTER_SUSPEND_UP) {
+		mcp251x_power_enable(priv->transceiver, 1);
 		queue_work(priv->wq, &priv->restart_work);
 	} else {
-		if (priv->after_suspend & AFTER_SUSPEND_UP) {
-			mcp251x_power_enable(priv->transceiver, 1);
-			queue_work(priv->wq, &priv->restart_work);
-		} else {
-			priv->after_suspend = 0;
-		}
+		priv->after_suspend = 0;
 	}
+
 	priv->force_quit = 0;
 	enable_irq(spi->irq);
 	return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 		}
 	}
 
-	netif_rx(skb);
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
 			cf->data[i] = msg->msg.can_msg.msg[i];
 	}
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 }
 
 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
 		stats->rx_errors++;
 	}
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 }
 
 /*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
 			cf->data[7] = rxerr;
 		}
 
-		netif_rx(skb);
-
 		priv->bec.txerr = txerr;
 		priv->bec.rxerr = rxerr;
 
 		stats->rx_packets++;
 		stats->rx_bytes += cf->can_dlc;
+		netif_rx(skb);
 	}
 }
 
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
 				cf->data[i] = msg->msg.rx.data[i];
 		}
 
-		netif_rx(skb);
-
 		stats->rx_packets++;
 		stats->rx_bytes += cf->can_dlc;
+		netif_rx(skb);
 	}
 
 	return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 		hwts->hwtstamp = timeval_to_ktime(tv);
 	}
 
-	netif_rx(skb);
 	mc->netdev->stats.rx_packets++;
 	mc->netdev->stats.rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	hwts = skb_hwtstamps(skb);
 	hwts->hwtstamp = timeval_to_ktime(tv);
 
-	/* push the skb */
-	netif_rx(skb);
-
 	/* update statistics */
 	mc->netdev->stats.rx_packets++;
 	mc->netdev->stats.rx_bytes += cf->can_dlc;
+	/* push the skb */
+	netif_rx(skb);
 
 	return 0;
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	hwts = skb_hwtstamps(skb);
 	hwts->hwtstamp = timeval_to_ktime(tv);
 
-	netif_rx(skb);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
 	hwts = skb_hwtstamps(skb);
 	hwts->hwtstamp = timeval_to_ktime(tv);
-	netif_rx(skb);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
+	netif_rx(skb);
 
 	return 0;
 }
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
 	priv->bec.txerr = txerr;
 	priv->bec.rxerr = rxerr;
 
-	netif_rx(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 }
 
 /* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
 		else
 			memcpy(cf->data, msg->data, cf->can_dlc);
 
-		netif_rx(skb);
-
 		stats->rx_packets++;
 		stats->rx_bytes += cf->can_dlc;
+		netif_rx(skb);
 
 		can_led_event(priv->netdev, CAN_LED_EVENT_RX);
 	} else {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 	}
 
 	/* Include the pseudo-PHY address and the broadcast PHY address to
-	 * divert reads towards our workaround
+	 * divert reads towards our workaround. This is only required for
+	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+	 * that we can use the regular SWITCH_MDIO master controller instead.
+	 *
+	 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+	 * to have a 1:1 mapping between Port address and PHY address in order
+	 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+	 * not what we want here, so we initialize phys_mii_mask 0 to always
+	 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
 	 */
-	ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+	if (of_machine_is_compatible("brcm,bcm7445d0"))
+		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+	else
+		ds->phys_mii_mask = 0;
 
 	rev = reg_readl(priv, REG_SWITCH_REVISION);
 	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
 
 	newfid = __ffs(ps->fid_mask);
 	ps->fid[port] = newfid;
-	ps->fid_mask &= (1 << newfid);
+	ps->fid_mask &= ~(1 << newfid);
 	ps->bridge_mask[fid] &= ~(1 << port);
 	ps->bridge_mask[newfid] = 1 << port;
 
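The one-character mv88e6xxx fix is worth spelling out: fid_mask tracks free FIDs (hence newfid = __ffs(ps->fid_mask) picks the lowest set bit), so allocating newfid must clear exactly that bit. The buggy `&= (1 << newfid)` instead discarded every other free FID. A standalone illustration with made-up values:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int fid_mask = 0xf8;	/* FIDs 3..7 free */
		unsigned int newfid = 3;	/* lowest set bit, as __ffs() returns */

		fid_mask &= ~(1u << newfid);	/* correct: only bit 3 cleared */
		assert(fid_mask == 0xf0);

		/* the buggy form, fid_mask &= (1u << newfid), would yield 0x08:
		 * FID 3 still marked free and FIDs 4..7 lost */
		printf("free FIDs: 0x%02x\n", fid_mask);
		return 0;
	}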
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c5d0dd..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
 		vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 	}
 	if (i != RX_RING_SIZE) {
-		int j;
 		pr_emerg("%s: no memory for rx ring\n", dev->name);
-		for (j = 0; j < i; j++) {
-			if (vp->rx_skbuff[j]) {
-				dev_kfree_skb(vp->rx_skbuff[j]);
-				vp->rx_skbuff[j] = NULL;
-			}
-		}
 		retval = -ENOMEM;
-		goto err_free_irq;
+		goto err_free_skb;
 	}
 	/* Wrap the ring. */
 	vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
 	if (!retval)
 		goto out;
 
-err_free_irq:
+err_free_skb:
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		if (vp->rx_skbuff[i]) {
+			dev_kfree_skb(vp->rx_skbuff[i]);
+			vp->rx_skbuff[i] = NULL;
+		}
+	}
 	free_irq(dev->irq, dev);
 err:
 	if (vortex_debug > 1)
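The 3c59x change is a standard error-path consolidation: instead of unwinding the partially filled ring inline, the failure path jumps to one label that frees whatever was allocated, relying on NULL checks to skip empty slots. A compact sketch of the idiom, with hypothetical alloc/free helpers rather than the driver's skb calls:

	#include <stdlib.h>

	#define RING_SIZE 8

	static void *ring[RING_SIZE];

	static int ring_open(void)
	{
		int i;

		for (i = 0; i < RING_SIZE; i++) {
			ring[i] = malloc(64);	/* stands in for dev_alloc_skb() */
			if (!ring[i])
				goto err_free_ring;
		}
		return 0;

	err_free_ring:
		/* free everything allocated so far; NULL slots are skipped */
		for (i = 0; i < RING_SIZE; i++) {
			free(ring[i]);		/* free(NULL) is a no-op */
			ring[i] = NULL;
		}
		return -1;			/* -ENOMEM in the driver */
	}

	int main(void)
	{
		return ring_open() ? 1 : 0;
	}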
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	if (likely(skb)) {
 		(*pkts_compl)++;
 		(*bytes_compl) += skb->len;
+		dev_kfree_skb_any(skb);
 	}
 
-	dev_kfree_skb_any(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
 		offset += sizeof(u32);
 		data_buf += sizeof(u32);
 		written_so_far += sizeof(u32);
+
+		/* At end of each 4Kb page, release nvram lock to allow MFW
+		 * chance to take it for its own use.
+		 */
+		if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+		    (written_so_far < buf_size)) {
+			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+			   "Releasing NVM lock after offset 0x%x\n",
+			   (u32)(offset - sizeof(u32)));
+			bnx2x_release_nvram_lock(bp);
+			usleep_range(1000, 2000);
+			rc = bnx2x_acquire_nvram_lock(bp);
+			if (rc)
+				return rc;
+		}
+
 		cmd_flags = 0;
 	}
 
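The logic added here is a lock-fairness pattern: a long NVRAM write holds a hardware lock, so at each 4 KiB page boundary the driver drops and reacquires it, giving the management firmware (MFW) a window to grab the lock. A userspace model of the loop — the lock functions are empty stubs standing in for bnx2x_acquire/release_nvram_lock(), and the real driver also sleeps between release and reacquire:

	#include <stdio.h>
	#include <string.h>

	#define NVRAM_PAGE 4096

	static void acquire_nvram_lock(void) { }	/* stub */
	static void release_nvram_lock(void) { }	/* stub */

	static int nvram_write(const unsigned char *buf, size_t buf_size)
	{
		size_t written_so_far = 0;

		acquire_nvram_lock();
		while (written_so_far < buf_size) {
			/* ... program one 32-bit word at buf + written_so_far ... */
			written_so_far += sizeof(unsigned int);

			/* page boundary with more to write: let others in briefly */
			if ((written_so_far % NVRAM_PAGE) == 0 &&
			    written_so_far < buf_size) {
				release_nvram_lock();
				acquire_nvram_lock();
			}
		}
		release_nvram_lock();
		return 0;
	}

	int main(void)
	{
		unsigned char image[3 * NVRAM_PAGE];

		memset(image, 0xa5, sizeof(image));
		return nvram_write(image, sizeof(image));
	}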
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 			if (!next_cmpl->valid)
 				break;
 		}
+		packets++;
 
 		/* TODO: BNA_CQ_EF_LOCAL ? */
 		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		else
 			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-		packets++;
 		rcb->rxq->rx_packets++;
 		rcb->rxq->rx_bytes += totlen;
 		ccb->bytes_per_intr += totlen;
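Moving packets++ up matters for NAPI accounting: a completion should consume budget as soon as it is taken off the queue, even if the frame is then dropped for MAC/FCS errors — otherwise a stream of bad frames would never decrement the budget and the poll loop could run past it. A schematic poll loop showing the counting, with hypothetical helpers rather than the bnad functions:

	#include <stdio.h>

	struct cmpl { int valid; int error; };

	static struct cmpl queue[32];

	static void drop(struct cmpl *c)    { (void)c; }	/* discard errored frame */
	static void deliver(struct cmpl *c) { (void)c; }	/* hand skb to the stack */

	static int cq_process(int budget)
	{
		int packets = 0;
		int i = 0;

		while (packets < budget && queue[i].valid) {
			struct cmpl *c = &queue[i++];

			packets++;		/* count *before* the error check */
			if (c->error) {
				drop(c);
				continue;	/* still consumed one unit of budget */
			}
			deliver(c);
		}
		return packets;			/* tells NAPI how much work was done */
	}

	int main(void)
	{
		int n;

		for (n = 0; n < 8; n++)
			queue[n] = (struct cmpl){ .valid = 1, .error = n & 1 };
		printf("processed %d\n", cq_process(16));
		return 0;
	}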
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
 	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
 }
 
+/* I/O accessors */
+static u32 hw_readl_native(struct macb *bp, int offset)
+{
+	return __raw_readl(bp->regs + offset);
+}
+
+static void hw_writel_native(struct macb *bp, int offset, u32 value)
+{
+	__raw_writel(value, bp->regs + offset);
+}
+
+static u32 hw_readl(struct macb *bp, int offset)
+{
+	return readl_relaxed(bp->regs + offset);
+}
+
+static void hw_writel(struct macb *bp, int offset, u32 value)
+{
+	writel_relaxed(value, bp->regs + offset);
+}
+
+/*
+ * Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swaped mode for management
+ * descriptor access.
+ */
+static bool hw_is_native_io(void __iomem *addr)
+{
+	u32 value = MACB_BIT(LLB);
+
+	__raw_writel(value, addr + MACB_NCR);
+	value = __raw_readl(addr + MACB_NCR);
+
+	/* Write 0 back to disable everything */
+	__raw_writel(0, addr + MACB_NCR);
+
+	return value == MACB_BIT(LLB);
+}
+
+static bool hw_is_gem(void __iomem *addr, bool native_io)
+{
+	u32 id;
+
+	if (native_io)
+		id = __raw_readl(addr + MACB_MID);
+	else
+		id = readl_relaxed(addr + MACB_MID);
+
+	return MACB_BFEXT(IDNUM, id) >= 0x2;
+}
+
 static void macb_set_hwaddr(struct macb *bp)
 {
 	u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
 		}
 	}
 
-	netdev_info(bp->dev, "invalid hw address, using random\n");
+	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
 	eth_hw_addr_random(bp->dev);
 }
 
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	struct phy_device *phydev = bp->phy_dev;
 	unsigned long flags;
-
 	int status_change = 0;
 
 	spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
 
 static void macb_update_stats(struct macb *bp)
 {
-	u32 __iomem *reg = bp->regs + MACB_PFR;
 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
 	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+	int offset = MACB_PFR;
 
 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
-	for(; p < end; p++, reg++)
-		*p += readl_relaxed(reg);
+	for(; p < end; p++, offset += 4)
+		*p += bp->macb_reg_readl(bp, offset);
 }
 
 static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
 }
 #endif
 
-static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
-						     unsigned int len)
-{
-	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
-}
-
 static unsigned int macb_tx_map(struct macb *bp,
 				struct macb_queue *queue,
 				struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * socket buffer: skb fragments of jumbo frames may need to be
 	 * splitted into many buffer descriptors.
 	 */
-	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
-		count += macb_count_tx_descriptors(bp, frag_size);
+		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 	}
 
 	spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
 static void macb_configure_dma(struct macb *bp)
 {
 	u32 dmacfg;
-	u32 tmp, ncr;
 
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
 
-		/* Find the CPU endianness by using the loopback bit of net_ctrl
-		 * register. save it first. When the CPU is in big endian we
-		 * need to program swaped mode for management descriptor access.
-		 */
-		ncr = macb_readl(bp, NCR);
-		__raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
-		tmp = __raw_readl(bp->regs + MACB_NCR);
-
-		if (tmp == MACB_BIT(LLB))
+		if (bp->native_io)
 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
 		else
 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
 
-		/* Restore net_ctrl */
-		macb_writel(bp, NCR, ncr);
-
 		if (bp->dev->features & NETIF_F_HW_CSUM)
 			dmacfg |= GEM_BIT(TXCOEN);
 		else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
 
 static void gem_update_stats(struct macb *bp)
 {
-	int i;
+	unsigned int i;
 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 
 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
 		u32 offset = gem_statistics[i].offset;
-		u64 val = readl_relaxed(bp->regs + offset);
+		u64 val = bp->macb_reg_readl(bp, offset);
 
 		bp->ethtool_stats[i] += val;
 		*p += val;
 
 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
 			/* Add GEM_OCTTXH, GEM_OCTRXH */
-			val = readl_relaxed(bp->regs + offset + 4);
+			val = bp->macb_reg_readl(bp, offset + 4);
 			bp->ethtool_stats[i] += ((u64)val) << 32;
 			*(++p) += val;
 		}
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
 
 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 {
-	int i;
+	unsigned int i;
 
 	switch (sset) {
 	case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
 	if (dt_conf)
 		bp->caps = dt_conf->caps;
 
-	if (macb_is_gem_hw(bp->regs)) {
+	if (hw_is_gem(bp->regs, bp->native_io)) {
 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
 
 		dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
 			bp->caps |= MACB_CAPS_FIFO_MODE;
 	}
 
-	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
 }
 
 static void macb_probe_queues(void __iomem *mem,
+			      bool native_io,
 			      unsigned int *queue_mask,
 			      unsigned int *num_queues)
 {
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
 	 * we are early in the probe process and don't have the
 	 * MACB_CAPS_MACB_IS_GEM flag positioned
 	 */
-	if (!macb_is_gem_hw(mem))
+	if (!hw_is_gem(mem, native_io))
 		return;
 
 	/* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
 	struct clk *pclk, *hclk, *tx_clk;
 	unsigned int queue_mask, num_queues;
 	struct macb_platform_data *pdata;
+	bool native_io;
 	struct phy_device *phydev;
 	struct net_device *dev;
 	struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
 	struct macb *bp;
 	int err;
 
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mem = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+
 	if (np) {
 		const struct of_device_id *match;
 
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mem = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(mem)) {
-		err = PTR_ERR(mem);
-		goto err_disable_clocks;
-	}
+	native_io = hw_is_native_io(mem);
 
-	macb_probe_queues(mem, &queue_mask, &num_queues);
+	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
 	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
 	if (!dev) {
 		err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
 	bp->pdev = pdev;
 	bp->dev = dev;
 	bp->regs = mem;
+	bp->native_io = native_io;
+	if (native_io) {
+		bp->macb_reg_readl = hw_readl_native;
+		bp->macb_reg_writel = hw_writel_native;
+	} else {
+		bp->macb_reg_readl = hw_readl;
+		bp->macb_reg_writel = hw_writel;
+	}
 	bp->num_queues = num_queues;
 	bp->queue_mask = queue_mask;
 	if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
 	bp->pclk = pclk;
 	bp->hclk = hclk;
 	bp->tx_clk = tx_clk;
-	if (macb_config->jumbo_max_len) {
+	if (macb_config)
 		bp->jumbo_max_len = macb_config->jumbo_max_len;
-	}
 
 	spin_lock_init(&bp->lock);
 
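Two small macb changes above deserve a note. The open-coded macb_count_tx_descriptors() is replaced by the kernel's DIV_ROUND_UP(), which is the same ceiling division, and the endianness probe moves out of macb_configure_dma() into hw_is_native_io(), run once at probe. The ceiling-division identity, checked in a standalone sketch (example per-descriptor limit, not a real macb value):

	#include <assert.h>

	/* same definition as the kernel's DIV_ROUND_UP() */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int max_tx_length = 1500;	/* assumed example limit */

		/* descriptors needed for a given head length */
		assert(DIV_ROUND_UP(1, max_tx_length) == 1);
		assert(DIV_ROUND_UP(1500, max_tx_length) == 1);
		assert(DIV_ROUND_UP(1501, max_tx_length) == 2);
		assert(DIV_ROUND_UP(4000, max_tx_length) == 3);

		/* matches the removed helper: (len + max - 1) / max */
		return 0;
	}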
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
 	 | GEM_BF(name, value))
 
 /* Register access macros */
-#define macb_readl(port,reg) \
-	readl_relaxed((port)->regs + MACB_##reg)
-#define macb_writel(port,reg,value) \
-	writel_relaxed((value), (port)->regs + MACB_##reg)
-#define gem_readl(port, reg) \
-	readl_relaxed((port)->regs + GEM_##reg)
-#define gem_writel(port, reg, value) \
-	writel_relaxed((value), (port)->regs + GEM_##reg)
-#define queue_readl(queue, reg) \
-	readl_relaxed((queue)->bp->regs + (queue)->reg)
-#define queue_writel(queue, reg, value) \
-	writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
+#define macb_readl(port, reg)		(port)->macb_reg_readl((port), MACB_##reg)
+#define macb_writel(port, reg, value)	(port)->macb_reg_writel((port), MACB_##reg, (value))
+#define gem_readl(port, reg)		(port)->macb_reg_readl((port), GEM_##reg)
+#define gem_writel(port, reg, value)	(port)->macb_reg_writel((port), GEM_##reg, (value))
+#define queue_readl(queue, reg)		(queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
+#define queue_writel(queue, reg, value)	(queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
 
 /* Conditional GEM/MACB macros. These perform the operation to the correct
  * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
 
 struct macb {
 	void __iomem	*regs;
+	bool		native_io;
+
+	/* hardware IO accessors */
+	u32	(*macb_reg_readl)(struct macb *bp, int offset);
+	void	(*macb_reg_writel)(struct macb *bp, int offset, u32 value);
 
 	unsigned int		rx_tail;
 	unsigned int		rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
 
 	struct mii_bus		*mii_bus;
 	struct phy_device	*phy_dev;
-	unsigned int 		link;
-	unsigned int 		speed;
-	unsigned int 		duplex;
+	int 			link;
+	int 			speed;
+	int 			duplex;
 
 	u32			caps;
 	unsigned int		dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
 	return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
-static inline bool macb_is_gem_hw(void __iomem *addr)
-{
-	return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
-}
-
 #endif /* _MACB_H */
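The header change pairs with the probe-time selection in macb.c: every register macro now funnels through a pair of function pointers, so the native-vs-relaxed I/O decision is made once instead of per access. A self-contained model of the dispatch — made-up register layout and byte-swap stand-ins, not the macb hardware:

	#include <stdint.h>
	#include <stdio.h>

	struct dev {
		uint32_t regs[4];			/* stands in for ioremapped MMIO */
		uint32_t (*reg_readl)(struct dev *d, int offset);
	};

	static uint32_t readl_native(struct dev *d, int offset)
	{
		return d->regs[offset / 4];		/* __raw_readl() analogue */
	}

	static uint32_t readl_swapped(struct dev *d, int offset)
	{
		return __builtin_bswap32(d->regs[offset / 4]);	/* swapped-mode analogue */
	}

	/* mirrors: #define macb_readl(port, reg) (port)->macb_reg_readl(...) */
	#define dev_readl(d, off)	((d)->reg_readl((d), (off)))

	int main(void)
	{
		struct dev d = { .regs = { 0x11223344 } };

		d.reg_readl = readl_native;	/* chosen once, as in macb_probe() */
		printf("native:  0x%08x\n", dev_readl(&d, 0));

		d.reg_readl = readl_swapped;
		printf("swapped: 0x%08x\n", dev_readl(&d, 0));
		return 0;
	}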
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
 config THUNDER_NIC_PF
 	tristate "Thunder Physical function driver"
 	depends on 64BIT
-	default ARCH_THUNDER
 	select THUNDER_NIC_BGX
 	---help---
 	  This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
 	tristate "Thunder Virtual function driver"
 	depends on 64BIT
-	default ARCH_THUNDER
 	---help---
 	  This driver supports Thunder's NIC virtual function
 
 config THUNDER_NIC_BGX
 	tristate "Thunder MAC interface driver (BGX)"
 	depends on 64BIT
-	default ARCH_THUNDER
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
  */
 #define NICPF_CLK_PER_INT_TICK		2
 
+/* Time to wait before we decide that a SQ is stuck.
+ *
+ * Since both pkt rx and tx notifications are done with same CQ,
+ * when packets are being received at very high rate (eg: L2 forwarding)
+ * then freeing transmitted skbs will be delayed and watchdog
+ * will kick in, resetting interface. Hence keeping this value high.
+ */
+#define NICVF_TX_TIMEOUT		(50 * HZ)
+
 struct nicvf_cq_poll {
 	u8	cq_idx;		/* Completion queue index */
 	struct	napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
 	/* Tx */
 	u64 tx_frames_ok;
 	u64 tx_drops;
-	u64 tx_busy;
 	u64 tx_tso;
+	u64 txq_stop;
+	u64 txq_wake;
 };
 
 struct nicvf {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
 	NICVF_DRV_STAT(rx_frames_jumbo),
 	NICVF_DRV_STAT(rx_drops),
 	NICVF_DRV_STAT(tx_frames_ok),
-	NICVF_DRV_STAT(tx_busy),
 	NICVF_DRV_STAT(tx_tso),
 	NICVF_DRV_STAT(tx_drops),
+	NICVF_DRV_STAT(txq_stop),
+	NICVF_DRV_STAT(txq_wake),
 };
 
 static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
 
 static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 {
+	struct nicvf *nic = netdev_priv(netdev);
 	int stats, qidx;
 
 	if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 		data += ETH_GSTRING_LEN;
 	}
 
-	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
 		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
 			sprintf(data, "rxq%d: %s", qidx,
 				nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 		}
 	}
 
-	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
 		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
 			sprintf(data, "txq%d: %s", qidx,
 				nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 
 static int nicvf_get_sset_count(struct net_device *netdev, int sset)
 {
+	struct nicvf *nic = netdev_priv(netdev);
+
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
 
 	return nicvf_n_hw_stats + nicvf_n_drv_stats +
 		(nicvf_n_queue_stats *
-		 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+		 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
 		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
 }
 
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 		*(data++) = ((u64 *)&nic->drv_stats)
 			    [nicvf_drv_stats[stat].index];
 
-	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
 		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
 			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
 				    [nicvf_queue_stats[stat].index];
 	}
 
-	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
 		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
 			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
 				    [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
 {
 	struct nicvf *nic = netdev_priv(dev);
 	int err = 0;
+	bool if_up = netif_running(dev);
 
 	if (!channel->rx_count || !channel->tx_count)
 		return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
 	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
 		return -EINVAL;
 
+	if (if_up)
+		nicvf_stop(dev);
+
 	nic->qs->rq_cnt = channel->rx_count;
 	nic->qs->sq_cnt = channel->tx_count;
 	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
 	if (err)
 		return err;
 
-	if (!netif_running(dev))
-		return err;
+	if (if_up)
+		nicvf_open(dev);
 
-	nicvf_stop(dev);
-	nicvf_open(dev);
 	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
 		    nic->qs->sq_cnt, nic->qs->rq_cnt);
 
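The reshuffled nicvf_set_channels() fixes an ordering problem: queue counts were previously rewritten while the interface could still be up, and the stop/open cycle only happened afterwards. The new flow snapshots the running state, quiesces first, reconfigures, then reopens only if it was up. In outline, with stub functions in place of the driver's:

	#include <stdbool.h>
	#include <stdio.h>

	static bool if_running = true;
	static int rq_cnt = 8, sq_cnt = 8;

	static void dev_stop(void) { if_running = false; }	/* nicvf_stop() analogue */
	static void dev_open(void) { if_running = true; }	/* nicvf_open() analogue */

	static int set_channels(int rx, int tx)
	{
		bool if_up = if_running;	/* snapshot before touching anything */

		if (!rx || !tx)
			return -1;		/* -EINVAL in the driver */

		if (if_up)
			dev_stop();		/* no traffic while counts change */

		rq_cnt = rx;
		sq_cnt = tx;

		if (if_up)
			dev_open();		/* restore the state we found */

		printf("Setting num Tx rings to %d, Rx rings to %d success\n",
		       sq_cnt, rq_cnt);
		return 0;
	}

	int main(void)
	{
		return set_channels(4, 4);
	}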
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wake up TXQ if it was stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
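With this hunk the completion handler counts CQE_TYPE_SEND entries and wakes the matching TX queue only when send completions actually freed descriptors and the queue had been stopped; the unconditional wake in nicvf_poll() is removed in the next hunk. A standalone sketch of completion-driven wakeup; the struct and helpers are illustrative stand-ins:

/* Restart a stopped queue only after at least one send completion
 * has freed descriptors. stopped/wake_cnt model
 * netif_tx_queue_stopped() and drv_stats.txq_wake.
 */
#include <stdbool.h>

struct txq_sketch { bool stopped; unsigned long wake_cnt; };

static void cq_done(struct txq_sketch *q, int tx_done)
{
	if (tx_done && q->stopped) {
		q->stopped = false;	/* netif_tx_start_queue() */
		q->wake_cnt++;		/* mirrors drv_stats.txq_wake */
	}
}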
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
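The new .shutdown hook simply delegates to the remove path, so queues and interrupts are quiesced on reboot or kexec just as on unload. A standalone sketch of that delegation; the ops table stands in for struct pci_driver:

/* Shutdown reusing the remove teardown, as nicvf_shutdown() does. */
#include <stdio.h>

struct drv_ops {
	void (*remove)(void);
	void (*shutdown)(void);
};

static void drv_remove(void)   { puts("quiesce HW, free queues"); }
static void drv_shutdown(void) { drv_remove(); }	/* same path */

static const struct drv_ops nicvf_like_driver = {
	.remove   = drv_remove,
	.shutdown = drv_shutdown,
};

int main(void)
{
	nicvf_like_driver.shutdown();	/* invoked on reboot/kexec */
	return 0;
}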
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
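The dma_free_coherent() change makes the free size match the allocation: tso_hdrs holds q_len * TSO_HEADER_SIZE bytes, so freeing with q_len alone under-reported the region. A standalone sketch with size-taking alloc/free stand-ins for the DMA API:

/* The free must be passed the same byte count as the allocation. */
#include <stdlib.h>

#define TSO_HEADER_SIZE 128

static void *fake_dma_alloc(size_t size)        { return malloc(size); }
static void fake_dma_free(void *p, size_t size) { (void)size; free(p); }

struct sq_hdrs { size_t q_len; void *tso_hdrs; };

static int alloc_hdrs(struct sq_hdrs *sq)
{
	sq->tso_hdrs = fake_dma_alloc(sq->q_len * TSO_HEADER_SIZE);
	return sq->tso_hdrs ? 0 : -1;
}

static void free_hdrs(struct sq_hdrs *sq)
{
	/* passing sq->q_len alone here was the bug being fixed */
	fake_dma_free(sq->tso_hdrs, sq->q_len * TSO_HEADER_SIZE);
	sq->tso_hdrs = NULL;
}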
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
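The reordered loop body above frees the skb only when the slot is non-NULL (the TSO path legitimately stores NULL for header descriptors) and keeps the per-descriptor stats updates independent of the skb. A standalone sketch:

/* NULL-safe free ordering: the slot may hold NULL by design, and
 * the stats come from the descriptor, not the skb.
 */
#include <stdlib.h>

struct pkt { size_t len; };

static void reap_slot(struct pkt **slot, unsigned long *tx_packets,
		      unsigned long *tx_bytes, size_t tot_len)
{
	struct pkt *p = *slot;

	if (p)			/* slot can legitimately be NULL */
		free(p);
	(*tx_packets)++;	/* stats from the descriptor, */
	*tx_bytes += tot_len;	/* not the (possibly NULL) skb */
	*slot = NULL;
}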
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamping is not enabled; otherwise 2 */ 70/* Since timestamping is not enabled; otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes same, if timestamping
74 * is enabled this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate number of CQEs to reserve for all SQEs.
94 * It's 1/256th of the CQ size.
95 * '+ 1' to account for pipelining
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
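Plugging this header's values into the new RQ_CQ_DROP expression makes the reservation concrete. A standalone C check; MIN_SQ_DESC_PER_PKT_XMIT = 2 (header plus one gather subdescriptor) is assumed from elsewhere in nicvf_queues.h:

/* Worked example of the new RQ_CQ_DROP formula. */
#include <stdio.h>

#define MIN_SQ_DESC_PER_PKT_XMIT 2	/* assumed, per header */
#define MAX_CQE_PER_PKT_XMIT 1
#define SND_QUEUE_LEN	(1ULL << (2 + 10))	/* SND_QUEUE_SIZE2 */
#define CMP_QUEUE_LEN	(1ULL << (2 + 10))	/* CMP_QUEUE_SIZE2 */
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
			 MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
			    (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)

int main(void)
{
	/* 256 / (4096 / (4096 - 2048)) + 1 = 256 / 2 + 1 = 129 */
	printf("RQ_CQ_DROP = %llu\n", (unsigned long long)RQ_CQ_DROP);
	return 0;
}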
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
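The added guard restricts phy_disconnect() to LMAC modes that actually attach a PHY driver; XFI/XLAUI/KR links carry no phydev to disconnect, though the pointer is still cleared. A standalone sketch with an illustrative mode enum and a stubbed disconnect:

/* Only modes that attach a PHY driver may disconnect one. */
#include <stdbool.h>
#include <stddef.h>

enum lmac_mode { MODE_SGMII, MODE_XFI, MODE_XLAUI,
		 MODE_10G_KR, MODE_40G_KR };

static bool mode_uses_phydev(enum lmac_mode m)
{
	return m != MODE_XFI && m != MODE_XLAUI &&
	       m != MODE_10G_KR && m != MODE_40G_KR;
}

static void phy_disconnect_stub(void *phy) { (void)phy; }

static void lmac_disable(enum lmac_mode m, void **phydev)
{
	if (mode_uses_phydev(m) && *phydev)
		phy_disconnect_stub(*phydev);
	*phydev = NULL;	/* cleared unconditionally, as in the patch */
}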
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485fbb33f..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
2332 EXT_MEM1_SIZE_G(size)); 2332 EXT_MEM1_SIZE_G(size));
2333 } 2333 }
2334 } else { 2334 } else {
2335 if (i & EXT_MEM_ENABLE_F) 2335 if (i & EXT_MEM_ENABLE_F) {
2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); 2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
2337 add_debugfs_mem(adap, "mc", MEM_MC, 2337 add_debugfs_mem(adap, "mc", MEM_MC,
2338 EXT_MEM_SIZE_G(size)); 2338 EXT_MEM_SIZE_G(size));
2339 }
2339 } 2340 }
2340 2341
2341 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, 2342 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
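The cxgb4 change is a pure missing-braces fix: without braces only the register read was conditional, and the debugfs node was created even when EXT_MEM_ENABLE_F was clear. A standalone illustration of the bug class:

/* Without braces only the first statement is guarded. */
#include <stdio.h>

static void buggy(int enabled)
{
	if (enabled)
		printf("read size register\n");
	printf("create debugfs node\n");	/* oops: always runs */
}

static void fixed(int enabled)
{
	if (enabled) {
		printf("read size register\n");
		printf("create debugfs node\n");	/* now guarded */
	}
}

int main(void)
{
	buggy(0);	/* still creates the node */
	fixed(0);	/* creates nothing */
	return 0;
}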
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
621 BE_IF_FLAGS_MCAST_PROMISCUOUS) 621 BE_IF_FLAGS_MCAST_PROMISCUOUS)
622 622
623#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
624 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
625
626#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
627
623/* An RX interface is an object with one or more MAC addresses and 628/* An RX interface is an object with one or more MAC addresses and
624 * filtering capabilities. */ 629 * filtering capabilities. */
625struct be_cmd_req_if_create { 630struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..c28e3bfdccd7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0; 274 return 0;
275 275
276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT 280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address. 281 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the 282 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
307 status = -EPERM; 311 status = -EPERM;
308 goto err; 312 goto err;
309 } 313 }
310 314done:
311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
312 dev_info(dev, "MAC address changed to %pM\n", mac); 316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
313 return 0; 317 return 0;
314err: 318err:
315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); 319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
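be_mac_addr_set() now takes an early exit when the device is down: the address is only recorded in software, and hardware programming is deferred to the open path (be_enable_if_filters() further below). A standalone sketch of that defer-to-open pattern; the helpers are stand-ins:

/* Program hardware only on a running device; always record in SW. */
#include <stdbool.h>
#include <string.h>

static int hw_program_mac(const unsigned char *mac) { (void)mac; return 0; }

static int set_mac(bool running, unsigned char dev_addr[6],
		   const unsigned char new_mac[6])
{
	if (running) {
		int err = hw_program_mac(new_mac);	/* PMAC_ADD path */
		if (err)
			return err;
	}
	memcpy(dev_addr, new_mac, 6);	/* open reprograms from here */
	return 0;
}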
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448} 2452}
2449 2453
2450static void be_rx_cq_clean(struct be_rx_obj *rxo) 2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2451{ 2456{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q; 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2454 struct be_queue_info *rx_cq = &rxo->cq; 2472 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp; 2473 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter; 2474 struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2487 2505
2488 /* After cleanup, leave the CQ in unarmed state */ 2506 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0); 2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500} 2508}
2501 2509
2502static void be_tx_compl_clean(struct be_adapter *adapter) 2510static void be_tx_compl_clean(struct be_adapter *adapter)
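Splitting be_rx_cq_clean() yields two independent steps, draining completions versus freeing posted-but-unused buffers, which lets be_rx_qs_destroy() below order them around queue destruction. A standalone sketch of the split; the struct is a stand-in:

/* Completion drain and buffer free as separate, orderable steps. */
struct rx_sketch { int used, head, tail; };

static void rx_cq_clean(struct rx_sketch *q)
{
	(void)q;	/* drain completions only; buffers untouched */
}

static void rxq_clean(struct rx_sketch *q)
{
	while (q->used > 0)
		q->used--;	/* put_page() per unused buffer */
	q->head = q->tail = 0;	/* reset ring indices for reuse */
}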
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi); 2585 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi); 2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2579 } 2588 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q); 2589 be_queue_free(adapter, &eqo->q);
2582 } 2590 }
2583} 2591}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2594 2602
2595 for_all_evt_queues(adapter, eqo, i) { 2603 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev); 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2605
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i]; 2606 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter; 2607 eqo->adapter = adapter;
2606 eqo->idx = i; 2608 eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2616 rc = be_cmd_eq_create(adapter, eqo); 2618 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc) 2619 if (rc)
2618 return rc; 2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2619 } 2629 }
2620 return 0; 2630 return 0;
2621} 2631}
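The two EQ hunks reorder initialization so the per-queue cpumask and NAPI context are set up only after be_cmd_eq_create() succeeds, and are freed per queue on destroy, keeping setup and unwind balanced. A standalone sketch with stand-in helpers:

/* Allocate per-queue state only after the create command succeeds. */
#include <stdlib.h>

struct eq_sketch { void *affinity_mask; };

static int eq_create_cmd(struct eq_sketch *e) { (void)e; return 0; }

static int eq_setup(struct eq_sketch *e)
{
	int rc = eq_create_cmd(e);

	if (rc)
		return rc;	/* nothing allocated yet to unwind */

	e->affinity_mask = calloc(1, 64);	/* zalloc_cpumask_var() */
	if (!e->affinity_mask)
		return -1;	/* -ENOMEM in the driver */
	/* netif_napi_add()/napi_hash_add() would follow here */
	return 0;
}

static void eq_teardown(struct eq_sketch *e)
{
	free(e->affinity_mask);	/* freed per EQ, matching setup */
	e->affinity_mask = NULL;
}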
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3354 for_all_rx_queues(adapter, rxo, i) { 3364 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q; 3365 q = &rxo->q;
3356 if (q->created) { 3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3357 be_cmd_rxq_destroy(adapter, q); 3380 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo); 3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3359 } 3383 }
3360 be_queue_free(adapter, q); 3384 be_queue_free(adapter, q);
3361 } 3385 }
3362} 3386}
3363 3387
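The Lancer branch posts up to 64 buffers before destroying an RXQ that has run empty, because tearing the queue down in the out-of-buffer state can stall that hardware; the CQ is drained first so there is room to post. A standalone sketch with stand-in state:

/* Relieve the out-of-buffer condition before destroying the RXQ. */
#define MAX_RX_POST 64

struct lancer_rxq { int used; int is_lancer; };

static void post_rx_frags(struct lancer_rxq *q, int n) { q->used += n; }

static void rxq_destroy(struct lancer_rxq *q)
{
	/* CQ drained first, so there is space to post into */
	if (q->is_lancer && q->used == 0)
		post_rx_frags(q, MAX_RX_POST);
	/* destroy command, then final CQ drain and buffer free */
	q->used = 0;
}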
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
3405 * is loaded in the host and an *old* driver is used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3364static int be_close(struct net_device *netdev) 3415static int be_close(struct net_device *netdev)
3365{ 3416{
3366 struct be_adapter *adapter = netdev_priv(netdev); 3417 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) 3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0; 3425 return 0;
3375 3426
3427 be_disable_if_filters(adapter);
3428
3376 be_roce_dev_close(adapter); 3429 be_roce_dev_close(adapter);
3377 3430
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
3392 be_tx_compl_clean(adapter); 3445 be_tx_compl_clean(adapter);
3393 3446
3394 be_rx_qs_destroy(adapter); 3447 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3396 3448
3397 for_all_evt_queues(adapter, eqo, i) { 3449 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter)) 3450 if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3477 return 0; 3529 return 0;
3478} 3530}
3479 3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3480static int be_open(struct net_device *netdev) 3557static int be_open(struct net_device *netdev)
3481{ 3558{
3482 struct be_adapter *adapter = netdev_priv(netdev); 3559 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
3490 if (status) 3567 if (status)
3491 goto err; 3568 goto err;
3492 3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3493 status = be_irq_register(adapter); 3574 status = be_irq_register(adapter);
3494 if (status) 3575 if (status)
3495 goto err; 3576 goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3686 } 3767 }
3687} 3768}
3688 3769
3689static void be_mac_clear(struct be_adapter *adapter)
3690{
3691 if (adapter->pmac_id) {
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
3699#ifdef CONFIG_BE2NET_VXLAN 3770#ifdef CONFIG_BE2NET_VXLAN
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{ 3772{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
3770#ifdef CONFIG_BE2NET_VXLAN 3841#ifdef CONFIG_BE2NET_VXLAN
3771 be_disable_vxlan_offloads(adapter); 3842 be_disable_vxlan_offloads(adapter);
3772#endif 3843#endif
3773 /* delete the primary mac along with the uc-mac list */ 3844 kfree(adapter->pmac_id);
3774 be_mac_clear(adapter); 3845 adapter->pmac_id = NULL;
3775 3846
3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3777 3848
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
3782 return 0; 3853 return 0;
3783} 3854}
3784 3855
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3793
3794 en_flags &= cap_flags;
3795
3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3797}
3798
3799static int be_vfs_if_create(struct be_adapter *adapter) 3856static int be_vfs_if_create(struct be_adapter *adapter)
3800{ 3857{
3801 struct be_resources res = {0}; 3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3802 struct be_vf_cfg *vf_cfg; 3860 struct be_vf_cfg *vf_cfg;
3803 u32 cap_flags, vf;
3804 int status; 3861 int status;
3805 3862
3806 /* If a FW profile exists, then cap_flags are updated */ 3863 /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3821 } 3878 }
3822 } 3879 }
3823 3880
3824 status = be_if_create(adapter, &vf_cfg->if_handle, 3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3825 cap_flags, vf + 1); 3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3826 if (status) 3887 if (status)
3827 return status; 3888 return status;
3828 } 3889 }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
4194 4255
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 } 4258 }
4201 4259
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
4206 return 0; 4260 return 0;
4207} 4261}
4208 4262
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
4342static int be_setup(struct be_adapter *adapter) 4396static int be_setup(struct be_adapter *adapter)
4343{ 4397{
4344 struct device *dev = &adapter->pdev->dev; 4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4345 int status; 4400 int status;
4346 4401
4347 status = be_func_init(adapter); 4402 status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
4364 if (status) 4419 if (status)
4365 goto err; 4420 goto err;
4366 4421
4367 status = be_if_create(adapter, &adapter->if_handle, 4422 /* will enable all the needed filter flags in be_open() */
4368 be_if_cap_flags(adapter), 0); 4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4369 if (status) 4427 if (status)
4370 goto err; 4428 goto err;
4371 4429
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); 4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 } 4450 }
4393 4451
4394 if (adapter->vlans_added)
4395 be_vid_config(adapter);
4396
4397 be_set_rx_mode(adapter->netdev);
4398
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc); 4453 adapter->rx_fc);
4401 if (status) 4454 if (status)
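Taken together, the be_setup()/be_open()/be_close() hunks move RX-filter programming out of setup: the IFACE is created with only the RSS capability bits, be_open() enables BE_IF_EN_FLAGS plus MAC, VLAN and RX mode, and be_close() clears them again (on Lancer only, per the FW-bug comment above). A standalone sketch of the open/close symmetry; the flag values are stand-ins:

/* Filter flags deferred to open, cleared in close. */
#include <stdbool.h>

#define EN_FLAGS  0x0fu	/* stand-in for BE_IF_EN_FLAGS */
#define ALL_FLAGS 0xffu	/* stand-in for BE_IF_ALL_FILT_FLAGS */

struct adap_sketch { unsigned int if_flags; bool lancer; };

static void setup(struct adap_sketch *a)
{
	a->if_flags = 0;		/* IFACE created minimal */
}

static void dev_open(struct adap_sketch *a)
{
	a->if_flags |= EN_FLAGS;	/* be_enable_if_filters() */
}

static void dev_close(struct adap_sketch *a)
{
	if (a->lancer)			/* see FW-bug comment above */
		a->if_flags &= ~ALL_FLAGS;
}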
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42e20e5385ac..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3142,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3142 fep->bufdesc_size; 3142 fep->bufdesc_size;
3143 3143
3144 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3145 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3146 GFP_KERNEL); 3146 GFP_KERNEL);
3147 if (!cbd_base) { 3147 if (!cbd_base) {
3148 return -ENOMEM; 3148 return -ENOMEM;
3149 } 3149 }
@@ -3431,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
3431 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3432 } 3432 }
3433 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_get_noresume(&pdev->dev);
3437 pm_runtime_set_active(&pdev->dev);
3438 pm_runtime_enable(&pdev->dev);
3439
3434 fec_reset_phy(pdev); 3440 fec_reset_phy(pdev);
3435 3441
3436 if (fep->bufdesc_ex) 3442 if (fep->bufdesc_ex)
@@ -3465,8 +3471,6 @@ fec_probe(struct platform_device *pdev)
3465 netif_carrier_off(ndev); 3471 netif_carrier_off(ndev);
3466 fec_enet_clk_enable(ndev, false); 3472 fec_enet_clk_enable(ndev, false);
3467 pinctrl_pm_select_sleep_state(&pdev->dev); 3473 pinctrl_pm_select_sleep_state(&pdev->dev);
3468 pm_runtime_set_active(&pdev->dev);
3469 pm_runtime_enable(&pdev->dev);
3470 3474
3471 ret = register_netdev(ndev); 3475 ret = register_netdev(ndev);
3472 if (ret) 3476 if (ret)
@@ -3481,8 +3485,6 @@ fec_probe(struct platform_device *pdev)
3481 fep->rx_copybreak = COPYBREAK_DEFAULT; 3485 fep->rx_copybreak = COPYBREAK_DEFAULT;
3482 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3486 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3483 3487
3484 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3485 pm_runtime_use_autosuspend(&pdev->dev);
3486 pm_runtime_mark_last_busy(&pdev->dev); 3488 pm_runtime_mark_last_busy(&pdev->dev);
3487 pm_runtime_put_autosuspend(&pdev->dev); 3489 pm_runtime_put_autosuspend(&pdev->dev);
3488 3490
@@ -3493,6 +3495,7 @@ failed_register:
3493failed_mii_init: 3495failed_mii_init:
3494failed_irq: 3496failed_irq:
3495failed_init: 3497failed_init:
3498 fec_ptp_stop(pdev);
3496 if (fep->reg_phy) 3499 if (fep->reg_phy)
3497 regulator_disable(fep->reg_phy); 3500 regulator_disable(fep->reg_phy);
3498failed_regulator: 3501failed_regulator:
@@ -3514,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
3514 struct net_device *ndev = platform_get_drvdata(pdev); 3517 struct net_device *ndev = platform_get_drvdata(pdev);
3515 struct fec_enet_private *fep = netdev_priv(ndev); 3518 struct fec_enet_private *fep = netdev_priv(ndev);
3516 3519
3517 cancel_delayed_work_sync(&fep->time_keep);
3518 cancel_work_sync(&fep->tx_timeout_work); 3520 cancel_work_sync(&fep->tx_timeout_work);
3521 fec_ptp_stop(pdev);
3519 unregister_netdev(ndev); 3522 unregister_netdev(ndev);
3520 fec_enet_mii_remove(fep); 3523 fec_enet_mii_remove(fep);
3521 if (fep->reg_phy) 3524 if (fep->reg_phy)
3522 regulator_disable(fep->reg_phy); 3525 regulator_disable(fep->reg_phy);
3523 if (fep->ptp_clock)
3524 ptp_clock_unregister(fep->ptp_clock);
3525 of_node_put(fep->phy_node); 3526 of_node_put(fep->phy_node);
3526 free_netdev(ndev); 3527 free_netdev(ndev);
3527 3528
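The fec teardown is consolidated: fec_ptp_stop(), added below in fec_ptp.c, cancels the time-keeping work and unregisters the PTP clock, and both the probe error path and fec_drv_remove() call it, replacing the open-coded cancel/unregister pair. A standalone sketch of the shared-teardown pattern; booleans stand in for the delayed work and the clock:

/* One stop routine serving both the error path and remove. */
#include <stdbool.h>

struct ptp_sketch { bool work_scheduled; bool clock_registered; };

static void ptp_stop(struct ptp_sketch *s)
{
	s->work_scheduled = false;	/* cancel_delayed_work_sync() */
	if (s->clock_registered)	/* clock may never have registered */
		s->clock_registered = false;	/* ptp_clock_unregister() */
}

static void probe_error_path(struct ptp_sketch *s) { ptp_stop(s); }
static void drv_remove(struct ptp_sketch *s)       { ptp_stop(s); }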
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
586 frag = skb_shinfo(skb)->frags; 586 frag = skb_shinfo(skb)->frags;
587 while (nr_frags) { 587 while (nr_frags) {
588 CBDC_SC(bdp, 588 CBDC_SC(bdp,
589 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); 589 BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
590 BD_ENET_TX_TC);
590 CBDS_SC(bdp, BD_ENET_TX_READY); 591 CBDS_SC(bdp, BD_ENET_TX_READY);
591 592
592 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 593 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
110} 110}
111 111
112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) 113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
114#define FEC_RX_EVENT (FEC_ENET_RXF) 114#define FEC_RX_EVENT (FEC_ENET_RXF)
115#define FEC_TX_EVENT (FEC_ENET_TXF) 115#define FEC_TX_EVENT (FEC_ENET_TXF)
116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..2b7610f341b0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
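The gianfar suspend/resume rewrite drops the per-queue TX spinlocks and the bflock (removed below) in favor of a simpler sequence: disable NAPI, detach the device under netif_tx_lock(), halt the MAC, then either arm magic-packet wakeup with RX left enabled or stop the PHY; resume mirrors it. A standalone sketch of the suspend decision with stand-in state:

/* Suspend decision: arm magic-packet WoL or stop the PHY. */
#include <stdbool.h>

struct gfar_sketch { bool wol_en, mp_en, rx_en, phy_running; };

static void suspend_sketch(struct gfar_sketch *g, bool running)
{
	if (!running)
		return;			/* nothing to quiesce */

	/* disable_napi(); detach under netif_tx_lock(); gfar_halt() */
	if (g->wol_en) {
		g->mp_en = true;	/* MACCFG2_MPEN */
		g->rx_en = true;	/* RX stays up to see the packet */
	} else {
		g->phy_running = false;	/* phy_stop() */
	}
}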
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2169,8 +2131,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2131 if (err)
2170 return err; 2132 return err;
2171 2133
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2134 return err;
2175} 2135}
2176 2136
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
903 return 0; 900 return 0;
904} 901}
905 902
906static int gfar_comp_asc(const void *a, const void *b)
907{
908 return memcmp(a, b, 4);
909}
910
911static int gfar_comp_desc(const void *a, const void *b)
912{
913 return -memcmp(a, b, 4);
914}
915
916static void gfar_swap(void *a, void *b, int size)
917{
918 u32 *_a = a;
919 u32 *_b = b;
920
921 swap(_a[0], _b[0]);
922 swap(_a[1], _b[1]);
923 swap(_a[2], _b[2]);
924 swap(_a[3], _b[3]);
925}
926
927/* Write a mask to filer cache */ 903/* Write a mask to filer cache */
928static void gfar_set_mask(u32 mask, struct filer_table *tab) 904static void gfar_set_mask(u32 mask, struct filer_table *tab)
929{ 905{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1273 return 0; 1249 return 0;
1274} 1250}
1275 1251
1276/* Copy size filer entries */
1277static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1278 struct gfar_filer_entry src[0], s32 size)
1279{
1280 while (size > 0) {
1281 size--;
1282 dst[size].ctrl = src[size].ctrl;
1283 dst[size].prop = src[size].prop;
1284 }
1285}
1286
1287/* Delete the contents of the filer-table between start and end
1288 * and collapse them
1289 */
1290static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1291{
1292 int length;
1293
1294 if (end > MAX_FILER_CACHE_IDX || end < begin)
1295 return -EINVAL;
1296
1297 end++;
1298 length = end - begin;
1299
1300 /* Copy */
1301 while (end < tab->index) {
1302 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1303 tab->fe[begin++].prop = tab->fe[end++].prop;
1304
1305 }
1306 /* Fill up with don't cares */
1307 while (begin < tab->index) {
1308 tab->fe[begin].ctrl = 0x60;
1309 tab->fe[begin].prop = 0xFFFFFFFF;
1310 begin++;
1311 }
1312
1313 tab->index -= length;
1314 return 0;
1315}
1316
1317/* Make space on the wanted location */
1318static int gfar_expand_filer_entries(u32 begin, u32 length,
1319 struct filer_table *tab)
1320{
1321 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1322 begin > MAX_FILER_CACHE_IDX)
1323 return -EINVAL;
1324
1325 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1326 tab->index - length + 1);
1327
1328 tab->index += length;
1329 return 0;
1330}
1331
1332static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1333{
1334 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1335 start++) {
1336 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1337 (RQFCR_AND | RQFCR_CLE))
1338 return start;
1339 }
1340 return -1;
1341}
1342
1343static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1344{
1345 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1346 start++) {
1347 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1348 (RQFCR_CLE))
1349 return start;
1350 }
1351 return -1;
1352}
1353
1354/* Uses hardware's clustering option to reduce
1355 * the number of filer table entries
1356 */
1357static void gfar_cluster_filer(struct filer_table *tab)
1358{
1359 s32 i = -1, j, iend, jend;
1360
1361 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1362 j = i;
1363 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1364 /* The cluster entries self and the previous one
1365 * (a mask) must be identical!
1366 */
1367 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1368 break;
1369 if (tab->fe[i].prop != tab->fe[j].prop)
1370 break;
1371 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1372 break;
1373 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1374 break;
1375 iend = gfar_get_next_cluster_end(i, tab);
1376 jend = gfar_get_next_cluster_end(j, tab);
1377 if (jend == -1 || iend == -1)
1378 break;
1379
1380 /* First we make some free space, where our cluster
1381 * element should be. Then we copy it there and finally
1382 * delete it from its old location.
1383 */
1384 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1385 -EINVAL)
1386 break;
1387
1388 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1389 &(tab->fe[jend + 1]), jend - j);
1390
1391 if (gfar_trim_filer_entries(jend - 1,
1392 jend + (jend - j),
1393 tab) == -EINVAL)
1394 return;
1395
1396 /* Mask out cluster bit */
1397 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1398 }
1399 }
1400}
1401
1402/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1403static void gfar_swap_bits(struct gfar_filer_entry *a1,
1404 struct gfar_filer_entry *a2,
1405 struct gfar_filer_entry *b1,
1406 struct gfar_filer_entry *b2, u32 mask)
1407{
1408 u32 temp[4];
1409 temp[0] = a1->ctrl & mask;
1410 temp[1] = a2->ctrl & mask;
1411 temp[2] = b1->ctrl & mask;
1412 temp[3] = b2->ctrl & mask;
1413
1414 a1->ctrl &= ~mask;
1415 a2->ctrl &= ~mask;
1416 b1->ctrl &= ~mask;
1417 b2->ctrl &= ~mask;
1418
1419 a1->ctrl |= temp[1];
1420 a2->ctrl |= temp[0];
1421 b1->ctrl |= temp[3];
1422 b2->ctrl |= temp[2];
1423}
1424
1425/* Generate a list consisting of masks values with their start and
1426 * end of validity and block as indicator for parts belonging
1427 * together (glued by ANDs) in mask_table
1428 */
1429static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1430 struct filer_table *tab)
1431{
1432 u32 i, and_index = 0, block_index = 1;
1433
1434 for (i = 0; i < tab->index; i++) {
1435
1436 /* LSByte of control = 0 sets a mask */
1437 if (!(tab->fe[i].ctrl & 0xF)) {
1438 mask_table[and_index].mask = tab->fe[i].prop;
1439 mask_table[and_index].start = i;
1440 mask_table[and_index].block = block_index;
1441 if (and_index >= 1)
1442 mask_table[and_index - 1].end = i - 1;
1443 and_index++;
1444 }
1445 /* cluster starts and ends will be separated because they should
1446 * hold their position
1447 */
1448 if (tab->fe[i].ctrl & RQFCR_CLE)
1449 block_index++;
1450 /* A not set AND indicates the end of a depended block */
1451 if (!(tab->fe[i].ctrl & RQFCR_AND))
1452 block_index++;
1453 }
1454
1455 mask_table[and_index - 1].end = i - 1;
1456
1457 return and_index;
1458}
1459
1460/* Sorts the entries of mask_table by the values of the masks.
1461 * Important: The 0xFF80 flags of the first and last entry of a
1462 * block must hold their position (which queue, CLusterEnable, ReJEct,
1463 * AND)
1464 */
1465static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1466 struct filer_table *temp_table, u32 and_index)
1467{
1468 /* Pointer to compare function (_asc or _desc) */
1469 int (*gfar_comp)(const void *, const void *);
1470
1471 u32 i, size = 0, start = 0, prev = 1;
1472 u32 old_first, old_last, new_first, new_last;
1473
1474 gfar_comp = &gfar_comp_desc;
1475
1476 for (i = 0; i < and_index; i++) {
1477 if (prev != mask_table[i].block) {
1478 old_first = mask_table[start].start + 1;
1479 old_last = mask_table[i - 1].end;
1480 sort(mask_table + start, size,
1481 sizeof(struct gfar_mask_entry),
1482 gfar_comp, &gfar_swap);
1483
1484 /* Toggle order for every block. This makes the
1485 * thing more efficient!
1486 */
1487 if (gfar_comp == gfar_comp_desc)
1488 gfar_comp = &gfar_comp_asc;
1489 else
1490 gfar_comp = &gfar_comp_desc;
1491
1492 new_first = mask_table[start].start + 1;
1493 new_last = mask_table[i - 1].end;
1494
1495 gfar_swap_bits(&temp_table->fe[new_first],
1496 &temp_table->fe[old_first],
1497 &temp_table->fe[new_last],
1498 &temp_table->fe[old_last],
1499 RQFCR_QUEUE | RQFCR_CLE |
1500 RQFCR_RJE | RQFCR_AND);
1501
1502 start = i;
1503 size = 0;
1504 }
1505 size++;
1506 prev = mask_table[i].block;
1507 }
1508}
1509
1510/* Reduces the number of masks needed in the filer table to save entries
1511 * This is done by sorting the masks of a depended block. A depended block is
1512 * identified by gluing ANDs or CLE. The sorting order toggles after every
1513 * block. Of course entries in scope of a mask must change their location with
1514 * it.
1515 */
1516static int gfar_optimize_filer_masks(struct filer_table *tab)
1517{
1518 struct filer_table *temp_table;
1519 struct gfar_mask_entry *mask_table;
1520
1521 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1522 s32 ret = 0;
1523
1524 /* We need a copy of the filer table because
1525 * we want to change its order
1526 */
1527 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1528 if (temp_table == NULL)
1529 return -ENOMEM;
1530
1531 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1532 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1533
1534 if (mask_table == NULL) {
1535 ret = -ENOMEM;
1536 goto end;
1537 }
1538
1539 and_index = gfar_generate_mask_table(mask_table, tab);
1540
1541 gfar_sort_mask_table(mask_table, temp_table, and_index);
1542
1543 /* Now we can copy the data from our duplicated filer table to
1544 * the real one in the order the mask table says
1545 */
1546 for (i = 0; i < and_index; i++) {
1547 size = mask_table[i].end - mask_table[i].start + 1;
1548 gfar_copy_filer_entries(&(tab->fe[j]),
1549 &(temp_table->fe[mask_table[i].start]), size);
1550 j += size;
1551 }
1552
1553 /* And finally we just have to check for duplicated masks and drop the
1554 * second ones
1555 */
1556 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1557 if (tab->fe[i].ctrl == 0x80) {
1558 previous_mask = i++;
1559 break;
1560 }
1561 }
1562 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1563 if (tab->fe[i].ctrl == 0x80) {
1564 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1565 /* Two identical ones found!
1566 * So drop the second one!
1567 */
1568 gfar_trim_filer_entries(i, i, tab);
1569 } else
1570 /* Not identical! */
1571 previous_mask = i;
1572 }
1573 }
1574
1575 kfree(mask_table);
1576end: kfree(temp_table);
1577 return ret;
1578}
1579
1580/* Write the bit-pattern from software's buffer to hardware registers */ 1252/* Write the bit-pattern from software's buffer to hardware registers */
1581static int gfar_write_filer_table(struct gfar_private *priv, 1253static int gfar_write_filer_table(struct gfar_private *priv,
1582 struct filer_table *tab) 1254 struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1258 return -EBUSY;
1587 1259
1588 /* Fill regular entries */ 1260 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1261 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1262 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-throughs */ 1263 /* Fill the rest with fall-throughs */
1593 for (; i < MAX_FILER_IDX - 1; i++) 1264 for (; i < MAX_FILER_IDX; i++)
1594 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1265 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1595 /* Last entry must be default accept 1266 /* Last entry must be default accept
1596 * because that's what people expect 1267 * because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1624{ 1295{
1625 struct ethtool_flow_spec_container *j; 1296 struct ethtool_flow_spec_container *j;
1626 struct filer_table *tab; 1297 struct filer_table *tab;
1627 s32 i = 0;
1628 s32 ret = 0; 1298 s32 ret = 0;
1629 1299
1630 /* So index is set to zero, too! */ 1300 /* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1649 } 1319 }
1650 } 1320 }
1651 1321
1652 i = tab->index;
1653
1654 /* Optimizations to save entries */
1655 gfar_cluster_filer(tab);
1656 gfar_optimize_filer_masks(tab);
1657
1658 pr_debug("\tSummary:\n"
1659 "\tData on hardware: %d\n"
1660 "\tCompression rate: %d%%\n",
1661 tab->index, 100 - (100 * tab->index) / i);
1662
1663 /* Write everything to hardware */ 1322 /* Write everything to hardware */
1664 ret = gfar_write_filer_table(priv, tab); 1323 ret = gfar_write_filer_table(priv, tab);
1665 if (ret == -EBUSY) { 1324 if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
1725 } 1384 }
1726 1385
1727process: 1386process:
1387 priv->rx_list.count++;
1728 ret = gfar_process_filer_changes(priv); 1388 ret = gfar_process_filer_changes(priv);
1729 if (ret) 1389 if (ret)
1730 goto clean_list; 1390 goto clean_list;
1731 priv->rx_list.count++;
1732 return ret; 1391 return ret;
1733 1392
1734clean_list: 1393clean_list:
1394 priv->rx_list.count--;
1735 list_del(&temp->list); 1395 list_del(&temp->list);
1736clean_mem: 1396clean_mem:
1737 kfree(temp); 1397 kfree(temp);
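
The last gianfar hunk moves the rx_list.count increment ahead of gfar_process_filer_changes(), which walks that list to rebuild the filer table, and adds a matching decrement on the error path. A minimal userspace sketch of that account-first, roll-back-on-failure pattern; rule_list, process_rules and add_rule are illustrative names, not gianfar code:

#include <stdio.h>

struct rule_list {
	int count;		/* entries the table builder will consume */
};

static int process_rules(struct rule_list *l)
{
	return l->count > 4 ? -1 : 0;	/* pretend the table overflows */
}

static int add_rule(struct rule_list *l)
{
	l->count++;		/* count the new rule before rebuilding... */
	if (process_rules(l)) {
		l->count--;	/* ...and roll back if the rebuild fails */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct rule_list l = { .count = 4 };

	printf("add_rule: %d, count=%d\n", add_rule(&l), l.count);
	return 0;
}
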
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 			  struct mvneta_rx_queue *rxq)
 {
 	struct net_device *dev = pp->dev;
-	int rx_done, rx_filled;
+	int rx_done;
 	u32 rcvd_pkts = 0;
 	u32 rcvd_bytes = 0;
 
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 	rx_todo = rx_done;
 
 	rx_done = 0;
-	rx_filled = 0;
 
 	/* Fairness NAPI loop */
 	while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		int rx_bytes, err;
 
 		rx_done++;
-		rx_filled++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
 		data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 			continue;
 		}
 
+		/* Refill processing */
+		err = mvneta_rx_refill(pp, rx_desc);
+		if (err) {
+			netdev_err(dev, "Linux processing - Can't refill\n");
+			rxq->missed++;
+			goto err_drop_frame;
+		}
+
 		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
 		if (!skb)
 			goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		mvneta_rx_csum(pp, rx_status, skb);
 
 		napi_gro_receive(&pp->napi, skb);
-
-		/* Refill processing */
-		err = mvneta_rx_refill(pp, rx_desc);
-		if (err) {
-			netdev_err(dev, "Linux processing - Can't refill\n");
-			rxq->missed++;
-			rx_filled--;
-		}
 	}
 
 	if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 	}
 
 	/* Update rxq management counters */
-	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
 
 	return rx_done;
 }
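
The mvneta hunks reorder the RX loop so the descriptor is refilled before the old buffer is handed to the stack; a failed refill now drops the frame instead of leaving a hole in the ring, so the processed and refilled counts are always equal and the separate rx_filled counter can go. A compilable sketch of that refill-first shape, with stand-in helpers (refill, deliver, drop) rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

static bool refill(int slot)  { return slot != 3; }	/* fail once */
static void deliver(int slot) { printf("deliver %d\n", slot); }
static void drop(int slot)    { printf("drop %d\n", slot); }

int main(void)
{
	int done, todo = 5;

	for (done = 0; done < todo; done++) {
		/* refill first: if it fails, the old buffer stays on the
		 * descriptor and the frame is dropped, so every completed
		 * descriptor is also a filled one */
		if (!refill(done)) {
			drop(done);
			continue;
		}
		deliver(done);
	}
	printf("processed=%d refilled=%d\n", done, done);	/* equal */
	return 0;
}
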
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -299,6 +301,7 @@
 
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
 #define MVPP2_RX_COAL_PKTS		32
 #define MVPP2_RX_COAL_USEC		100
 
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
 	u64 tx_bytes;
 };
 
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+	struct hrtimer tx_done_timer;
+	bool timer_scheduled;
+	/* Tasklet for egress finalization */
+	struct tasklet_struct tx_done_tasklet;
+};
+
 struct mvpp2_port {
 	u8 id;
 
@@ -679,6 +690,9 @@ struct mvpp2_port {
 	u32 pending_cause_rx;
 	struct napi_struct napi;
 
+	/* Per-CPU port control */
+	struct mvpp2_port_pcpu __percpu *pcpu;
+
 	/* Flags */
 	unsigned long flags;
 
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
 	/* Array of transmitted skb */
 	struct sk_buff **tx_skb;
 
+	/* Array of transmitted buffers' physical addresses */
+	dma_addr_t *tx_buffs;
+
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;
 
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
 	/* Occupied buffers indicator */
 	atomic_t in_use;
 	int in_use_thresh;
-
-	spinlock_t lock;
 };
 
 struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 }
 
 static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
-			      struct sk_buff *skb)
+			      struct sk_buff *skb,
+			      struct mvpp2_tx_desc *tx_desc)
 {
 	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+	if (skb)
+		txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+						 tx_desc->buf_phys_addr;
 	txq_pcpu->txq_put_index++;
 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
 		txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
 	bm_pool->pkt_size = 0;
 	bm_pool->buf_num = 0;
 	atomic_set(&bm_pool->in_use, 0);
-	spin_lock_init(&bm_pool->lock);
 
 	return 0;
 }
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 		  int pkt_size)
 {
-	unsigned long flags = 0;
 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
 	int num;
 
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 		return NULL;
 	}
 
-	spin_lock_irqsave(&new_pool->lock, flags);
-
 	if (new_pool->type == MVPP2_BM_FREE)
 		new_pool->type = type;
 
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 		if (num != pkts_num) {
 			WARN(1, "pool %d: %d of %d allocated\n",
 			     new_pool->id, num, pkts_num);
-			/* We need to undo the bufs_add() allocations */
-			spin_unlock_irqrestore(&new_pool->lock, flags);
 			return NULL;
 		}
 	}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
 				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
 
-	spin_unlock_irqrestore(&new_pool->lock, flags);
-
 	return new_pool;
 }
 
 /* Initialize pools for swf */
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
-	unsigned long flags = 0;
 	int rxq;
 
 	if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 		if (!port->pool_long)
 			return -ENOMEM;
 
-		spin_lock_irqsave(&port->pool_long->lock, flags);
 		port->pool_long->port_map |= (1 << port->id);
-		spin_unlock_irqrestore(&port->pool_long->lock, flags);
 
 		for (rxq = 0; rxq < rxq_number; rxq++)
 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 		if (!port->pool_short)
 			return -ENOMEM;
 
-		spin_lock_irqsave(&port->pool_short->lock, flags);
 		port->pool_short->port_map |= (1 << port->id);
-		spin_unlock_irqrestore(&port->pool_short->lock, flags);
 
 		for (rxq = 0; rxq < rxq_number; rxq++)
 			mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
 
 	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
 		    (MVPP2_CAUSE_MISC_SUM_MASK |
-		     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
 		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
 	rxq->time_coal = usec;
 }
 
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
-	struct mvpp2_port *port = arg;
-	int queue;
-	u32 val;
-
-	for (queue = 0; queue < txq_number; queue++) {
-		struct mvpp2_tx_queue *txq = port->txqs[queue];
-
-		val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
-		      MVPP2_TRANSMITTED_THRESH_MASK;
-		mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-		mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
-	}
-}
-
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 				struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 	int i;
 
 	for (i = 0; i < num; i++) {
-		struct mvpp2_tx_desc *tx_desc = txq->descs +
-						txq_pcpu->txq_get_index;
+		dma_addr_t buf_phys_addr =
+				    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
 		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
 
 		mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 		if (!skb)
 			continue;
 
-		dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
-				 tx_desc->data_size, DMA_TO_DEVICE);
+		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
 							u32 cause)
 {
-	int queue = fls(cause >> 16) - 1;
+	int queue = fls(cause) - 1;
 
 	return port->txqs[queue];
 }
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
 		netif_tx_wake_queue(nq);
 }
 
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+	struct mvpp2_tx_queue *txq;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	unsigned int tx_todo = 0;
+
+	while (cause) {
+		txq = mvpp2_get_tx_queue(port, cause);
+		if (!txq)
+			break;
+
+		txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+		if (txq_pcpu->count) {
+			mvpp2_txq_done(port, txq, txq_pcpu);
+			tx_todo += txq_pcpu->count;
+		}
+
+		cause &= ~(1 << txq->log_id);
+	}
+	return tx_todo;
+}
+
 /* Rx/Tx queue initialization/cleanup methods */
 
 /* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
 					   sizeof(*txq_pcpu->tx_skb),
 					   GFP_KERNEL);
-		if (!txq_pcpu->tx_skb) {
-			dma_free_coherent(port->dev->dev.parent,
-					  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-					  txq->descs, txq->descs_phys);
-			return -ENOMEM;
-		}
+		if (!txq_pcpu->tx_skb)
+			goto error;
+
+		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+					     sizeof(dma_addr_t), GFP_KERNEL);
+		if (!txq_pcpu->tx_buffs)
+			goto error;
 
 		txq_pcpu->count = 0;
 		txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	}
 
 	return 0;
+
+error:
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		kfree(txq_pcpu->tx_skb);
+		kfree(txq_pcpu->tx_buffs);
+	}
+
+	dma_free_coherent(port->dev->dev.parent,
+			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
+			  txq->descs, txq->descs_phys);
+
+	return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->tx_skb);
+		kfree(txq_pcpu->tx_buffs);
 	}
 
 	if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
 		goto err_cleanup;
 	}
 
-	on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
 	return 0;
 
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
 	}
 }
 
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+	ktime_t interval;
+
+	if (!port_pcpu->timer_scheduled) {
+		port_pcpu->timer_scheduled = true;
+		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+		hrtimer_start(&port_pcpu->tx_done_timer, interval,
+			      HRTIMER_MODE_REL_PINNED);
+	}
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+	unsigned int tx_todo, cause;
+
+	if (!netif_running(dev))
+		return;
+	port_pcpu->timer_scheduled = false;
+
+	/* Process all the Tx queues */
+	cause = (1 << txq_number) - 1;
+	tx_todo = mvpp2_tx_done(port, cause);
+
+	/* Set the timer in case not all the packets were processed */
+	if (tx_todo)
+		mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+							 struct mvpp2_port_pcpu,
+							 tx_done_timer);
+
+	tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+	return HRTIMER_NORESTART;
+}
+
 /* Main RX/TX processing routines */
 
 /* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
 			/* Last descriptor */
 			tx_desc->command = MVPP2_TXD_L_DESC;
-			mvpp2_txq_inc_put(txq_pcpu, skb);
+			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
 			tx_desc->command = 0;
-			mvpp2_txq_inc_put(txq_pcpu, NULL);
+			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 		}
 	}
 
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 		/* First and Last descriptor */
 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
 		tx_desc->command = tx_cmd;
-		mvpp2_txq_inc_put(txq_pcpu, skb);
+		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
 	} else {
 		/* First but not Last */
 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
 		tx_desc->command = tx_cmd;
-		mvpp2_txq_inc_put(txq_pcpu, NULL);
+		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 
 		/* Continue with other skb fragments */
 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
 		dev_kfree_skb_any(skb);
 	}
 
+	/* Finalize TX processing */
+	if (txq_pcpu->count >= txq->done_pkts_coal)
+		mvpp2_txq_done(port, txq, txq_pcpu);
+
+	/* Set the timer in case not all frags were processed */
+	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+		mvpp2_timer_set(port_pcpu);
+	}
+
 	return NETDEV_TX_OK;
 }
 
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
 		netdev_err(dev, "tx fifo underrun error\n");
 }
 
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-	struct mvpp2_port *port = arg;
-	u32 cause_rx_tx, cause_tx, cause_misc;
+	u32 cause_rx_tx, cause_rx, cause_misc;
+	int rx_done = 0;
+	struct mvpp2_port *port = netdev_priv(napi->dev);
 
 	/* Rx/Tx cause register
 	 *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
 	 */
 	cause_rx_tx = mvpp2_read(port->priv,
 				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
 	if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
 			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
 	}
 
-	/* Release TX descriptors */
-	if (cause_tx) {
-		struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
-		struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
-		if (txq_pcpu->count)
-			mvpp2_txq_done(port, txq, txq_pcpu);
-	}
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
-	u32 cause_rx_tx, cause_rx;
-	int rx_done = 0;
-	struct mvpp2_port *port = netdev_priv(napi->dev);
-
-	on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
-	cause_rx_tx = mvpp2_read(port->priv,
-				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
 	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
 
 	/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
 static int mvpp2_stop(struct net_device *dev)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port_pcpu *port_pcpu;
+	int cpu;
 
 	mvpp2_stop_dev(port);
 	mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 
 	free_irq(port->irq, port);
+	for_each_present_cpu(cpu) {
+		port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		hrtimer_cancel(&port_pcpu->tx_done_timer);
+		port_pcpu->timer_scheduled = false;
+		tasklet_kill(&port_pcpu->tx_done_tasklet);
+	}
 	mvpp2_cleanup_rxqs(port);
 	mvpp2_cleanup_txqs(port);
 
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
 	}
 
-	on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
 	return 0;
 }
 
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 {
 	struct device_node *phy_node;
 	struct mvpp2_port *port;
+	struct mvpp2_port_pcpu *port_pcpu;
 	struct net_device *dev;
 	struct resource *res;
 	const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	int features;
 	int phy_mode;
 	int priv_common_regs_num = 2;
-	int err, i;
+	int err, i, cpu;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
 				 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	}
 	mvpp2_port_power_up(port);
 
+	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+	if (!port->pcpu) {
+		err = -ENOMEM;
+		goto err_free_txq_pcpu;
+	}
+
+	for_each_present_cpu(cpu) {
+		port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_PINNED);
+		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+		port_pcpu->timer_scheduled = false;
+
+		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+			     (unsigned long)dev);
+	}
+
 	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
 	features = NETIF_F_SG | NETIF_F_IP_CSUM;
 	dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	err = register_netdev(dev);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register netdev\n");
-		goto err_free_txq_pcpu;
+		goto err_free_port_pcpu;
 	}
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	priv->port_list[id] = port;
 	return 0;
 
+err_free_port_pcpu:
+	free_percpu(port->pcpu);
 err_free_txq_pcpu:
 	for (i = 0; i < txq_number; i++)
 		free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
 	int i;
 
 	unregister_netdev(port->dev);
+	free_percpu(port->pcpu);
 	free_percpu(port->stats);
 	for (i = 0; i < txq_number; i++)
 		free_percpu(port->txqs[i]->pcpu);
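
Most of the mvpp2 churn above replaces the hardware TX-done coalescing interrupt with a per-CPU hrtimer that kicks a tasklet to reap transmitted descriptors (the BM pool spinlock removal and the tx_buffs shadow array are separate cleanups in the same series). A condensed sketch of the timer/tasklet arrangement, using illustrative names (txdone_pcpu, txdone_arm) and an empty reap step:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

#define TXDONE_PERIOD_NS	1000000UL	/* 1 ms, as in the patch */

struct txdone_pcpu {
	struct hrtimer timer;
	bool scheduled;
	struct tasklet_struct tasklet;
};

static void txdone_arm(struct txdone_pcpu *p)
{
	if (!p->scheduled) {
		p->scheduled = true;
		hrtimer_start(&p->timer, ktime_set(0, TXDONE_PERIOD_NS),
			      HRTIMER_MODE_REL_PINNED);
	}
}

static enum hrtimer_restart txdone_timer_cb(struct hrtimer *t)
{
	struct txdone_pcpu *p = container_of(t, struct txdone_pcpu, timer);

	tasklet_schedule(&p->tasklet);	/* defer real work out of hard irq */
	return HRTIMER_NORESTART;
}

static void txdone_tasklet_cb(unsigned long data)
{
	struct txdone_pcpu *p = (struct txdone_pcpu *)data;
	unsigned int left = 0;

	p->scheduled = false;
	/* reap completed TX descriptors here, counting what remains */
	if (left)
		txdone_arm(p);	/* not done yet: re-arm for another pass */
}

static void txdone_init(struct txdone_pcpu *p)
{
	hrtimer_init(&p->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	p->timer.function = txdone_timer_cb;
	p->scheduled = false;
	tasklet_init(&p->tasklet, txdone_tasklet_cb, (unsigned long)p);
}
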
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 {
 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 	struct mlx4_cmd_context *context;
+	long ret_wait;
 	int err = 0;
 
 	down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	if (err)
 		goto out_reset;
 
-	if (!wait_for_completion_timeout(&context->done,
-					 msecs_to_jiffies(timeout))) {
+	if (op == MLX4_CMD_SENSE_PORT) {
+		ret_wait =
+			wait_for_completion_interruptible_timeout(&context->done,
+								  msecs_to_jiffies(timeout));
+		if (ret_wait < 0) {
+			context->fw_status = 0;
+			context->out_param = 0;
+			context->result = 0;
+		}
+	} else {
+		ret_wait = (long)wait_for_completion_timeout(&context->done,
+							     msecs_to_jiffies(timeout));
+	}
+	if (!ret_wait) {
 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 			  op);
 		if (op == MLX4_CMD_NOP) {
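
The mlx4_cmd_wait() change lets only SENSE_PORT be interrupted by signals while every other command still waits for completion or timeout; the distinct return conventions of the two wait primitives are the subtle part. A sketch of the pattern (wait_cmd and its arguments are illustrative, not the driver's):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_cmd(struct completion *done, bool interruptible,
		    unsigned long timeout_ms)
{
	long ret;

	if (interruptible) {
		/* <0 on signal, 0 on timeout, >0 jiffies remaining */
		ret = wait_for_completion_interruptible_timeout(done,
				msecs_to_jiffies(timeout_ms));
		if (ret < 0)
			return -EINTR;	/* caller must reset its context */
	} else {
		/* unsigned: 0 on timeout, >0 jiffies remaining */
		ret = (long)wait_for_completion_timeout(done,
				msecs_to_jiffies(timeout_ms));
	}
	return ret ? 0 : -ETIMEDOUT;
}
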
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 
 static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
 {
-	BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
 	return ring->prod == ring->cons;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					continue;
 				mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
 					 __func__, i, port);
-				s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+				s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
 				if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
 					eqe->event.port_change.port =
 						cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					continue;
 				if (i == mlx4_master_func_num(dev))
 					continue;
-				s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+				s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
 				if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
 					eqe->event.port_change.port =
 						cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
 		} else if (err == -ENOENT) {
 			err = 0;
 			continue;
+		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
+			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+				  MLX4_SINK_COUNTER_INDEX(dev));
+			err = 0;
 		} else {
 			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
 				 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	/* disable cmdif checksum */
 	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
 	err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 	if (fw->size & 0xF) {
 		addr = dest + size;
 		for (i = 0; i < (fw->size & 0xF); i++)
-			data[i] = temp[size + i];
+			data[i] = ((u8 *)temp)[size + i];
 		for (; i < 16; i++)
 			data[i] = 0;
 		ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_46:
 	case RTL_GIGA_MAC_VER_47:
 	case RTL_GIGA_MAC_VER_48:
+		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+		break;
 	case RTL_GIGA_MAC_VER_49:
 	case RTL_GIGA_MAC_VER_50:
 	case RTL_GIGA_MAC_VER_51:
-		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
 		break;
 	default:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc = NULL;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
 				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
-			dma_sync_single_for_cpu(&ndev->dev,
-						le32_to_cpu(desc->dptr),
-						ALIGN(PKT_BUF_SZ, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+					 ALIGN(PKT_BUF_SZ, 16),
+					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
-					 ALIGN(PKT_BUF_SZ, 16),
-					 DMA_FROM_DEVICE);
 			dma_addr = dma_map_single(&ndev->dev, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
 		}
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
-	u32 tccr;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_wmb();
 	desc->die_dt = DT_FSINGLE;
 
-	tccr = ravb_read(ndev, TCCR);
-	if (!(tccr & (TCCR_TSRQ0 << q)))
-		ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
 	priv->cur_tx[q]++;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
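
The ravb hunks stop unwinding the RX ring when a DMA mapping fails and instead record the failure in the descriptor itself: a data size of zero keeps the hardware from DMAing into the slot, and the completion loop skips zero-length descriptors. A sketch of that convention (the struct mirrors the driver's descriptor only loosely):

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct rx_slot {
	__le16 ds_cc;	/* buffer size; 0 marks a dead slot */
	__le32 dptr;
};

static void fill_rx_slot(struct device *dev, struct rx_slot *slot,
			 void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	slot->ds_cc = cpu_to_le16(len);
	if (dma_mapping_error(dev, addr))
		slot->ds_cc = cpu_to_le16(0);	/* no DMA, skipped on RX */
	slot->dptr = cpu_to_le32(addr);
}
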
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
 		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
 				   ROCKER_OP_FLAG_REMOVE);
 		unregister_netdev(rocker_port->dev);
+		free_netdev(rocker_port->dev);
 	}
 	kfree(rocker->ports);
 }
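
The one-line rocker fix applies the usual netdev lifetime rule: a device created with alloc_etherdev() must be released with free_netdev() after unregister_netdev(), or the struct net_device itself leaks. A sketch of the paired teardown:

#include <linux/netdevice.h>

static void teardown_port(struct net_device *dev)
{
	unregister_netdev(dev);	/* detach from the network stack */
	free_netdev(dev);	/* then release the allocation itself */
}
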
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
 #define NSS_COMMON_CLK_DIV_MASK		0x7f
 
 #define NSS_COMMON_CLK_SRC_CTRL		0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(x)
 /* Mode is coded on 1 bit but is different depending on the MAC ID:
  * MAC0: QSGMII=0 RGMII=1
  * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
 
 	/* Configure the clock src according to the mode */
 	regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-	val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+	val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
 	switch (gmac->phy_mode) {
 	case PHY_INTERFACE_MODE_RGMII:
 		val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
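
The dwmac-ipq806x fix addresses a macro used both as a bit mask and as a shift count: with OFFSET(x) defined as (1 << x), the call site that shifts a mode value by OFFSET(id) shifted by 2^id rather than id. Redefining the macro as a plain bit index and forming the mask explicitly at the call site keeps both uses consistent. A standalone illustration with made-up values:

#include <stdio.h>

#define CLK_SRC_CTRL_OFFSET(x)	(x)	/* bit index per MAC id */

int main(void)
{
	unsigned int val = 0xff;
	int id = 1, mode = 1;

	val &= ~(1u << CLK_SRC_CTRL_OFFSET(id));  /* clear MAC1's bit */
	val |= mode << CLK_SRC_CTRL_OFFSET(id);   /* shift by 1, not by 2 */
	printf("val=0x%02x\n", val);              /* prints 0xff again */
	return 0;
}
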
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
 	if (res->mac)
 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
 
-	dev_set_drvdata(device, priv);
+	dev_set_drvdata(device, priv->dev);
 
 	/* Verify driver arguments */
 	stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
 		  stmmac_pltfr_resume);
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 			struct sk_buff *skb_new;
 
 			skb_new = skb_realloc_headroom(skb, len);
-			if (!skb_new) {
-				rp->tx_errors++;
+			if (!skb_new)
 				goto out_drop;
-			}
 			kfree_skb(skb);
 			skb = skb_new;
 		} else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f335bf119ab5..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
 	struct cpsw_priv *priv = napi_to_priv(napi);
-	int num_tx, num_rx;
-
-	num_tx = cpdma_chan_process(priv->txch, 128);
+	int num_rx;
 
 	num_rx = cpdma_chan_process(priv->rxch, budget);
 	if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
-	if (num_rx || num_tx)
-		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-			 num_rx, num_tx);
+	if (num_rx)
+		cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
 
 	return num_rx;
 }
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
 	struct list_head rxhook_list_head;
 	unsigned int rx_queue_id;
 	void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
-	u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
 	struct napi_struct rx_napi;
 	struct napi_struct tx_napi;
 
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
 
 /* SGMII functions */
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
 int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
 
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64 35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ) 36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN 38#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16 39#define NETCP_MAX_MCAST_ADDR 16
39 40
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
804 if (likely(fdq == 0)) { 805 if (likely(fdq == 0)) {
805 unsigned int primary_buf_len; 806 unsigned int primary_buf_len;
806 /* Allocate a primary receive queue entry */ 807 /* Allocate a primary receive queue entry */
807 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; 808 buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
808 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
810 811
811 if (primary_buf_len <= PAGE_SIZE) { 812 bufptr = netdev_alloc_frag(primary_buf_len);
812 bufptr = netdev_alloc_frag(primary_buf_len); 813 pad[1] = primary_buf_len;
813 pad[1] = primary_buf_len;
814 } else {
815 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
816 GFP_DMA32 | __GFP_COLD);
817 pad[1] = 0;
818 }
819 814
820 if (unlikely(!bufptr)) { 815 if (unlikely(!bufptr)) {
821 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); 816 dev_warn_ratelimited(netcp->ndev_dev,
817 "Primary RX buffer alloc failed\n");
822 goto fail; 818 goto fail;
823 } 819 }
824 dma = dma_map_single(netcp->dev, bufptr, buf_len, 820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
825 DMA_TO_DEVICE); 821 DMA_TO_DEVICE);
822 if (unlikely(dma_mapping_error(netcp->dev, dma)))
823 goto fail;
824
826 pad[0] = (u32)bufptr; 825 pad[0] = (u32)bufptr;
827 826
828 } else { 827 } else {
829 /* Allocate a secondary receive queue entry */ 828 /* Allocate a secondary receive queue entry */
830 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); 829 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
831 if (unlikely(!page)) { 830 if (unlikely(!page)) {
832 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 831 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
833 goto fail; 832 goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1010 1009
1011 /* Map the linear buffer */ 1010 /* Map the linear buffer */
1012 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1011 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1013 if (unlikely(!dma_addr)) { 1012 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1014 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1013 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1015 return NULL; 1014 return NULL;
1016 } 1015 }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1546 knav_queue_disable_notify(netcp->rx_queue); 1545 knav_queue_disable_notify(netcp->rx_queue);
1547 1546
1548 /* open Rx FDQs */ 1547 /* open Rx FDQs */
1549 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 1548 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1550 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { 1549 ++i) {
1551 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); 1550 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1552 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1551 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1553 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1552 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
1617 } 1616 }
1618 mutex_unlock(&netcp_modules_lock); 1617 mutex_unlock(&netcp_modules_lock);
1619 1618
1620 netcp_rxpool_refill(netcp);
1621 napi_enable(&netcp->rx_napi); 1619 napi_enable(&netcp->rx_napi);
1622 napi_enable(&netcp->tx_napi); 1620 napi_enable(&netcp->tx_napi);
1623 knav_queue_enable_notify(netcp->tx_compl_q); 1621 knav_queue_enable_notify(netcp->tx_compl_q);
1624 knav_queue_enable_notify(netcp->rx_queue); 1622 knav_queue_enable_notify(netcp->rx_queue);
1623 netcp_rxpool_refill(netcp);
1625 netif_tx_wake_all_queues(ndev); 1624 netif_tx_wake_all_queues(ndev);
1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1625 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1627 return 0; 1626 return 0;
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
1941 netcp->rx_queue_depths[0] = 128; 1940 netcp->rx_queue_depths[0] = 128;
1942 } 1941 }
1943 1942
1944 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1945 netcp->rx_buffer_sizes,
1946 KNAV_DMA_FDQ_PER_CHAN);
1947 if (ret) {
1948 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1949 netcp->rx_buffer_sizes[0] = 1536;
1950 }
1951
1952 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1943 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1953 if (ret < 0) { 1944 if (ret < 0) {
1954 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1945 dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2103,7 @@ probe_quit:
2112static int netcp_remove(struct platform_device *pdev) 2103static int netcp_remove(struct platform_device *pdev)
2113{ 2104{
2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev); 2105 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2106 struct netcp_intf *netcp_intf, *netcp_tmp;
2115 struct netcp_inst_modpriv *inst_modpriv, *tmp; 2107 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2116 struct netcp_module *module; 2108 struct netcp_module *module;
2117 2109
@@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
2123 list_del(&inst_modpriv->inst_list); 2115 list_del(&inst_modpriv->inst_list);
2124 kfree(inst_modpriv); 2116 kfree(inst_modpriv);
2125 } 2117 }
2126 WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
2127 pdev->name);
2128 2118
2129 devm_kfree(&pdev->dev, netcp_device); 2119 /* now that all modules are removed, clean up the interfaces */
2120 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2121 &netcp_device->interface_head,
2122 interface_list) {
2123 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2124 }
2125
2126 WARN(!list_empty(&netcp_device->interface_head),
2127 "%s interface list not empty!\n", pdev->name);
2128
2130 pm_runtime_put_sync(&pdev->dev); 2129 pm_runtime_put_sync(&pdev->dev);
2131 pm_runtime_disable(&pdev->dev); 2130 pm_runtime_disable(&pdev->dev);
2132 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
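
[Note] netcp_remove() now drains the remaining interfaces itself before warning about a non-empty list. A minimal sketch of the underlying idiom, assuming a hypothetical entry type (struct item is not from the driver): the _safe iterator caches the next node, so the current entry can be unlinked and freed mid-walk.

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
};

/* Tear down every entry while iterating; list_for_each_entry_safe()
 * keeps a lookahead pointer so list_del()/kfree() cannot corrupt
 * the traversal. */
static void drain(struct list_head *head)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, head, node) {
                list_del(&it->node);
                kfree(it);
        }
}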
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902} 1902}
1903 1903
1904static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 struct gbe_slave *slave, bool set)
1906{
1907 void __iomem *sgmii_port_regs;
1908
1909 if (SLAVE_LINK_IS_XGMII(slave))
1910 return;
1911
1912 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 sgmii_port_regs = priv->sgmii_port34_regs;
1914 else
1915 sgmii_port_regs = priv->sgmii_port_regs;
1916
1917 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918}
1919
1904static void gbe_slave_stop(struct gbe_intf *intf) 1920static void gbe_slave_stop(struct gbe_intf *intf)
1905{ 1921{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev; 1922 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave; 1923 struct gbe_slave *slave = intf->slave;
1908 1924
1925 gbe_sgmii_rtreset(gbe_dev, slave, true);
1909 gbe_port_reset(slave); 1926 gbe_port_reset(slave);
1910 /* Disable forwarding */ 1927 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1928 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
1947 1964
1948 gbe_sgmii_config(priv, slave); 1965 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave); 1966 gbe_port_reset(slave);
1967 gbe_sgmii_rtreset(priv, slave, false);
1950 gbe_port_config(priv, slave, priv->rx_packet_max); 1968 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf); 1969 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */ 1970 /* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
2490{ 2508{
2491 struct gbe_slave *slave; 2509 struct gbe_slave *slave;
2492 2510
2493 for (;;) { 2511 while (!list_empty(&gbe_dev->secondary_slaves)) {
2494 slave = first_sec_slave(gbe_dev); 2512 slave = first_sec_slave(gbe_dev);
2495 if (!slave) 2513
2496 break;
2497 if (slave->phy) 2514 if (slave->phy)
2498 phy_disconnect(slave->phy); 2515 phy_disconnect(slave->phy);
2499 list_del(&slave->slave_list); 2516 list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2839 &gbe_dev->dma_chan_name); 2856 &gbe_dev->dma_chan_name);
2840 if (ret < 0) { 2857 if (ret < 0) {
2841 dev_err(dev, "missing \"tx-channel\" parameter\n"); 2858 dev_err(dev, "missing \"tx-channel\" parameter\n");
2842 ret = -ENODEV; 2859 return -EINVAL;
2843 goto quit;
2844 } 2860 }
2845 2861
2846 if (!strcmp(node->name, "gbe")) { 2862 if (!strcmp(node->name, "gbe")) {
2847 ret = get_gbe_resource_version(gbe_dev, node); 2863 ret = get_gbe_resource_version(gbe_dev, node);
2848 if (ret) 2864 if (ret)
2849 goto quit; 2865 return ret;
2850 2866
2851 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 2867 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2852 2868
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2857 else 2873 else
2858 ret = -ENODEV; 2874 ret = -ENODEV;
2859 2875
2860 if (ret)
2861 goto quit;
2862 } else if (!strcmp(node->name, "xgbe")) { 2876 } else if (!strcmp(node->name, "xgbe")) {
2863 ret = set_xgbe_ethss10_priv(gbe_dev, node); 2877 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2864 if (ret) 2878 if (ret)
2865 goto quit; 2879 return ret;
2866 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 2880 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2867 gbe_dev->ss_regs); 2881 gbe_dev->ss_regs);
2868 if (ret)
2869 goto quit;
2870 } else { 2882 } else {
2871 dev_err(dev, "unknown GBE node(%s)\n", node->name); 2883 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2872 ret = -ENODEV; 2884 ret = -ENODEV;
2873 goto quit;
2874 } 2885 }
2875 2886
2887 if (ret)
2888 return ret;
2889
2876 interfaces = of_get_child_by_name(node, "interfaces"); 2890 interfaces = of_get_child_by_name(node, "interfaces");
2877 if (!interfaces) 2891 if (!interfaces)
2878 dev_err(dev, "could not find interfaces\n"); 2892 dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2880 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 2894 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2881 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 2895 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2882 if (ret) 2896 if (ret)
2883 goto quit; 2897 return ret;
2884 2898
2885 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 2899 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2886 if (ret) 2900 if (ret)
2887 goto quit; 2901 return ret;
2888 2902
2889 /* Create network interfaces */ 2903 /* Create network interfaces */
2890 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 2904 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2899 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2913 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2900 break; 2914 break;
2901 } 2915 }
2916 of_node_put(interfaces);
2902 2917
2903 if (!gbe_dev->num_slaves) 2918 if (!gbe_dev->num_slaves)
2904 dev_warn(dev, "No network interface configured\n"); 2919 dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2911 of_node_put(secondary_ports); 2926 of_node_put(secondary_ports);
2912 2927
2913 if (!gbe_dev->num_slaves) { 2928 if (!gbe_dev->num_slaves) {
2914 dev_err(dev, "No network interface or secondary ports configured\n"); 2929 dev_err(dev,
2930 "No network interface or secondary ports configured\n");
2915 ret = -ENODEV; 2931 ret = -ENODEV;
2916 goto quit; 2932 goto free_sec_ports;
2917 } 2933 }
2918 2934
2919 memset(&ale_params, 0, sizeof(ale_params)); 2935 memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2927 if (!gbe_dev->ale) { 2943 if (!gbe_dev->ale) {
2928 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 2944 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2929 ret = -ENODEV; 2945 ret = -ENODEV;
2930 goto quit; 2946 goto free_sec_ports;
2931 } else { 2947 } else {
2932 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 2948 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2933 } 2949 }
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2943 *inst_priv = gbe_dev; 2959 *inst_priv = gbe_dev;
2944 return 0; 2960 return 0;
2945 2961
2946quit: 2962free_sec_ports:
2947 if (gbe_dev->hw_stats) 2963 free_secondary_ports(gbe_dev);
2948 devm_kfree(dev, gbe_dev->hw_stats);
2949 cpsw_ale_destroy(gbe_dev->ale);
2950 if (gbe_dev->ss_regs)
2951 devm_iounmap(dev, gbe_dev->ss_regs);
2952 of_node_put(interfaces);
2953 devm_kfree(dev, gbe_dev);
2954 return ret; 2964 return ret;
2955} 2965}
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3023 free_secondary_ports(gbe_dev); 3033 free_secondary_ports(gbe_dev);
3024 3034
3025 if (!list_empty(&gbe_dev->gbe_intf_head)) 3035 if (!list_empty(&gbe_dev->gbe_intf_head))
3026 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 3036 dev_alert(gbe_dev->dev,
3037 "unreleased ethss interfaces present\n");
3027 3038
3028 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3029 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3030 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3031 devm_kfree(gbe_dev->dev, gbe_dev);
3032 return 0; 3039 return 0;
3033} 3040}
3034 3041
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
18 18
19#include "netcp.h" 19#include "netcp.h"
20 20
21#define SGMII_SRESET_RESET BIT(0)
22#define SGMII_SRESET_RTRESET BIT(1)
23
21#define SGMII_REG_STATUS_LOCK BIT(4) 24#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0) 25#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2) 26#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) 54int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{ 55{
53 /* Soft reset */ 56 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); 57 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) 58 SGMII_SRESET_RESET);
59
60 while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
61 SGMII_SRESET_RESET) != 0x0)
56 ; 62 ;
63
57 return 0; 64 return 0;
58} 65}
59 66
67/* port is 0 based */
68bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
69{
70 u32 reg;
71 bool oldval;
72
73 /* Initiate a soft reset */
74 reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
75 oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
76 if (set)
77 reg |= SGMII_SRESET_RTRESET;
78 else
79 reg &= ~SGMII_SRESET_RTRESET;
80 sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
81 wmb();
82
83 return oldval;
84}
85
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) 86int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{ 87{
62 u32 status = 0, link = 0; 88 u32 status = 0, link = 0;
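
[Note] netcp_sgmii_rtreset() above is a read-modify-write of a single bit that also reports the bit's previous state. A generic sketch of the same shape on a plain MMIO word (toggle_mmio_bit, the readl()/writel() pair and the explicit wmb() mirroring the helper are illustrative, assuming the register tolerates RMW):

#include <linux/io.h>
#include <linux/types.h>

/* Set or clear one bit in an MMIO register, returning what it was
 * beforehand; wmb() orders the update before any later stores, as
 * the rtreset helper above does. */
static bool toggle_mmio_bit(void __iomem *reg, u32 bit, bool set)
{
        u32 val = readl(reg);
        bool old = (val & bit) != 0;

        if (set)
                val |= bit;
        else
                val &= ~bit;
        writel(val, reg);
        wmb();

        return old;
}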
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
728 dev->type = ARPHRD_AX25; 728 dev->type = ARPHRD_AX25;
729 729
730 /* Perform the low-level AX25 initialization. */ 730 /* Perform the low-level AX25 initialization. */
731 if ((err = ax_open(ax->dev))) { 731 err = ax_open(ax->dev);
732 if (err)
732 goto out_free_netdev; 733 goto out_free_netdev;
733 }
734 734
735 if (register_netdev(dev)) 735 err = register_netdev(dev);
736 if (err)
736 goto out_free_buffers; 737 goto out_free_buffers;
737 738
738 /* after register_netdev() - because else printk smashes the kernel */ 739 /* after register_netdev() - because else printk smashes the kernel */
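
[Note] Both mkiss call sites now use the usual kernel error-propagation shape: capture the return value first, then branch, so the original error code survives to the caller. A compact sketch with stand-in functions (do_a/do_b/undo_a are not mkiss API):

static int do_a(void);
static int do_b(void);
static void undo_a(void);

static int setup(void)
{
        int err;

        err = do_a();
        if (err)
                goto out;

        err = do_b();
        if (err)
                goto out_undo_a;

        return 0;

out_undo_a:
        undo_a();
out:
        return err;
}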
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
67 struct ipvl_port *port; 67 struct ipvl_port *port;
68 struct net_device *phy_dev; 68 struct net_device *phy_dev;
69 struct list_head addrs; 69 struct list_head addrs;
70 int ipv4cnt;
71 int ipv6cnt;
72 struct ipvl_pcpu_stats __percpu *pcpu_stats; 70 struct ipvl_pcpu_stats __percpu *pcpu_stats;
73 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
74 netdev_features_t sfeatures; 72 netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
106 return rcu_dereference(d->rx_handler_data); 104 return rcu_dereference(d->rx_handler_data);
107} 105}
108 106
107static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
108{
109 return rcu_dereference_bh(d->rx_handler_data);
110}
111
109static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 112static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
110{ 113{
111 return rtnl_dereference(d->rx_handler_data); 114 return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
124bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 127bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
125struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 128struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
126 const void *iaddr, bool is_v6); 129 const void *iaddr, bool is_v6);
127void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 130void ipvlan_ht_addr_del(struct ipvl_addr *addr);
128#endif /* __IPVLAN_H */ 131#endif /* __IPVLAN_H */
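
[Note] The new ipvlan_port_get_rcu_bh() exists because the transmit path runs with bottom halves disabled, where lockdep expects the rcu_read_lock_bh() flavour of accessor rather than plain rcu_dereference(). A minimal sketch of a matching reader (bh_reader is illustrative; rx_handler_data is the same net_device field the header uses):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* A reader in BH context pairs rcu_read_lock_bh() with
 * rcu_dereference_bh(); the pointer stays valid until the
 * matching unlock. */
static void bh_reader(struct net_device *dev)
{
        void *port;

        rcu_read_lock_bh();
        port = rcu_dereference_bh(dev->rx_handler_data);
        if (port) {
                /* use port under the BH read-side section */
        }
        rcu_read_unlock_bh();
}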
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
86} 86}
87 87
88void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88void ipvlan_ht_addr_del(struct ipvl_addr *addr)
89{ 89{
90 hlist_del_init_rcu(&addr->hlnode); 90 hlist_del_init_rcu(&addr->hlnode);
91 if (sync)
92 synchronize_rcu();
93} 91}
94 92
95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, 93struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
531int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 529int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
532{ 530{
533 struct ipvl_dev *ipvlan = netdev_priv(dev); 531 struct ipvl_dev *ipvlan = netdev_priv(dev);
534 struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 532 struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
535 533
536 if (!port) 534 if (!port)
537 goto out; 535 goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
153 else 153 else
154 dev->flags &= ~IFF_NOARP; 154 dev->flags &= ~IFF_NOARP;
155 155
156 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 156 list_for_each_entry(addr, &ipvlan->addrs, anode)
157 list_for_each_entry(addr, &ipvlan->addrs, anode) 157 ipvlan_ht_addr_add(ipvlan, addr);
158 ipvlan_ht_addr_add(ipvlan, addr); 158
159 }
160 return dev_uc_add(phy_dev, phy_dev->dev_addr); 159 return dev_uc_add(phy_dev, phy_dev->dev_addr);
161} 160}
162 161
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
171 170
172 dev_uc_del(phy_dev, phy_dev->dev_addr); 171 dev_uc_del(phy_dev, phy_dev->dev_addr);
173 172
174 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 173 list_for_each_entry(addr, &ipvlan->addrs, anode)
175 list_for_each_entry(addr, &ipvlan->addrs, anode) 174 ipvlan_ht_addr_del(addr);
176 ipvlan_ht_addr_del(addr, !dev->dismantle); 175
177 }
178 return 0; 176 return 0;
179} 177}
180 178
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
471 ipvlan->port = port; 469 ipvlan->port = port;
472 ipvlan->sfeatures = IPVLAN_FEATURES; 470 ipvlan->sfeatures = IPVLAN_FEATURES;
473 INIT_LIST_HEAD(&ipvlan->addrs); 471 INIT_LIST_HEAD(&ipvlan->addrs);
474 ipvlan->ipv4cnt = 0;
475 ipvlan->ipv6cnt = 0;
476 472
477 /* TODO Probably put random address here to be presented to the 473 /* TODO Probably put random address here to be presented to the
478 * world but keep using the physical-dev address for the outgoing 474 * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
508 struct ipvl_dev *ipvlan = netdev_priv(dev); 504 struct ipvl_dev *ipvlan = netdev_priv(dev);
509 struct ipvl_addr *addr, *next; 505 struct ipvl_addr *addr, *next;
510 506
511 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 507 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
512 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 508 ipvlan_ht_addr_del(addr);
513 ipvlan_ht_addr_del(addr, !dev->dismantle); 509 list_del(&addr->anode);
514 list_del(&addr->anode); 510 kfree_rcu(addr, rcu);
515 }
516 } 511 }
512
517 list_del_rcu(&ipvlan->pnode); 513 list_del_rcu(&ipvlan->pnode);
518 unregister_netdevice_queue(dev, head); 514 unregister_netdevice_queue(dev, head);
519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); 515 netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 623 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
628 addr->atype = IPVL_IPV6; 624 addr->atype = IPVL_IPV6;
629 list_add_tail(&addr->anode, &ipvlan->addrs); 625 list_add_tail(&addr->anode, &ipvlan->addrs);
630 ipvlan->ipv6cnt++; 626
631 /* If the interface is not up, the address will be added to the hash 627 /* If the interface is not up, the address will be added to the hash
632 * list by ipvlan_open. 628 * list by ipvlan_open.
633 */ 629 */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
645 if (!addr) 641 if (!addr)
646 return; 642 return;
647 643
648 ipvlan_ht_addr_del(addr, true); 644 ipvlan_ht_addr_del(addr);
649 list_del(&addr->anode); 645 list_del(&addr->anode);
650 ipvlan->ipv6cnt--;
651 WARN_ON(ipvlan->ipv6cnt < 0);
652 kfree_rcu(addr, rcu); 646 kfree_rcu(addr, rcu);
653 647
654 return; 648 return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
661 struct net_device *dev = (struct net_device *)if6->idev->dev; 655 struct net_device *dev = (struct net_device *)if6->idev->dev;
662 struct ipvl_dev *ipvlan = netdev_priv(dev); 656 struct ipvl_dev *ipvlan = netdev_priv(dev);
663 657
658 /* FIXME IPv6 autoconf calls us from bh without RTNL */
659 if (in_softirq())
660 return NOTIFY_DONE;
661
664 if (!netif_is_ipvlan(dev)) 662 if (!netif_is_ipvlan(dev))
665 return NOTIFY_DONE; 663 return NOTIFY_DONE;
666 664
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 697 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
700 addr->atype = IPVL_IPV4; 698 addr->atype = IPVL_IPV4;
701 list_add_tail(&addr->anode, &ipvlan->addrs); 699 list_add_tail(&addr->anode, &ipvlan->addrs);
702 ipvlan->ipv4cnt++; 700
703 /* If the interface is not up, the address will be added to the hash 701 /* If the interface is not up, the address will be added to the hash
704 * list by ipvlan_open. 702 * list by ipvlan_open.
705 */ 703 */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
717 if (!addr) 715 if (!addr)
718 return; 716 return;
719 717
720 ipvlan_ht_addr_del(addr, true); 718 ipvlan_ht_addr_del(addr);
721 list_del(&addr->anode); 719 list_del(&addr->anode);
722 ipvlan->ipv4cnt--;
723 WARN_ON(ipvlan->ipv4cnt < 0);
724 kfree_rcu(addr, rcu); 720 kfree_rcu(addr, rcu);
725 721
726 return; 722 return;
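
[Note] ipvlan now unlinks an address and defers its free with kfree_rcu() instead of blocking in synchronize_rcu() on every delete, which is what let the sync argument disappear from ipvlan_ht_addr_del(). A sketch of the idiom with a hypothetical entry type carrying an embedded rcu_head:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct addr_entry {
        struct list_head node;
        struct rcu_head rcu;    /* reclamation is deferred through this */
};

/* Unlink now, free after a grace period: concurrent RCU readers
 * that still hold the pointer stay safe, and the writer never
 * sleeps waiting for them. */
static void addr_remove(struct addr_entry *e)
{
        list_del_rcu(&e->node);
        kfree_rcu(e, rcu);
}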
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b933bb5a8d5..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
719 struct virtio_net_hdr vnet_hdr = { 0 }; 719 struct virtio_net_hdr vnet_hdr = { 0 };
720 int vnet_hdr_len = 0; 720 int vnet_hdr_len = 0;
721 int copylen = 0; 721 int copylen = 0;
722 int depth;
722 bool zerocopy = false; 723 bool zerocopy = false;
723 size_t linear; 724 size_t linear;
724 ssize_t n; 725 ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
804 805
805 skb_probe_transport_header(skb, ETH_HLEN); 806 skb_probe_transport_header(skb, ETH_HLEN);
806 807
808 /* Move network header to the right position for VLAN tagged packets */
809 if ((skb->protocol == htons(ETH_P_8021Q) ||
810 skb->protocol == htons(ETH_P_8021AD)) &&
811 __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
812 skb_set_network_header(skb, depth);
813
807 rcu_read_lock(); 814 rcu_read_lock();
808 vlan = rcu_dereference(q->vlan); 815 vlan = rcu_dereference(q->vlan);
809 /* copy skb_ubuf_info for callback when skb has no error */ 816 /* copy skb_ubuf_info for callback when skb has no error */
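
[Note] The macvtap hunk calls __vlan_get_protocol() mostly for its side effect of computing the VLAN header depth, then points the network header past the tag(s). A sketch of that use (fix_network_header is illustrative; a return of 0 means the tag chain could not be parsed, so the skb is left untouched):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* For 802.1Q/802.1ad frames, move the network header past the VLAN
 * headers so skb_network_header() lands on the encapsulated
 * protocol. */
static void fix_network_header(struct sk_buff *skb)
{
        int depth;

        if ((skb->protocol == htons(ETH_P_8021Q) ||
             skb->protocol == htons(ETH_P_8021AD)) &&
            __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
                skb_set_network_header(skb, depth);
}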
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
102 102
103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); 103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
104 104
105 if (len < 0) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_length_errors++;
108 goto enqueue_again;
109 }
110
105 skb_put(skb, len); 111 skb_put(skb, len);
106 skb->protocol = eth_type_trans(skb, ndev); 112 skb->protocol = eth_type_trans(skb, ndev);
107 skb->ip_summed = CHECKSUM_NONE; 113 skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
121 return; 127 return;
122 } 128 }
123 129
130enqueue_again:
124 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); 131 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
125 if (rc) { 132 if (rc) {
126 dev_kfree_skb(skb); 133 dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
184 191
185 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
186 ndev->mtu + ETH_HLEN); 193 ndev->mtu + ETH_HLEN);
187 if (rc == -EINVAL) { 194 if (rc) {
188 dev_kfree_skb(skb); 195 dev_kfree_skb(skb);
189 goto err; 196 goto err;
190 } 197 }
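
[Note] ntb_netdev rejects a negative completion length before skb_put() because len arrives as a signed int from the transport: skb_put() takes an unsigned length, so an error value would wrap to a huge size and trip the skb overflow check. Minimal sketch of the guard (rx_complete is a stand-in):

#include <linux/skbuff.h>

/* Validate the signed status before handing it to skb_put(),
 * which only accepts an unsigned length. */
static int rx_complete(struct sk_buff *skb, int len)
{
        if (len < 0)
                return len;     /* error reported by the transport */

        skb_put(skb, len);
        return 0;
}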
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
164 return ret; 164 return ret;
165 } 165 }
166 166
167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
170 DP83867_DEVADDR, phydev->addr); 170 DP83867_DEVADDR, phydev->addr);
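
[Note] The dp83867 change is the classic interval test: with '||', any interface value satisfies at least one of the two comparisons, so the RGMII-specific block ran for every mode; both bounds must be ANDed. Plain-C sketch:

#include <stdbool.h>

/* Membership in [lo, hi] needs both comparisons ANDed; OR-ing them
 * is satisfied by every value when lo <= hi. */
static bool in_range(int mode, int lo, int hi)
{
        return mode >= lo && mode <= hi;
}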
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
421{ 421{
422 struct phy_device *phydev = to_phy_device(dev); 422 struct phy_device *phydev = to_phy_device(dev);
423 struct phy_driver *phydrv = to_phy_driver(drv); 423 struct phy_driver *phydrv = to_phy_driver(drv);
424 const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
425 int i;
424 426
425 if (of_driver_match_device(dev, drv)) 427 if (of_driver_match_device(dev, drv))
426 return 1; 428 return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
428 if (phydrv->match_phy_device) 430 if (phydrv->match_phy_device)
429 return phydrv->match_phy_device(phydev); 431 return phydrv->match_phy_device(phydev);
430 432
431 return (phydrv->phy_id & phydrv->phy_id_mask) == 433 if (phydev->is_c45) {
432 (phydev->phy_id & phydrv->phy_id_mask); 434 for (i = 1; i < num_ids; i++) {
435 if (!(phydev->c45_ids.devices_in_package & (1 << i)))
436 continue;
437
438 if ((phydrv->phy_id & phydrv->phy_id_mask) ==
439 (phydev->c45_ids.device_ids[i] &
440 phydrv->phy_id_mask))
441 return 1;
442 }
443 return 0;
444 } else {
445 return (phydrv->phy_id & phydrv->phy_id_mask) ==
446 (phydev->phy_id & phydrv->phy_id_mask);
447 }
433} 448}
434 449
435#ifdef CONFIG_PM 450#ifdef CONFIG_PM
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..9d43460ce3c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
761 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
762 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7f6419ebb5e1..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.08.0 (2015/01/13)" 30#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -1902,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
1902static void rtl8152_tx_timeout(struct net_device *netdev) 1902static void rtl8152_tx_timeout(struct net_device *netdev)
1903{ 1903{
1904 struct r8152 *tp = netdev_priv(netdev); 1904 struct r8152 *tp = netdev_priv(netdev);
1905 int i;
1906 1905
1907 netif_warn(tp, tx_err, netdev, "Tx timeout\n"); 1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n");
1908 for (i = 0; i < RTL8152_MAX_TX; i++) 1907
1909 usb_unlink_urb(tp->tx_info[i].urb); 1908 usb_queue_reset_device(tp->intf);
1910} 1909}
1911 1910
1912static void rtl8152_set_rx_mode(struct net_device *netdev) 1911static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2075,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
2075{ 2074{
2076 int i, ret = 0; 2075 int i, ret = 0;
2077 2076
2078 napi_disable(&tp->napi);
2079 INIT_LIST_HEAD(&tp->rx_done); 2077 INIT_LIST_HEAD(&tp->rx_done);
2080 for (i = 0; i < RTL8152_MAX_RX; i++) { 2078 for (i = 0; i < RTL8152_MAX_RX; i++) {
2081 INIT_LIST_HEAD(&tp->rx_info[i].list); 2079 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2083,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
2083 if (ret) 2081 if (ret)
2084 break; 2082 break;
2085 } 2083 }
2086 napi_enable(&tp->napi);
2087 2084
2088 if (ret && ++i < RTL8152_MAX_RX) { 2085 if (ret && ++i < RTL8152_MAX_RX) {
2089 struct list_head rx_queue; 2086 struct list_head rx_queue;
@@ -2166,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
2166 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2163 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2167 return -ENODEV; 2164 return -ENODEV;
2168 2165
2166 usb_disable_lpm(tp->udev);
2169 set_tx_qlen(tp); 2167 set_tx_qlen(tp);
2170 rtl_set_eee_plus(tp); 2168 rtl_set_eee_plus(tp);
2171 r8153_set_rx_early_timeout(tp); 2169 r8153_set_rx_early_timeout(tp);
@@ -2337,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2337 device_set_wakeup_enable(&tp->udev->dev, false); 2335 device_set_wakeup_enable(&tp->udev->dev, false);
2338} 2336}
2339 2337
2338static void r8153_u1u2en(struct r8152 *tp, bool enable)
2339{
2340 u8 u1u2[8];
2341
2342 if (enable)
2343 memset(u1u2, 0xff, sizeof(u1u2));
2344 else
2345 memset(u1u2, 0x00, sizeof(u1u2));
2346
2347 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2348}
2349
2350static void r8153_u2p3en(struct r8152 *tp, bool enable)
2351{
2352 u32 ocp_data;
2353
2354 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2355 if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
2356 ocp_data |= U2P3_ENABLE;
2357 else
2358 ocp_data &= ~U2P3_ENABLE;
2359 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2360}
2361
2362static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2363{
2364 u32 ocp_data;
2365
2366 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2367 if (enable)
2368 ocp_data |= PWR_EN | PHASE2_EN;
2369 else
2370 ocp_data &= ~(PWR_EN | PHASE2_EN);
2371 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2372
2373 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2374 ocp_data &= ~PCUT_STATUS;
2375 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2376}
2377
2378static bool rtl_can_wakeup(struct r8152 *tp)
2379{
2380 struct usb_device *udev = tp->udev;
2381
2382 return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
2383}
2384
2340static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) 2385static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2341{ 2386{
2342 if (enable) { 2387 if (enable) {
2343 u32 ocp_data; 2388 u32 ocp_data;
2344 2389
2390 r8153_u1u2en(tp, false);
2391 r8153_u2p3en(tp, false);
2392
2345 __rtl_set_wol(tp, WAKE_ANY); 2393 __rtl_set_wol(tp, WAKE_ANY);
2346 2394
2347 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2395 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2353,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2353 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2401 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2354 } else { 2402 } else {
2355 __rtl_set_wol(tp, tp->saved_wolopts); 2403 __rtl_set_wol(tp, tp->saved_wolopts);
2404 r8153_u2p3en(tp, true);
2405 r8153_u1u2en(tp, true);
2356 } 2406 }
2357} 2407}
2358 2408
@@ -2599,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2599 set_bit(PHY_RESET, &tp->flags); 2649 set_bit(PHY_RESET, &tp->flags);
2600} 2650}
2601 2651
2602static void r8153_u1u2en(struct r8152 *tp, bool enable)
2603{
2604 u8 u1u2[8];
2605
2606 if (enable)
2607 memset(u1u2, 0xff, sizeof(u1u2));
2608 else
2609 memset(u1u2, 0x00, sizeof(u1u2));
2610
2611 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2612}
2613
2614static void r8153_u2p3en(struct r8152 *tp, bool enable)
2615{
2616 u32 ocp_data;
2617
2618 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2619 if (enable)
2620 ocp_data |= U2P3_ENABLE;
2621 else
2622 ocp_data &= ~U2P3_ENABLE;
2623 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2624}
2625
2626static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2627{
2628 u32 ocp_data;
2629
2630 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2631 if (enable)
2632 ocp_data |= PWR_EN | PHASE2_EN;
2633 else
2634 ocp_data &= ~(PWR_EN | PHASE2_EN);
2635 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2636
2637 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2638 ocp_data &= ~PCUT_STATUS;
2639 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2640}
2641
2642static void r8153_first_init(struct r8152 *tp) 2652static void r8153_first_init(struct r8152 *tp)
2643{ 2653{
2644 u32 ocp_data; 2654 u32 ocp_data;
@@ -2781,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
2781 r8153_disable_aldps(tp); 2791 r8153_disable_aldps(tp);
2782 rtl_disable(tp); 2792 rtl_disable(tp);
2783 r8153_enable_aldps(tp); 2793 r8153_enable_aldps(tp);
2794 usb_enable_lpm(tp->udev);
2784} 2795}
2785 2796
2786static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) 2797static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2901,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
2901 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2912 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2902 return; 2913 return;
2903 2914
2915 r8153_u1u2en(tp, false);
2904 r8153_disable_aldps(tp); 2916 r8153_disable_aldps(tp);
2905 r8153_first_init(tp); 2917 r8153_first_init(tp);
2906 r8153_enable_aldps(tp); 2918 r8153_enable_aldps(tp);
2919 r8153_u2p3en(tp, true);
2920 r8153_u1u2en(tp, true);
2921 usb_enable_lpm(tp->udev);
2907} 2922}
2908 2923
2909static void rtl8153_down(struct r8152 *tp) 2924static void rtl8153_down(struct r8152 *tp)
@@ -2914,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
2914 } 2929 }
2915 2930
2916 r8153_u1u2en(tp, false); 2931 r8153_u1u2en(tp, false);
2932 r8153_u2p3en(tp, false);
2917 r8153_power_cut_en(tp, false); 2933 r8153_power_cut_en(tp, false);
2918 r8153_disable_aldps(tp); 2934 r8153_disable_aldps(tp);
2919 r8153_enter_oob(tp); 2935 r8153_enter_oob(tp);
@@ -2932,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
2932 if (!netif_carrier_ok(netdev)) { 2948 if (!netif_carrier_ok(netdev)) {
2933 tp->rtl_ops.enable(tp); 2949 tp->rtl_ops.enable(tp);
2934 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2950 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2951 napi_disable(&tp->napi);
2935 netif_carrier_on(netdev); 2952 netif_carrier_on(netdev);
2936 rtl_start_rx(tp); 2953 rtl_start_rx(tp);
2954 napi_enable(&tp->napi);
2937 } 2955 }
2938 } else { 2956 } else {
2939 if (netif_carrier_ok(netdev)) { 2957 if (netif_carrier_ok(netdev)) {
@@ -3252,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
3252 msleep(20); 3270 msleep(20);
3253 } 3271 }
3254 3272
3273 usb_disable_lpm(tp->udev);
3255 r8153_u2p3en(tp, false); 3274 r8153_u2p3en(tp, false);
3256 3275
3257 if (tp->version == RTL_VER_04) { 3276 if (tp->version == RTL_VER_04) {
@@ -3319,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
3319 r8153_enable_aldps(tp); 3338 r8153_enable_aldps(tp);
3320 r8152b_enable_fc(tp); 3339 r8152b_enable_fc(tp);
3321 rtl_tally_reset(tp); 3340 rtl_tally_reset(tp);
3341 r8153_u2p3en(tp, true);
3342}
3343
3344static int rtl8152_pre_reset(struct usb_interface *intf)
3345{
3346 struct r8152 *tp = usb_get_intfdata(intf);
3347 struct net_device *netdev;
3348
3349 if (!tp)
3350 return 0;
3351
3352 netdev = tp->netdev;
3353 if (!netif_running(netdev))
3354 return 0;
3355
3356 napi_disable(&tp->napi);
3357 clear_bit(WORK_ENABLE, &tp->flags);
3358 usb_kill_urb(tp->intr_urb);
3359 cancel_delayed_work_sync(&tp->schedule);
3360 if (netif_carrier_ok(netdev)) {
3361 netif_stop_queue(netdev);
3362 mutex_lock(&tp->control);
3363 tp->rtl_ops.disable(tp);
3364 mutex_unlock(&tp->control);
3365 }
3366
3367 return 0;
3368}
3369
3370static int rtl8152_post_reset(struct usb_interface *intf)
3371{
3372 struct r8152 *tp = usb_get_intfdata(intf);
3373 struct net_device *netdev;
3374
3375 if (!tp)
3376 return 0;
3377
3378 netdev = tp->netdev;
3379 if (!netif_running(netdev))
3380 return 0;
3381
3382 set_bit(WORK_ENABLE, &tp->flags);
3383 if (netif_carrier_ok(netdev)) {
3384 mutex_lock(&tp->control);
3385 tp->rtl_ops.enable(tp);
3386 rtl8152_set_rx_mode(netdev);
3387 mutex_unlock(&tp->control);
3388 netif_wake_queue(netdev);
3389 }
3390
3391 napi_enable(&tp->napi);
3392
3393 return 0;
3322} 3394}
3323 3395
3324static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3396static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3374,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
3374 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3446 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3375 rtl_runtime_suspend_enable(tp, false); 3447 rtl_runtime_suspend_enable(tp, false);
3376 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3448 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3449 napi_disable(&tp->napi);
3377 set_bit(WORK_ENABLE, &tp->flags); 3450 set_bit(WORK_ENABLE, &tp->flags);
3378 if (netif_carrier_ok(tp->netdev)) 3451 if (netif_carrier_ok(tp->netdev))
3379 rtl_start_rx(tp); 3452 rtl_start_rx(tp);
3453 napi_enable(&tp->napi);
3380 } else { 3454 } else {
3381 tp->rtl_ops.up(tp); 3455 tp->rtl_ops.up(tp);
3382 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3456 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3403,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3403 if (usb_autopm_get_interface(tp->intf) < 0) 3477 if (usb_autopm_get_interface(tp->intf) < 0)
3404 return; 3478 return;
3405 3479
3406 mutex_lock(&tp->control); 3480 if (!rtl_can_wakeup(tp)) {
3407 3481 wol->supported = 0;
3408 wol->supported = WAKE_ANY; 3482 wol->wolopts = 0;
3409 wol->wolopts = __rtl_get_wol(tp); 3483 } else {
3410 3484 mutex_lock(&tp->control);
3411 mutex_unlock(&tp->control); 3485 wol->supported = WAKE_ANY;
3486 wol->wolopts = __rtl_get_wol(tp);
3487 mutex_unlock(&tp->control);
3488 }
3412 3489
3413 usb_autopm_put_interface(tp->intf); 3490 usb_autopm_put_interface(tp->intf);
3414} 3491}
@@ -3418,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3418 struct r8152 *tp = netdev_priv(dev); 3495 struct r8152 *tp = netdev_priv(dev);
3419 int ret; 3496 int ret;
3420 3497
3498 if (!rtl_can_wakeup(tp))
3499 return -EOPNOTSUPP;
3500
3421 ret = usb_autopm_get_interface(tp->intf); 3501 ret = usb_autopm_get_interface(tp->intf);
3422 if (ret < 0) 3502 if (ret < 0)
3423 goto out_set_wol; 3503 goto out_set_wol;
@@ -4059,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
4059 goto out1; 4139 goto out1;
4060 } 4140 }
4061 4141
4142 if (!rtl_can_wakeup(tp))
4143 __rtl_set_wol(tp, 0);
4144
4062 tp->saved_wolopts = __rtl_get_wol(tp); 4145 tp->saved_wolopts = __rtl_get_wol(tp);
4063 if (tp->saved_wolopts) 4146 if (tp->saved_wolopts)
4064 device_set_wakeup_enable(&udev->dev, true); 4147 device_set_wakeup_enable(&udev->dev, true);
@@ -4132,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
4132 .suspend = rtl8152_suspend, 4215 .suspend = rtl8152_suspend,
4133 .resume = rtl8152_resume, 4216 .resume = rtl8152_resume,
4134 .reset_resume = rtl8152_resume, 4217 .reset_resume = rtl8152_resume,
4218 .pre_reset = rtl8152_pre_reset,
4219 .post_reset = rtl8152_post_reset,
4135 .supports_autosuspend = 1, 4220 .supports_autosuspend = 1,
4136 .disable_hub_initiated_lpm = 1, 4221 .disable_hub_initiated_lpm = 1,
4137}; 4222};
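
[Note] rtl8152_tx_timeout() now requests a full device reset through usb_queue_reset_device() instead of unlinking the tx URBs by hand, and the new .pre_reset/.post_reset hooks are what the USB core invokes around that reset so the driver can quiesce and restart itself. A skeleton of the wiring (heavily abridged; a real usb_driver also needs probe/disconnect and an id table):

#include <linux/usb.h>

static int my_pre_reset(struct usb_interface *intf)
{
        /* stop I/O: kill URBs, park the queue, disable the device */
        return 0;
}

static int my_post_reset(struct usb_interface *intf)
{
        /* bring the device back up and wake the queue */
        return 0;
}

/* A driver that calls usb_queue_reset_device() should supply both
 * hooks; without them the core unbinds and rebinds the interface
 * around the reset instead. */
static struct usb_driver my_driver = {
        .name           = "sketch",
        .pre_reset      = my_pre_reset,
        .post_reset     = my_post_reset,
};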
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1756 /* Do we support "hardware" checksums? */ 1756 /* Do we support "hardware" checksums? */
1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1758 /* This opens up the world of extra features. */ 1758 /* This opens up the world of extra features. */
1759 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1759 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1760 if (csum) 1760 if (csum)
1761 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1761 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1762 1762
1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1828 else 1828 else
1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1829 vi->hdr_len = sizeof(struct virtio_net_hdr);
1830 1830
1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
1832 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1832 vi->any_header_sg = true; 1833 vi->any_header_sg = true;
1833 1834
1834 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
589 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
590 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
591 chan->netdev->dma = chan->cosa->dma; 591 chan->netdev->dma = chan->cosa->dma;
592 if (register_hdlc_device(chan->netdev)) { 592 err = register_hdlc_device(chan->netdev);
593 if (err) {
593 netdev_warn(chan->netdev, 594 netdev_warn(chan->netdev,
594 "register_hdlc_device() failed\n"); 595 "register_hdlc_device() failed\n");
595 free_netdev(chan->netdev); 596 free_netdev(chan->netdev);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
279 return; 279 return;
280 case AR9300_DEVID_QCA956X: 280 case AR9300_DEVID_QCA956X:
281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
282 return;
282 } 283 }
283 284
284 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
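
[Note] The ath9k hunk adds the return the QCA956X case was missing, so it no longer falls through to the generic AR_SREV probe below and overwrites the version it just set. A tiny sketch of the failure mode (names generic):

static int probe_hw(void);

static int ver;

static void set_version(int devid)
{
        switch (devid) {
        case 1:
                ver = 100;
                return;
        case 2:
                ver = 200;
                return; /* the missing statement: without it control
                         * falls out of the switch into probe_hw() */
        }

        ver = probe_hw();       /* fallback, must not run for known ids */
}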
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
3728 switch (phy->rev) { 3728 switch (phy->rev) {
3729 case 6: 3729 case 6:
3730 case 5: 3730 case 5:
3731 if (sprom->fem.ghz5.extpa_gain == 3) 3731 if (sprom->fem.ghz2.extpa_gain == 3)
3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; 3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
3733 /* fall through */ 3733 /* fall through */
3734 case 4: 3734 case 4:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
438#define RX_QUEUE_MASK 255 438#define RX_QUEUE_MASK 255
439#define RX_QUEUE_SIZE_LOG 8 439#define RX_QUEUE_SIZE_LOG 8
440 440
441/*
442 * RX related structures and functions
443 */
444#define RX_FREE_BUFFERS 64
445#define RX_LOW_WATERMARK 8
446
441/** 447/**
442 * struct iwl_rb_status - reserve buffer status 448 * struct iwl_rb_status - reserve buffer status
443 * host memory mapped FH registers 449 * host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
540 hw_addr = (const u8 *)(mac_override + 540 hw_addr = (const u8 *)(mac_override +
541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
542 542
543 /* The byte order is little endian 16 bit, meaning 214365 */ 543 /*
544 data->hw_addr[0] = hw_addr[1]; 544 * Store the MAC address from MAO section.
545 data->hw_addr[1] = hw_addr[0]; 545 * No byte swapping is required in MAO section
546 data->hw_addr[2] = hw_addr[3]; 546 */
547 data->hw_addr[3] = hw_addr[2]; 547 memcpy(data->hw_addr, hw_addr, ETH_ALEN);
548 data->hw_addr[4] = hw_addr[5];
549 data->hw_addr[5] = hw_addr[4];
550 548
551 /* 549 /*
552 * Force the use of the OTP MAC address in case of reserved MAC 550 * Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
660 * iwl_umac_scan_flags 660 * iwl_umac_scan_flags
661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
662 * can be preempted by other scan requests with higher priority. 662 * can be preempted by other scan requests with higher priority.
663 * The low priority scan is aborted. 663 * The low priority scan will be resumed when the higher priority scan is
664 * completed.
664 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
665 * when scan starts. 666 * when scan starts.
666 */ 667 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1023 cmd->scan_priority = 1023 cmd->scan_priority =
1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1025 1025
1026 if (iwl_mvm_scan_total_iterations(params) == 0) 1026 if (iwl_mvm_scan_total_iterations(params) == 1)
1027 cmd->ooc_priority = 1027 cmd->ooc_priority =
1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1029 else 1029 else
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1109 cmd->uid = cpu_to_le32(uid); 1109 cmd->uid = cpu_to_le32(uid);
1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1111 1111
1112 if (type == IWL_MVM_SCAN_SCHED)
1113 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1114
1112 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
1113 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1114 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1402 u8 sta_id; 1402 u8 sta_id;
1403 int ret; 1403 int ret;
1404 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1404 1405
1405 lockdep_assert_held(&mvm->mutex); 1406 lockdep_assert_held(&mvm->mutex);
1406 1407
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1467end: 1468end:
1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1469 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1470 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1470 sta->addr, ret); 1471 sta ? sta->addr : zero_addr, ret);
1471 return ret; 1472 return ret;
1472} 1473}
1473 1474
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
86{ 86{
87 lockdep_assert_held(&mvm->time_event_lock); 87 lockdep_assert_held(&mvm->time_event_lock);
88 88
89 if (te_data->id == TE_MAX) 89 if (!te_data->vif)
90 return; 90 return;
91 91
92 list_del(&te_data->list); 92 list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
252 252
253 if (info->band == IEEE80211_BAND_2GHZ && 253 if (info->band == IEEE80211_BAND_2GHZ &&
254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
255 rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
256 else 256 else
257 rate_flags = 257 rate_flags =
258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
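
[Note] In the iwlwifi hunk, cfg->non_shared_ant appears to already be an antenna bitmask, so wrapping it in BIT() treated a mask as a bit index and selected the wrong antenna; compare the unchanged branch below it, where mgmt_last_antenna_idx really is an index. Plain-C sketch of the distinction (ANT_POS is a hypothetical field offset; BIT() mirrors the kernel macro):

#define BIT(n)          (1U << (n))
#define ANT_POS         14

/* idx is a bit position, mask is already a bitfield value; only the
 * former may be fed through BIT(). */
static unsigned int flags_from_index(unsigned int idx)
{
        return BIT(idx) << ANT_POS;
}

static unsigned int flags_from_mask(unsigned int mask)
{
        return mask << ANT_POS; /* BIT(mask) here would be the bug */
}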
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
377 379
378/* 7265 Series */ 380/* 7265 Series */
379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 428 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 429 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 433 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-op-mode.h" 45#include "iwl-op-mode.h"
46 46
47/*
48 * RX related structures and functions
49 */
50#define RX_NUM_QUEUES 1
51#define RX_POST_REQ_ALLOC 2
52#define RX_CLAIM_REQ_ALLOC 8
53#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
54#define RX_LOW_WATERMARK 8
55
56struct iwl_host_cmd; 47struct iwl_host_cmd;
57 48
58/*This file includes the declaration that are internal to the 49/*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
86 * struct iwl_rxq - Rx queue 77 * struct iwl_rxq - Rx queue
87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 78 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 79 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
80 * @pool:
81 * @queue:
89 * @read: Shared index to newest available Rx buffer 82 * @read: Shared index to newest available Rx buffer
90 * @write: Shared index to oldest written Rx packet 83 * @write: Shared index to oldest written Rx packet
91 * @free_count: Number of pre-allocated buffers in rx_free 84 * @free_count: Number of pre-allocated buffers in rx_free
92 * @used_count: Number of RBDs handled to allocator to use for allocation
93 * @write_actual: 85 * @write_actual:
94 * @rx_free: list of RBDs with allocated RB ready for use 86 * @rx_free: list of free SKBs for use
95 * @rx_used: list of RBDs with no RB attached 87 * @rx_used: List of Rx buffers with no SKB
96 * @need_update: flag to indicate we need to update read/write index 88 * @need_update: flag to indicate we need to update read/write index
97 * @rb_stts: driver's pointer to receive buffer status 89 * @rb_stts: driver's pointer to receive buffer status
98 * @rb_stts_dma: bus address of receive buffer status 90 * @rb_stts_dma: bus address of receive buffer status
99 * @lock: 91 * @lock:
100 * @pool: initial pool of iwl_rx_mem_buffer for the queue
101 * @queue: actual rx queue
102 * 92 *
103 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 93 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
104 */ 94 */
105struct iwl_rxq { 95struct iwl_rxq {
106 __le32 *bd; 96 __le32 *bd;
107 dma_addr_t bd_dma; 97 dma_addr_t bd_dma;
98 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
99 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
108 u32 read; 100 u32 read;
109 u32 write; 101 u32 write;
110 u32 free_count; 102 u32 free_count;
111 u32 used_count;
112 u32 write_actual; 103 u32 write_actual;
113 struct list_head rx_free; 104 struct list_head rx_free;
114 struct list_head rx_used; 105 struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
116 struct iwl_rb_status *rb_stts; 107 struct iwl_rb_status *rb_stts;
117 dma_addr_t rb_stts_dma; 108 dma_addr_t rb_stts_dma;
118 spinlock_t lock; 109 spinlock_t lock;
119 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
120 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
121};
122
123/**
124 * struct iwl_rb_allocator - Rx allocator
125 * @pool: initial pool of allocator
126 * @req_pending: number of requests the allocator had not processed yet
127 * @req_ready: number of requests honored and ready for claiming
128 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
129 * the queue. This is a list of &struct iwl_rx_mem_buffer
130 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
131 * of &struct iwl_rx_mem_buffer
132 * @lock: protects the rbd_allocated and rbd_empty lists
133 * @alloc_wq: work queue for background calls
134 * @rx_alloc: work struct for background calls
135 */
136struct iwl_rb_allocator {
137 struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
138 atomic_t req_pending;
139 atomic_t req_ready;
140 struct list_head rbd_allocated;
141 struct list_head rbd_empty;
142 spinlock_t lock;
143 struct workqueue_struct *alloc_wq;
144 struct work_struct rx_alloc;
145}; 110};
146 111
147struct iwl_dma_ptr { 112struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
285/** 250/**
286 * struct iwl_trans_pcie - PCIe transport specific data 251 * struct iwl_trans_pcie - PCIe transport specific data
287 * @rxq: all the RX queue data 252 * @rxq: all the RX queue data
288 * @rba: allocator for RX replenishing 253 * @rx_replenish: work that will be called when buffers need to be allocated
289 * @drv - pointer to iwl_drv 254 * @drv - pointer to iwl_drv
290 * @trans: pointer to the generic transport area 255 * @trans: pointer to the generic transport area
291 * @scd_base_addr: scheduler sram base address in SRAM 256 * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
308 */ 273 */
309struct iwl_trans_pcie { 274struct iwl_trans_pcie {
310 struct iwl_rxq rxq; 275 struct iwl_rxq rxq;
311 struct iwl_rb_allocator rba; 276 struct work_struct rx_replenish;
312 struct iwl_trans *trans; 277 struct iwl_trans *trans;
313 struct iwl_drv *drv; 278 struct iwl_drv *drv;
314 279
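Aside: the right-hand side of these hunks replaces a dedicated allocator object with a single deferred work item. A minimal sketch of that work_struct pattern, with illustrative names (my_rx and my_rx_refill_work are not from the driver), might look like:

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>

	struct my_rx {
		spinlock_t lock;
		unsigned int free_count;
		struct work_struct refill;	/* replaces a whole allocator struct */
	};

	static void my_rx_refill_work(struct work_struct *work)
	{
		struct my_rx *rx = container_of(work, struct my_rx, refill);

		/* process context: a real implementation would allocate pages
		 * with GFP_KERNEL and move them onto the free list */
		spin_lock(&rx->lock);
		rx->free_count++;	/* placeholder for the actual refill */
		spin_unlock(&rx->lock);
	}

	static void my_rx_init(struct my_rx *rx)
	{
		spin_lock_init(&rx->lock);
		INIT_WORK(&rx->refill, my_rx_refill_work);
	}

The work item is later queued with schedule_work(&rx->refill) and torn down with cancel_work_sync(&rx->refill), exactly the calls visible in the rx.c hunks below.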
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- *   When the interrupt handler is called, the request is processed.
- *   The page is either stolen - transferred to the upper layer
- *   or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- *   count, detaches the RBD and transfers it to the queue used list.
- *   When there are two used RBDs - they are transferred to the allocator empty
- *   list. Work is then scheduled for the allocator to start allocating
- *   eight buffers.
- *   When there are another 6 used RBDs - they are transferred to the allocator
- *   empty list and the driver tries to claim the pre-allocated buffers and
- *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- *   until ready.
- *   When there are 8+ buffers in the free list - either from allocation or from
- *   8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- *   the allocator has initial pool in the size of num_queues*(8-2) - the
- *   maximum missing RBDs per allocation request (request posted with 2
- *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- *   The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq. The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
@@ -105,32 +92,18 @@
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock.
- *                            Used only during initialization.
+ *                            iwl_pcie_rxq_restock
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.
- * iwl_pcie_rx_allocator()    Background work for allocating pages.
+ *                            the WRITE index. If insufficient rx_free buffers
+ *                            are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
- *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
  *
  */
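The restored comment above describes a single feedback loop: the free count is compared against RX_LOW_WATERMARK, and crossing it schedules the replenish work. Reduced to its core (the field names follow this driver; the helper name and the watermark value are illustrative):

	#include <linux/workqueue.h>

	#define RX_LOW_WATERMARK 8	/* assumed small value; the driver defines it elsewhere */

	struct rxq_sketch {
		unsigned int free_count;	/* buffers ready for the HW */
		struct work_struct *replenish;	/* trans_pcie->rx_replenish */
	};

	/* called at the end of the restock path, queue lock already dropped */
	static void check_low_watermark(struct rxq_sketch *q)
	{
		if (q->free_count <= RX_LOW_WATERMARK)
			schedule_work(q->replenish);
	}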
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		schedule_work(&trans_pcie->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 }
 
 /*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct page *page;
-	gfp_t gfp_mask = GFP_KERNEL;
-
-	if (rxq->free_count > RX_LOW_WATERMARK)
-		gfp_mask |= __GFP_NOWARN;
-
-	if (trans_pcie->rx_page_order > 0)
-		gfp_mask |= __GFP_COMP;
-
-	/* Alloc a new receive buffer */
-	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-	if (!page) {
-		if (net_ratelimit())
-			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
-				       trans_pcie->rx_page_order);
-		/* Issue an error if the hardware has consumed more than half
-		 * of its free buffer list and we don't have enough
-		 * pre-allocated buffers.
-		 */
-		if (rxq->free_count <= RX_LOW_WATERMARK &&
-		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-		    net_ratelimit())
-			IWL_CRIT(trans,
-				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-				 rxq->free_count);
-		return NULL;
-	}
-	return page;
-}
-
-/*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 		}
 		spin_unlock(&rxq->lock);
 
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (trans_pcie->rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		page = iwl_pcie_rx_alloc_page(trans);
-		if (!page)
+		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
+					       trans_pcie->rx_page_order);
+
+			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+			    net_ratelimit())
+				IWL_CRIT(trans, "Failed to alloc_pages with %s."
+					 "Only %u free buffers remaining.\n",
+					 priority == GFP_ATOMIC ?
+					 "GFP_ATOMIC" : "GFP_KERNEL",
+					 rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
 			return;
+		}
 
 		spin_lock(&rxq->lock);
 
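The reworked allocator above folds the gfp policy into the caller-supplied priority. A standalone sketch of just that mask construction (the function and parameter names are illustrative, not the driver's):

	#include <linux/gfp.h>

	static struct page *alloc_rx_page(gfp_t priority, unsigned int order,
					  unsigned int free_count,
					  unsigned int low_watermark)
	{
		gfp_t gfp_mask = priority;	/* GFP_KERNEL or GFP_ATOMIC */

		if (free_count > low_watermark)
			gfp_mask |= __GFP_NOWARN;	/* pool healthy: failure not worth a warning */
		if (order > 0)
			gfp_mask |= __GFP_COMP;		/* multi-page buffers must be compound */

		return alloc_pages(gfp_mask, order);
	}

Callers in process context pass GFP_KERNEL and may sleep; the interrupt path passes GFP_ATOMIC, which is why the in-irq replenish call later in this file uses GFP_ATOMIC.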
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
 	lockdep_assert_held(&rxq->lock);
 
-	for (i = 0; i < RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except for during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-	iwl_pcie_rxq_alloc_rbs(trans);
+	iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
 	iwl_pcie_rxq_restock(trans);
 }
 
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	while (atomic_read(&rba->req_pending)) {
-		int i;
-		struct list_head local_empty;
-		struct list_head local_allocated;
-
-		INIT_LIST_HEAD(&local_allocated);
-		spin_lock(&rba->lock);
-		/* swap out the entire rba->rbd_empty to a local list */
-		list_replace_init(&rba->rbd_empty, &local_empty);
-		spin_unlock(&rba->lock);
-
-		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-			struct iwl_rx_mem_buffer *rxb;
-			struct page *page;
-
-			/* List should never be empty - each reused RBD is
-			 * returned to the list, and initial pool covers any
-			 * possible gap between the time the page is allocated
-			 * to the time the RBD is added.
-			 */
-			BUG_ON(list_empty(&local_empty));
-			/* Get the first rxb from the rbd list */
-			rxb = list_first_entry(&local_empty,
-					       struct iwl_rx_mem_buffer, list);
-			BUG_ON(rxb->page);
-
-			/* Alloc a new receive buffer */
-			page = iwl_pcie_rx_alloc_page(trans);
-			if (!page)
-				continue;
-			rxb->page = page;
-
-			/* Get physical address of the RB */
-			rxb->page_dma = dma_map_page(trans->dev, page, 0,
-					PAGE_SIZE << trans_pcie->rx_page_order,
-					DMA_FROM_DEVICE);
-			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-				rxb->page = NULL;
-				__free_pages(page, trans_pcie->rx_page_order);
-				continue;
-			}
-			/* dma address must be no more than 36 bits */
-			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-			/* and also 256 byte aligned! */
-			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-			/* move the allocated entry to the out list */
-			list_move(&rxb->list, &local_allocated);
-			i++;
-		}
-
-		spin_lock(&rba->lock);
-		/* add the allocated rbds to the allocator allocated list */
-		list_splice_tail(&local_allocated, &rba->rbd_allocated);
-		/* add the unused rbds back to the allocator empty list */
-		list_splice_tail(&local_empty, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_dec(&rba->req_pending);
-		atomic_inc(&rba->req_ready);
-	}
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
- *
- * Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-				     struct iwl_rx_mem_buffer
-				     *out[RX_CLAIM_REQ_ALLOC])
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	if (atomic_dec_return(&rba->req_ready) < 0) {
-		atomic_inc(&rba->req_ready);
-		IWL_DEBUG_RX(trans,
-			     "Allocation request not ready, pending requests = %d\n",
-			     atomic_read(&rba->req_pending));
-		return -ENOMEM;
-	}
-
-	spin_lock(&rba->lock);
-	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-		/* Get next free Rx buffer, remove it from free list */
-		out[i] = list_first_entry(&rba->rbd_allocated,
-					  struct iwl_rx_mem_buffer, list);
-		list_del(&out[i]->list);
-	}
-	spin_unlock(&rba->lock);
-
-	return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
-	struct iwl_rb_allocator *rba_p =
-		container_of(data, struct iwl_rb_allocator, rx_alloc);
 	struct iwl_trans_pcie *trans_pcie =
-		container_of(rba_p, struct iwl_trans_pcie, rba);
+		container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_pcie_rx_allocator(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 	spin_lock_init(&rxq->lock);
-	spin_lock_init(&rba->lock);
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
-	rxq->used_count = 0;
 
-	for (i = 0; i < RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	INIT_LIST_HEAD(&rba->rbd_allocated);
-	INIT_LIST_HEAD(&rba->rbd_empty);
-
-	for (i = 0; i < RX_POOL_SIZE; i++)
-		list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	for (i = 0; i < RX_POOL_SIZE; i++) {
-		if (!rba->pool[i].page)
-			continue;
-		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-			       PAGE_SIZE << trans_pcie->rx_page_order,
-			       DMA_FROM_DEVICE);
-		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-		rba->pool[i].page = NULL;
-	}
-}
-
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 
 	if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 		if (err)
 			return err;
 	}
-	if (!rba->alloc_wq)
-		rba->alloc_wq = alloc_workqueue("rb_allocator",
-						WQ_HIGHPRI | WQ_UNBOUND, 1);
-	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
-	spin_lock(&rba->lock);
-	atomic_set(&rba->req_pending, 0);
-	atomic_set(&rba->req_ready, 0);
-	/* free all first - we might be reconfigured for a different size */
-	iwl_pcie_rx_free_rba(trans);
-	iwl_pcie_rx_init_rba(rba);
-	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 
+	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 	}
 
-	cancel_work_sync(&rba->rx_alloc);
-	if (rba->alloc_wq) {
-		destroy_workqueue(rba->alloc_wq);
-		rba->alloc_wq = NULL;
-	}
-
-	spin_lock(&rba->lock);
-	iwl_pcie_rx_free_rba(trans);
-	spin_unlock(&rba->lock);
+	cancel_work_sync(&trans_pcie->rx_replenish);
 
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	rxq->rb_stts = NULL;
 }
 
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
-				  struct iwl_rx_mem_buffer *rxb,
-				  struct iwl_rxq *rxq)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	/* Count the used RBDs */
-	rxq->used_count++;
-
-	/* Move the RBD to the used list, will be moved to allocator in batches
-	 * before claiming or posting a request*/
-	list_add_tail(&rxb->list, &rxq->rx_used);
-
-	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
-	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-	 * after but we still need to post another request.
-	 */
-	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-		/* Move the 2 RBDs to the allocator ownership.
-		   Allocator has another 6 from pool for the request completion*/
-		spin_lock(&rba->lock);
-		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_inc(&rba->req_pending);
-		queue_work(rba->alloc_wq, &rba->rx_alloc);
-	}
-}
-
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				  struct iwl_rx_mem_buffer *rxb)
 {
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+			list_add_tail(&rxb->list, &rxq->rx_used);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+		list_add_tail(&rxb->list, &rxq->rx_used);
 }
 
 /*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i, j;
+	u32 r, i;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;
 
 restart:
 	spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 
@@ -968,48 +739,29 @@ restart:
 		iwl_pcie_rx_handle_rb(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
-
-		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-		 * try to claim the pre-allocated buffers from the allocator */
-		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
-			struct iwl_rb_allocator *rba = &trans_pcie->rba;
-			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-			/* Add the remaining 6 empty RBDs for allocator use */
-			spin_lock(&rba->lock);
-			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-			spin_unlock(&rba->lock);
-
-			/* If not ready - continue, will try to reclaim later.
-			 * No need to reschedule work - allocator exits only on
-			 * success */
-			if (!iwl_pcie_rx_allocator_get(trans, out)) {
-				/* If success - then RX_CLAIM_REQ_ALLOC
-				 * buffers were retrieved and should be added
-				 * to free list */
-				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-					list_add_tail(&out[j]->list,
-						      &rxq->rx_free);
-					rxq->free_count++;
-				}
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode wont assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+				count = 0;
+				goto restart;
 			}
 		}
-		/* handle restock for two cases:
-		 * - we just pulled buffers from the allocator
-		 * - we have 8+ unstolen pages accumulated */
-		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
-			rxq->read = i;
-			spin_unlock(&rxq->lock);
-			iwl_pcie_rxq_restock(trans);
-			goto restart;
-		}
 	}
 
 	/* Backtrack one entry */
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 
+	if (fill_rx)
+		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+	else
+		iwl_pcie_rxq_restock(trans);
+
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
 }
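The fill_rx heuristic above hinges on one modular distance: how far the hardware read pointer has run ahead of the last position the driver wrote back. A sketch of that computation (RX_QUEUE_SIZE is the driver's real ring-size constant; the helper name is illustrative):

	#include <linux/types.h>

	#define RX_QUEUE_SIZE 256

	static int slots_pending_refill(u32 hw_read, u32 write_actual)
	{
		int total_empty = hw_read - write_actual;

		if (total_empty < 0)	/* read pointer wrapped past us */
			total_empty += RX_QUEUE_SIZE;

		return total_empty;
	}

When more than half the ring is empty, the handler restocks every eighth frame inside the loop instead of waiting until the end, so the ucode never runs out of buffers.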
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-	if (!trans->cfg->apmg_not_supported)
+	if (trans->cfg->apmg_not_supported)
 		return;
 
 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
 			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 					  APMG_PCIDEV_STT_VAL_WAKE_ME);
-		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
 			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 				    CSR_HW_IF_CONFIG_REG_PREPARE |
 				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+			mdelay(1);
+			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		}
 		mdelay(5);
 	}
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	if (ret >= 0)
 		return 0;
 
+	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	msleep(1);
+
 	for (iter = 0; iter < 10; iter++) {
 		/* If HW is not ready, prepare the conditions to check again */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 		do {
 			ret = iwl_pcie_set_hw_ready(trans);
-			if (ret >= 0)
-				return 0;
+			if (ret >= 0) {
+				ret = 0;
+				goto out;
+			}
 
 			usleep_range(200, 1000);
 			t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
 	return ret;
 }
 
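Both trans.c hunks above bracket a polling sequence with a set/clear of the same debug-link bit and route every exit through one label. The shape of that idiom, with hypothetical stand-ins for the register helpers (the stubs below are illustrative, not the driver's API):

	#include <linux/delay.h>
	#include <linux/errno.h>

	static int attempts_left = 3;			/* stub hardware state */
	static void set_pwr_mgmt_disabled(void) { }	/* stands in for iwl_set_bit() */
	static void clear_pwr_mgmt_disabled(void) { }	/* stands in for iwl_clear_bit() */
	static int hw_ready(void) { return --attempts_left <= 0 ? 0 : -EAGAIN; }

	static int prepare_card(void)
	{
		int iter, t, ret = -ETIMEDOUT;

		set_pwr_mgmt_disabled();
		msleep(1);

		for (iter = 0; iter < 10; iter++) {
			for (t = 0; t < 400; t += 200) {
				ret = hw_ready();
				if (ret >= 0) {
					ret = 0;
					goto out;	/* success still clears the bit */
				}
				usleep_range(200, 1000);
			}
		}
	out:
		clear_pwr_mgmt_disabled();		/* runs on every exit path */
		return ret;
	}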
@@ -2459,7 +2475,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
 	u16 pci_cmd;
-	int err;
+	int ret;
 
 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
 				&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2490,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	spin_lock_init(&trans_pcie->ref_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-	err = pci_enable_device(pdev);
-	if (err)
+	ret = pci_enable_device(pdev);
+	if (ret)
 		goto out_no_pci;
 
 	if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2507,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (!err)
-			err = pci_set_consistent_dma_mask(pdev,
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!ret)
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (ret) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!ret)
+			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
 		/* both attempts failed: */
-		if (err) {
+		if (ret) {
 			dev_err(&pdev->dev, "No suitable DMA available\n");
 			goto out_pci_disable_device;
 		}
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err) {
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret) {
 		dev_err(&pdev->dev, "pci_request_regions failed\n");
 		goto out_pci_disable_device;
 	}
@@ -2515,7 +2531,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
 		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-		err = -ENODEV;
+		ret = -ENODEV;
 		goto out_pci_release_regions;
 	}
 
@@ -2527,9 +2543,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->pci_dev = pdev;
 	iwl_disable_interrupts(trans);
 
-	err = pci_enable_msi(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
 		/* enable rfkill interrupt: hw bug w/a */
 		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2563,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 */
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
 		unsigned long flags;
-		int ret;
 
 		trans->hw_rev = (trans->hw_rev & 0xfff0) |
 				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+		ret = iwl_pcie_prepare_card_hw(trans);
+		if (ret) {
+			IWL_WARN(trans, "Exit HW not ready\n");
+			goto out_pci_disable_msi;
+		}
+
 		/*
 		 * in-order to recognize C step driver should read chip version
 		 * id located at the AUX bus MISC address space.
@@ -2591,13 +2612,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	/* Initialize the wait queue for commands */
 	init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-	if (iwl_pcie_alloc_ict(trans))
+	ret = iwl_pcie_alloc_ict(trans);
+	if (ret)
 		goto out_pci_disable_msi;
 
-	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 				   iwl_pcie_irq_handler,
 				   IRQF_SHARED, DRV_NAME, trans);
-	if (err) {
+	if (ret) {
 		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
 		goto out_free_ict;
 	}
@@ -2617,5 +2639,5 @@ out_pci_disable_device:
 	pci_disable_device(pdev);
 out_no_pci:
 	iwl_trans_free(trans);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }
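The err -> ret rename above is more than cosmetic: the 8000-family block previously declared its own inner ret, so an early failure there could not reach the common ERR_PTR() return. With one variable, every failure, including the new iwl_pcie_prepare_card_hw() check, funnels through the same unwind labels. The general idiom, with stub steps standing in for the PCI calls:

	#include <linux/err.h>

	static int step_a(void) { return 0; }	/* stub for pci_enable_device() etc. */
	static int step_b(void) { return 0; }
	static void undo_a(void) { }

	static void *setup_object(void)
	{
		static int object;	/* placeholder for the allocated trans */
		int ret;

		ret = step_a();
		if (ret)
			goto out_none;

		ret = step_b();
		if (ret)
			goto out_undo_a;

		return &object;

	out_undo_a:
		undo_a();
	out_none:
		return ERR_PTR(ret);	/* callers test with IS_ERR() */
	}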
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr) {
-		if (txq->wd_timeout)
-			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * set the timer in remainder so that the timer will
+			 * be armed with the right value when the station will
+			 * wake up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		iwl_trans_pcie_ref(trans);
 	}
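The tx.c change distinguishes an active queue from a frozen one when the first frame is queued. A sketch of just that branch (the struct fields mirror the ones used above; the struct and wrapper names are illustrative):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct txq_sketch {
		struct timer_list stuck_timer;
		unsigned long wd_timeout;		/* jiffies; 0 = watchdog off */
		unsigned long frozen_expiry_remainder;
		bool frozen;
	};

	static void arm_watchdog_on_first_frame(struct txq_sketch *txq)
	{
		if (!txq->wd_timeout)
			return;

		if (!txq->frozen)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		else
			/* station asleep: bank the timeout so the timer is
			 * armed with the right value at wake-up time */
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}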
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
 	u32 len;
 	u32 num_blocks;
+	const u8 *fw;
 	const struct firmware *fw_entry = NULL;
 	u32 block_size = dev->tx_blk_size;
 	int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
 
+	/* Copy firmware into DMA-accessible memory */
+	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-	status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }
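Both rsi hunks apply the same recipe: request_firmware() hands back a const, potentially non-DMA-safe buffer, so the bytes are duplicated with kmemdup() before the card transfer and freed afterwards. A self-contained sketch (load_fw and the push callback are illustrative; note it also releases fw_entry on the -ENOMEM path, which the sdio hunk above does not):

	#include <linux/firmware.h>
	#include <linux/slab.h>

	static int load_fw(struct device *dev, const char *name,
			   int (*push)(const u8 *data, size_t len))
	{
		const struct firmware *fw_entry;
		u8 *fw;
		int ret;

		ret = request_firmware(&fw_entry, name, dev);
		if (ret)
			return ret;

		/* DMA-safe, writable copy of the read-only firmware image */
		fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
		if (!fw) {
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		ret = push(fw, fw_entry->size);

		kfree(fw);
		release_firmware(fw_entry);
		return ret;
	}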
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
 
+	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+	struct rtl_tcb_desc tcb_desc;
 
-	if (skb)
-		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+	if (skb) {
+		memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+	}
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
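The rtlwifi fix swaps a NULL descriptor for a zeroed on-stack one, since some adapter_tx() implementations dereference the descriptor unconditionally. The pattern in isolation (the struct fields here are stand-ins, not rtlwifi's):

	#include <linux/string.h>

	struct desc_sketch {
		int queue_index;
		int use_driver_rate;
	};

	static void send_mgmt_frame(void (*tx)(struct desc_sketch *desc))
	{
		struct desc_sketch desc;

		memset(&desc, 0, sizeof(desc));	/* all-zero = safe defaults */
		tx(&desc);			/* callee may dereference freely */
	}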
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
 		   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
 	atomic_dec(&queue->inflight_packets);
+
+	/* Wake the dealloc thread _after_ decrementing inflight_packets so
+	 * that if kthread_stop() has already been called, the dealloc thread
+	 * does not wait forever with nothing to wake it.
+	 */
+	wake_up(&queue->dealloc_wq);
 }
 
 int xenvif_schedulable(struct xenvif *vif)
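This interface.c hunk (paired with the wake_up() removal in netback.c further down) moves the wake-up to after the inflight counter drops, and the ordering is the whole point. A sketch of the producer side, with illustrative names:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct zc_queue {
		atomic_t inflight_packets;
		wait_queue_head_t dealloc_wq;
	};

	static void zc_packet_complete(struct zc_queue *q)
	{
		atomic_dec(&q->inflight_packets);
		/* wake only after the decrement: a dealloc thread that is
		 * being stopped re-checks the counter when woken, sees zero,
		 * and can exit instead of sleeping forever */
		wake_up(&q->dealloc_wq);
	}

A stopping thread typically waits with wait_event(q->dealloc_wq, atomic_read(&q->inflight_packets) == 0 || kthread_should_stop()); waking before the decrement could let it re-sleep on a stale non-zero count with no further wake-up coming.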
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 							struct sk_buff *skb,
 							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop)
+							struct gnttab_map_grant_ref *gop,
+							unsigned int frag_overflow,
+							struct sk_buff *nskb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	int start;
 	pending_ring_idx_t index;
-	unsigned int nr_slots, frag_overflow = 0;
+	unsigned int nr_slots;
 
-	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-	 */
-	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-		shinfo->nr_frags = MAX_SKB_FRAGS;
-	}
 	nr_slots = shinfo->nr_frags;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	}
 
 	if (frag_overflow) {
-		struct sk_buff *nskb = xenvif_alloc_skb(0);
-		if (unlikely(nskb == NULL)) {
-			if (net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Can't allocate the frag_list skb.\n");
-			return NULL;
-		}
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-	struct sk_buff *skb;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+	struct sk_buff *skb, *nskb;
 	int ret;
+	unsigned int frag_overflow;
 
 	while (skb_queue_len(&queue->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		skb_shinfo(skb)->nr_frags = ret;
+		if (data_len < txreq.size)
+			skb_shinfo(skb)->nr_frags++;
+		/* At this point shinfo->nr_frags is in fact the number of
+		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+		 */
+		frag_overflow = 0;
+		nskb = NULL;
+		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+			nskb = xenvif_alloc_skb(0);
+			if (unlikely(nskb == NULL)) {
+				kfree_skb(skb);
+				xenvif_tx_err(queue, &txreq, idx);
+				if (net_ratelimit())
+					netdev_err(queue->vif->dev,
+						   "Can't allocate the frag_list skb.\n");
+				break;
+			}
+		}
+
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
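Hoisting the overflow check into xenvif_tx_build_gops() means the frag_list skb is allocated while the request (txreq, idx) is still in scope, so failure can be reported with xenvif_tx_err() instead of unwinding from deep inside xenvif_get_requests(). The clamp itself, sketched (the helper name is illustrative):

	#include <linux/skbuff.h>

	/* Returns the spill-over skb, or NULL. If *overflow > 0 and the
	 * return is NULL, the allocation failed and the caller must drop
	 * the request. */
	static struct sk_buff *clamp_tx_slots(struct sk_buff *skb,
					      unsigned int *overflow)
	{
		struct sk_buff *nskb = NULL;

		*overflow = 0;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			*overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = alloc_skb(0, GFP_ATOMIC);	/* frag_list carrier */
		}
		return nskb;
	}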
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
+				kfree_skb(nskb);
 				break;
 			}
 		}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		(*copy_ops)++;
 
-		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			skb_shinfo(skb)->nr_frags++;
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		queue->pending_cons++;
 
-		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-		if (request_gop == NULL) {
-			kfree_skb(skb);
-			xenvif_tx_err(queue, &txreq, idx);
-			break;
-		}
-		gop = request_gop;
+		gop = xenvif_get_requests(queue, skb, txfrags, gop,
+					  frag_overflow, nskb);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		smp_wmb();
 		queue->dealloc_prod++;
 	} while (ubuf);
-	wake_up(&queue->dealloc_wq);
 	spin_unlock_irqrestore(&queue->callback_lock, flags);
 
 	if (likely(zerocopy_success))
@@ -1566,13 +1570,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 		smp_rmb();
 
 		while (dc != dp) {
-			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
 			pending_idx =
 				queue->dealloc_ring[pending_index(dc++)];
 
-			pending_idx_release[gop-queue->tx_unmap_ops] =
+			pending_idx_release[gop - queue->tx_unmap_ops] =
 				pending_idx;
-			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
 				queue->mmap_pages[pending_idx];
 			gnttab_set_unmap_op(gop,
 					    idx_to_kaddr(queue, pending_idx),