author     Linus Torvalds <torvalds@linux-foundation.org>    2013-12-02 13:09:07 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-12-02 13:09:07 -0500
commit     5fc92de3c7106d17f85c245383ba072d810d6bb0 (patch)
tree       1e27a49e82ca9736144cc73ad5544a7ddf906644 /drivers/net
parent     b0d8d2292160bb63de1972361ebed100c64b5b37 (diff)
parent     833846e8fa0c51fb3e47bca8adfdd7b10643b737 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:
 "Here is a pile of bug fixes that accumulated while I was in Europe"

 1) In fixing kernel leaks to userspace during copying of socket
    addresses, we broke a case that used to work, namely the user
    providing a buffer larger than the in-kernel generic socket address
    structure.  This broke Ruby amongst other things.  Fix from Dan
    Carpenter.

 2) Fix regression added by byte queue limit support in 8139cp driver,
    from Yang Yingliang.

 3) The addition of MSG_SENDPAGE_NOTLAST buggered up a few sendpage
    implementations, they should just treat it the same as MSG_MORE.
    Fix from Richard Weinberger and Shawn Landden.

 4) Handle icmpv4 errors received on ipv6 SIT tunnels correctly, from
    Oussama Ghorbel.  In particular we should send an ICMPv6 unreachable
    in such situations.

 5) Fix some regressions in the recent genetlink fixes, in particular
    get the pmcraid driver to use the new safer interfaces correctly.
    From Johannes Berg.

 6) macvtap was converted to use a per-cpu set of statistics, but some
    code was still bumping tx_dropped elsewhere.  From Jason Wang.

 7) Fix build failure of xen-netback due to missing include on some
    architectures, from Andy Whitecroft.

 8) macvtap double counts received packets in statistics, fix from Vlad
    Yasevich.

 9) Fix various cases of using *_STATS_BH() when *_STATS() is more
    appropriate.  From Eric Dumazet and Hannes Frederic Sowa.

10) Pktgen ipsec mode doesn't update the ipv4 header length and checksum
    properly after encapsulation.  Fix from Fan Du.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  net/mlx4_en: Remove selftest TX queues empty condition
  {pktgen, xfrm} Update IPv4 header total len and checksum after tranformation
  virtio_net: make all RX paths handle erors consistently
  virtio_net: fix error handling for mergeable buffers
  virtio_net: Fixed a trivial typo (fitler --> filter)
  netem: fix gemodel loss generator
  netem: fix loss 4 state model
  netem: missing break in ge loss generator
  net/hsr: Support iproute print_opt ('ip -details ...')
  net/hsr: Very small fix of comment style.
  MAINTAINERS: Added net/hsr/ maintainer
  ipv6: fix possible seqlock deadlock in ip6_finish_output2
  ixgbe: Make ixgbe_identify_qsfp_module_generic static
  ixgbe: turn NETIF_F_HW_L2FW_DOFFLOAD off by default
  ixgbe: ixgbe_fwd_ring_down needs to be static
  e1000: fix possible reset_task running after adapter down
  e1000: fix lockdep warning in e1000_reset_task
  e1000: prevent oops when adapter is being closed and reset simultaneously
  igb: Fixed Wake On LAN support
  inet: fix possible seqlock deadlocks
  ...
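For illustration, the following is a minimal C sketch of the ideas behind points 3 and 10 above. It is not code from the patches in this merge, and both helper names (normalize_sendpage_flags, fixup_ipv4_after_xfrm) are invented for the example; only the flags and the ip_hdr()/ip_send_check() helpers are real kernel interfaces.

    #include <linux/socket.h>   /* MSG_MORE, MSG_SENDPAGE_NOTLAST */
    #include <linux/skbuff.h>
    #include <linux/ip.h>       /* struct iphdr, ip_hdr() */
    #include <net/ip.h>         /* ip_send_check() */

    /* Point 3 (sketch): a sendpage implementation that only understands
     * MSG_MORE can fold MSG_SENDPAGE_NOTLAST -- set by the splice path for
     * every page except the last one -- into MSG_MORE up front.
     */
    static int normalize_sendpage_flags(int flags)   /* hypothetical helper */
    {
            if (flags & MSG_SENDPAGE_NOTLAST)
                    flags |= MSG_MORE;
            return flags;
    }

    /* Point 10 (sketch): after a transform such as ESP has grown the packet,
     * the outer IPv4 header must carry the new total length and a freshly
     * computed checksum, or receivers will drop the frame.
     */
    static void fixup_ipv4_after_xfrm(struct sk_buff *skb)  /* hypothetical helper */
    {
            struct iphdr *iph = ip_hdr(skb);

            iph->tot_len = htons(skb->len);  /* length after encapsulation */
            ip_send_check(iph);              /* recompute the header checksum */
    }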
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c  4
-rw-r--r--  drivers/net/bonding/bond_options.c  13
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  4
-rw-r--r--  drivers/net/bonding/bonding.h  7
-rw-r--r--  drivers/net/can/c_can/c_can.c  22
-rw-r--r--  drivers/net/can/flexcan.c  2
-rw-r--r--  drivers/net/can/sja1000/sja1000.c  17
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  25
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h  1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c  7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  16
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h  7
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c  60
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c  7
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c  5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c  5
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h  2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c  78
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h  22
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c  11
-rw-r--r--  drivers/net/macvtap.c  12
-rw-r--r--  drivers/net/phy/vitesse.c  15
-rw-r--r--  drivers/net/team/team.c  4
-rw-r--r--  drivers/net/virtio_net.c  136
-rw-r--r--  drivers/net/xen-netback/netback.c  1
29 files changed, 263 insertions(+), 240 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4dd5ee2a34cc..36eab0c4fb33 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
4110 if (!miimon) { 4110 if (!miimon) {
4111 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4111 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4112 pr_warning("Forcing miimon to 100msec\n"); 4112 pr_warning("Forcing miimon to 100msec\n");
4113 miimon = 100; 4113 miimon = BOND_DEFAULT_MIIMON;
4114 } 4114 }
4115 } 4115 }
4116 4116
@@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params)
4147 if (!miimon) { 4147 if (!miimon) {
4148 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); 4148 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
4149 pr_warning("Forcing miimon to 100msec\n"); 4149 pr_warning("Forcing miimon to 100msec\n");
4150 miimon = 100; 4150 miimon = BOND_DEFAULT_MIIMON;
4151 } 4151 }
4152 } 4152 }
4153 4153
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9a5223c7b4d1..ea6f640782b7 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
45 return -EPERM; 45 return -EPERM;
46 } 46 }
47 47
48 if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) { 48 if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
49 pr_err("%s: %s mode is incompatible with arp monitoring.\n", 49 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
50 bond->dev->name, bond_mode_tbl[mode].modename); 50 bond->dev->name, bond_mode_tbl[mode].modename);
51 return -EINVAL; 51 /* disable arp monitoring */
52 bond->params.arp_interval = 0;
53 /* set miimon to default value */
54 bond->params.miimon = BOND_DEFAULT_MIIMON;
55 pr_info("%s: Setting MII monitoring interval to %d.\n",
56 bond->dev->name, bond->params.miimon);
52 } 57 }
53 58
54 /* don't cache arp_validate between modes */ 59 /* don't cache arp_validate between modes */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ec2a7e8c8a9..abf5e106edc5 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -523,9 +523,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
523 ret = -EINVAL; 523 ret = -EINVAL;
524 goto out; 524 goto out;
525 } 525 }
526 if (bond->params.mode == BOND_MODE_ALB || 526 if (BOND_NO_USES_ARP(bond->params.mode)) {
527 bond->params.mode == BOND_MODE_TLB ||
528 bond->params.mode == BOND_MODE_8023AD) {
529 pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", 527 pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
530 bond->dev->name, bond->dev->name); 528 bond->dev->name, bond->dev->name);
531 ret = -EINVAL; 529 ret = -EINVAL;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca31286aa028..a9f4f9f4d8ce 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -35,6 +35,8 @@
35 35
36#define BOND_MAX_ARP_TARGETS 16 36#define BOND_MAX_ARP_TARGETS 16
37 37
38#define BOND_DEFAULT_MIIMON 100
39
38#define IS_UP(dev) \ 40#define IS_UP(dev) \
39 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 41 ((((dev)->flags & IFF_UP) == IFF_UP) && \
40 netif_running(dev) && \ 42 netif_running(dev) && \
@@ -55,6 +57,11 @@
55 ((mode) == BOND_MODE_TLB) || \ 57 ((mode) == BOND_MODE_TLB) || \
56 ((mode) == BOND_MODE_ALB)) 58 ((mode) == BOND_MODE_ALB))
57 59
60#define BOND_NO_USES_ARP(mode) \
61 (((mode) == BOND_MODE_8023AD) || \
62 ((mode) == BOND_MODE_TLB) || \
63 ((mode) == BOND_MODE_ALB))
64
58#define TX_QUEUE_OVERRIDE(mode) \ 65#define TX_QUEUE_OVERRIDE(mode) \
59 (((mode) == BOND_MODE_ACTIVEBACKUP) || \ 66 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
60 ((mode) == BOND_MODE_ROUNDROBIN)) 67 ((mode) == BOND_MODE_ROUNDROBIN))
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3fc07cf2f62..77061eebb034 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
712 return 0; 712 return 0;
713} 713}
714 714
715static int c_can_get_berr_counter(const struct net_device *dev, 715static int __c_can_get_berr_counter(const struct net_device *dev,
716 struct can_berr_counter *bec) 716 struct can_berr_counter *bec)
717{ 717{
718 unsigned int reg_err_counter; 718 unsigned int reg_err_counter;
719 struct c_can_priv *priv = netdev_priv(dev); 719 struct c_can_priv *priv = netdev_priv(dev);
720 720
721 c_can_pm_runtime_get_sync(priv);
722
723 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 721 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
724 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 722 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
725 ERR_CNT_REC_SHIFT; 723 ERR_CNT_REC_SHIFT;
726 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 724 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
727 725
726 return 0;
727}
728
729static int c_can_get_berr_counter(const struct net_device *dev,
730 struct can_berr_counter *bec)
731{
732 struct c_can_priv *priv = netdev_priv(dev);
733 int err;
734
735 c_can_pm_runtime_get_sync(priv);
736 err = __c_can_get_berr_counter(dev, bec);
728 c_can_pm_runtime_put_sync(priv); 737 c_can_pm_runtime_put_sync(priv);
729 738
730 return 0; 739 return err;
731} 740}
732 741
733/* 742/*
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
754 if (!(val & (1 << (msg_obj_no - 1)))) { 763 if (!(val & (1 << (msg_obj_no - 1)))) {
755 can_get_echo_skb(dev, 764 can_get_echo_skb(dev,
756 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 765 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
766 c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
757 stats->tx_bytes += priv->read_reg(priv, 767 stats->tx_bytes += priv->read_reg(priv,
758 C_CAN_IFACE(MSGCTRL_REG, 0)) 768 C_CAN_IFACE(MSGCTRL_REG, 0))
759 & IF_MCONT_DLC_MASK; 769 & IF_MCONT_DLC_MASK;
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev,
872 if (unlikely(!skb)) 882 if (unlikely(!skb))
873 return 0; 883 return 0;
874 884
875 c_can_get_berr_counter(dev, &bec); 885 __c_can_get_berr_counter(dev, &bec);
876 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 886 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
877 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> 887 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
878 ERR_CNT_RP_SHIFT; 888 ERR_CNT_RP_SHIFT;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ae08cf129ebb..aaed97bee471 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
1020 dev_err(&pdev->dev, "no ipg clock defined\n"); 1020 dev_err(&pdev->dev, "no ipg clock defined\n");
1021 return PTR_ERR(clk_ipg); 1021 return PTR_ERR(clk_ipg);
1022 } 1022 }
1023 clock_freq = clk_get_rate(clk_ipg);
1024 1023
1025 clk_per = devm_clk_get(&pdev->dev, "per"); 1024 clk_per = devm_clk_get(&pdev->dev, "per");
1026 if (IS_ERR(clk_per)) { 1025 if (IS_ERR(clk_per)) {
1027 dev_err(&pdev->dev, "no per clock defined\n"); 1026 dev_err(&pdev->dev, "no per clock defined\n");
1028 return PTR_ERR(clk_per); 1027 return PTR_ERR(clk_per);
1029 } 1028 }
1029 clock_freq = clk_get_rate(clk_per);
1030 } 1030 }
1031 1031
1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7164a999f50f..f17c3018b7c7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
494 uint8_t isrc, status; 494 uint8_t isrc, status;
495 int n = 0; 495 int n = 0;
496 496
497 /* Shared interrupts and IRQ off? */
498 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
499 return IRQ_NONE;
500
501 if (priv->pre_irq) 497 if (priv->pre_irq)
502 priv->pre_irq(priv); 498 priv->pre_irq(priv);
503 499
500 /* Shared interrupts and IRQ off? */
501 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
502 goto out;
503
504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) && 504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
505 (n < SJA1000_MAX_IRQ)) { 505 (n < SJA1000_MAX_IRQ)) {
506 n++; 506
507 status = priv->read_reg(priv, SJA1000_SR); 507 status = priv->read_reg(priv, SJA1000_SR);
508 /* check for absent controller due to hw unplug */ 508 /* check for absent controller due to hw unplug */
509 if (status == 0xFF && sja1000_is_absent(priv)) 509 if (status == 0xFF && sja1000_is_absent(priv))
510 return IRQ_NONE; 510 goto out;
511 511
512 if (isrc & IRQ_WUI) 512 if (isrc & IRQ_WUI)
513 netdev_warn(dev, "wakeup interrupt\n"); 513 netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
535 status = priv->read_reg(priv, SJA1000_SR); 535 status = priv->read_reg(priv, SJA1000_SR);
536 /* check for absent controller */ 536 /* check for absent controller */
537 if (status == 0xFF && sja1000_is_absent(priv)) 537 if (status == 0xFF && sja1000_is_absent(priv))
538 return IRQ_NONE; 538 goto out;
539 } 539 }
540 } 540 }
541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { 541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
543 if (sja1000_err(dev, isrc, status)) 543 if (sja1000_err(dev, isrc, status))
544 break; 544 break;
545 } 545 }
546 n++;
546 } 547 }
547 548out:
548 if (priv->post_irq) 549 if (priv->post_irq)
549 priv->post_irq(priv); 550 priv->post_irq(priv);
550 551
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a9e068423ba0..369b736dde05 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10629,10 +10629,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10629static ssize_t tg3_show_temp(struct device *dev, 10629static ssize_t tg3_show_temp(struct device *dev,
10630 struct device_attribute *devattr, char *buf) 10630 struct device_attribute *devattr, char *buf)
10631{ 10631{
10632 struct pci_dev *pdev = to_pci_dev(dev);
10633 struct net_device *netdev = pci_get_drvdata(pdev);
10634 struct tg3 *tp = netdev_priv(netdev);
10635 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10632 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10633 struct tg3 *tp = dev_get_drvdata(dev);
10636 u32 temperature; 10634 u32 temperature;
10637 10635
10638 spin_lock_bh(&tp->lock); 10636 spin_lock_bh(&tp->lock);
@@ -10650,29 +10648,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10650static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, 10648static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10651 TG3_TEMP_MAX_OFFSET); 10649 TG3_TEMP_MAX_OFFSET);
10652 10650
10653static struct attribute *tg3_attributes[] = { 10651static struct attribute *tg3_attrs[] = {
10654 &sensor_dev_attr_temp1_input.dev_attr.attr, 10652 &sensor_dev_attr_temp1_input.dev_attr.attr,
10655 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10653 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10656 &sensor_dev_attr_temp1_max.dev_attr.attr, 10654 &sensor_dev_attr_temp1_max.dev_attr.attr,
10657 NULL 10655 NULL
10658}; 10656};
10659 10657ATTRIBUTE_GROUPS(tg3);
10660static const struct attribute_group tg3_group = {
10661 .attrs = tg3_attributes,
10662};
10663 10658
10664static void tg3_hwmon_close(struct tg3 *tp) 10659static void tg3_hwmon_close(struct tg3 *tp)
10665{ 10660{
10666 if (tp->hwmon_dev) { 10661 if (tp->hwmon_dev) {
10667 hwmon_device_unregister(tp->hwmon_dev); 10662 hwmon_device_unregister(tp->hwmon_dev);
10668 tp->hwmon_dev = NULL; 10663 tp->hwmon_dev = NULL;
10669 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10670 } 10664 }
10671} 10665}
10672 10666
10673static void tg3_hwmon_open(struct tg3 *tp) 10667static void tg3_hwmon_open(struct tg3 *tp)
10674{ 10668{
10675 int i, err; 10669 int i;
10676 u32 size = 0; 10670 u32 size = 0;
10677 struct pci_dev *pdev = tp->pdev; 10671 struct pci_dev *pdev = tp->pdev;
10678 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10672 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10684,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
10690 if (!size) 10684 if (!size)
10691 return; 10685 return;
10692 10686
10693 /* Register hwmon sysfs hooks */ 10687 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10694 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); 10688 tp, tg3_groups);
10695 if (err) {
10696 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10697 return;
10698 }
10699
10700 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10701 if (IS_ERR(tp->hwmon_dev)) { 10689 if (IS_ERR(tp->hwmon_dev)) {
10702 tp->hwmon_dev = NULL; 10690 tp->hwmon_dev = NULL;
10703 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10704 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10705 } 10692 }
10706} 10693}
10707 10694
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f4825db5d179..5878df619b53 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -503,6 +503,7 @@ struct be_adapter {
503}; 503};
504 504
505#define be_physfn(adapter) (!adapter->virtfn) 505#define be_physfn(adapter) (!adapter->virtfn)
506#define be_virtfn(adapter) (adapter->virtfn)
506#define sriov_enabled(adapter) (adapter->num_vfs > 0) 507#define sriov_enabled(adapter) (adapter->num_vfs > 0)
507#define sriov_want(adapter) (be_physfn(adapter) && \ 508#define sriov_want(adapter) (be_physfn(adapter) && \
508 (num_vfs || pci_num_vf(adapter->pdev))) 509 (num_vfs || pci_num_vf(adapter->pdev)))
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index dbcd5262c016..e0e8bc1ef14c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1032 } else { 1032 } else {
1033 req->hdr.version = 2; 1033 req->hdr.version = 2;
1034 req->page_size = 1; /* 1 for 4K */ 1034 req->page_size = 1; /* 1 for 4K */
1035
1036 /* coalesce-wm field in this cmd is not relevant to Lancer.
1037 * Lancer uses COMMON_MODIFY_CQ to set this field
1038 */
1039 if (!lancer_chip(adapter))
1040 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1041 ctxt, coalesce_wm);
1035 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1042 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1036 no_delay); 1043 no_delay);
1037 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1044 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index abde97471636..fee64bf10446 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2658,8 +2658,8 @@ static int be_close(struct net_device *netdev)
2658 2658
2659 be_roce_dev_close(adapter); 2659 be_roce_dev_close(adapter);
2660 2660
2661 for_all_evt_queues(adapter, eqo, i) { 2661 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2662 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 2662 for_all_evt_queues(adapter, eqo, i) {
2663 napi_disable(&eqo->napi); 2663 napi_disable(&eqo->napi);
2664 be_disable_busy_poll(eqo); 2664 be_disable_busy_poll(eqo);
2665 } 2665 }
@@ -3253,12 +3253,10 @@ static int be_mac_setup(struct be_adapter *adapter)
3253 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); 3253 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3254 } 3254 }
3255 3255
3256 /* On BE3 VFs this cmd may fail due to lack of privilege. 3256 /* For BE3-R VFs, the PF programs the initial MAC address */
3257 * Ignore the failure as in this case pmac_id is fetched 3257 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3258 * in the IFACE_CREATE cmd. 3258 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3259 */ 3259 &adapter->pmac_id[0], 0);
3260 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3261 &adapter->pmac_id[0], 0);
3262 return 0; 3260 return 0;
3263} 3261}
3264 3262
@@ -4599,6 +4597,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4599 if (adapter->wol) 4597 if (adapter->wol)
4600 be_setup_wol(adapter, true); 4598 be_setup_wol(adapter, true);
4601 4599
4600 be_intr_set(adapter, false);
4602 cancel_delayed_work_sync(&adapter->func_recovery_work); 4601 cancel_delayed_work_sync(&adapter->func_recovery_work);
4603 4602
4604 netif_device_detach(netdev); 4603 netif_device_detach(netdev);
@@ -4634,6 +4633,7 @@ static int be_resume(struct pci_dev *pdev)
4634 if (status) 4633 if (status)
4635 return status; 4634 return status;
4636 4635
4636 be_intr_set(adapter, true);
4637 /* tell fw we're ready to fire cmds */ 4637 /* tell fw we're ready to fire cmds */
4638 status = be_cmd_fw_init(adapter); 4638 status = be_cmd_fw_init(adapter);
4639 if (status) 4639 if (status)
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 58c147271a36..f9313b36c887 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -83,6 +83,11 @@ struct e1000_adapter;
83 83
84#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
85 85
86/*
87 * Count for polling __E1000_RESET condition every 10-20msec.
88 */
89#define E1000_CHECK_RESET_COUNT 50
90
86/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
87#define E1000_DEFAULT_TXD 256 92#define E1000_DEFAULT_TXD 256
88#define E1000_MAX_TXD 256 93#define E1000_MAX_TXD 256
@@ -312,8 +317,6 @@ struct e1000_adapter {
312 struct delayed_work watchdog_task; 317 struct delayed_work watchdog_task;
313 struct delayed_work fifo_stall_task; 318 struct delayed_work fifo_stall_task;
314 struct delayed_work phy_info_task; 319 struct delayed_work phy_info_task;
315
316 struct mutex mutex;
317}; 320};
318 321
319enum e1000_state_t { 322enum e1000_state_t {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e38622825fa7..46e6544ed1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
494{ 494{
495 set_bit(__E1000_DOWN, &adapter->flags); 495 set_bit(__E1000_DOWN, &adapter->flags);
496 496
497 /* Only kill reset task if adapter is not resetting */
498 if (!test_bit(__E1000_RESETTING, &adapter->flags))
499 cancel_work_sync(&adapter->reset_task);
500
501 cancel_delayed_work_sync(&adapter->watchdog_task); 497 cancel_delayed_work_sync(&adapter->watchdog_task);
498
499 /*
500 * Since the watchdog task can reschedule other tasks, we should cancel
501 * it first, otherwise we can run into the situation when a work is
502 * still running after the adapter has been turned down.
503 */
504
502 cancel_delayed_work_sync(&adapter->phy_info_task); 505 cancel_delayed_work_sync(&adapter->phy_info_task);
503 cancel_delayed_work_sync(&adapter->fifo_stall_task); 506 cancel_delayed_work_sync(&adapter->fifo_stall_task);
507
508 /* Only kill reset task if adapter is not resetting */
509 if (!test_bit(__E1000_RESETTING, &adapter->flags))
510 cancel_work_sync(&adapter->reset_task);
504} 511}
505 512
506void e1000_down(struct e1000_adapter *adapter) 513void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
544 e1000_clean_all_rx_rings(adapter); 551 e1000_clean_all_rx_rings(adapter);
545} 552}
546 553
547static void e1000_reinit_safe(struct e1000_adapter *adapter)
548{
549 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
550 msleep(1);
551 mutex_lock(&adapter->mutex);
552 e1000_down(adapter);
553 e1000_up(adapter);
554 mutex_unlock(&adapter->mutex);
555 clear_bit(__E1000_RESETTING, &adapter->flags);
556}
557
558void e1000_reinit_locked(struct e1000_adapter *adapter) 554void e1000_reinit_locked(struct e1000_adapter *adapter)
559{ 555{
560 /* if rtnl_lock is not held the call path is bogus */
561 ASSERT_RTNL();
562 WARN_ON(in_interrupt()); 556 WARN_ON(in_interrupt());
563 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 557 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
564 msleep(1); 558 msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
1316 e1000_irq_disable(adapter); 1310 e1000_irq_disable(adapter);
1317 1311
1318 spin_lock_init(&adapter->stats_lock); 1312 spin_lock_init(&adapter->stats_lock);
1319 mutex_init(&adapter->mutex);
1320 1313
1321 set_bit(__E1000_DOWN, &adapter->flags); 1314 set_bit(__E1000_DOWN, &adapter->flags);
1322 1315
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
1440{ 1433{
1441 struct e1000_adapter *adapter = netdev_priv(netdev); 1434 struct e1000_adapter *adapter = netdev_priv(netdev);
1442 struct e1000_hw *hw = &adapter->hw; 1435 struct e1000_hw *hw = &adapter->hw;
1436 int count = E1000_CHECK_RESET_COUNT;
1437
1438 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1439 usleep_range(10000, 20000);
1443 1440
1444 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 1441 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1445 e1000_down(adapter); 1442 e1000_down(adapter);
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
2325 struct e1000_adapter *adapter = container_of(work, 2322 struct e1000_adapter *adapter = container_of(work,
2326 struct e1000_adapter, 2323 struct e1000_adapter,
2327 phy_info_task.work); 2324 phy_info_task.work);
2328 if (test_bit(__E1000_DOWN, &adapter->flags)) 2325
2329 return;
2330 mutex_lock(&adapter->mutex);
2331 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2326 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2332 mutex_unlock(&adapter->mutex);
2333} 2327}
2334 2328
2335/** 2329/**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2345 struct net_device *netdev = adapter->netdev; 2339 struct net_device *netdev = adapter->netdev;
2346 u32 tctl; 2340 u32 tctl;
2347 2341
2348 if (test_bit(__E1000_DOWN, &adapter->flags))
2349 return;
2350 mutex_lock(&adapter->mutex);
2351 if (atomic_read(&adapter->tx_fifo_stall)) { 2342 if (atomic_read(&adapter->tx_fifo_stall)) {
2352 if ((er32(TDT) == er32(TDH)) && 2343 if ((er32(TDT) == er32(TDH)) &&
2353 (er32(TDFT) == er32(TDFH)) && 2344 (er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2368 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2359 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2369 } 2360 }
2370 } 2361 }
2371 mutex_unlock(&adapter->mutex);
2372} 2362}
2373 2363
2374bool e1000_has_link(struct e1000_adapter *adapter) 2364bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
2422 struct e1000_tx_ring *txdr = adapter->tx_ring; 2412 struct e1000_tx_ring *txdr = adapter->tx_ring;
2423 u32 link, tctl; 2413 u32 link, tctl;
2424 2414
2425 if (test_bit(__E1000_DOWN, &adapter->flags))
2426 return;
2427
2428 mutex_lock(&adapter->mutex);
2429 link = e1000_has_link(adapter); 2415 link = e1000_has_link(adapter);
2430 if ((netif_carrier_ok(netdev)) && link) 2416 if ((netif_carrier_ok(netdev)) && link)
2431 goto link_up; 2417 goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
2516 adapter->tx_timeout_count++; 2502 adapter->tx_timeout_count++;
2517 schedule_work(&adapter->reset_task); 2503 schedule_work(&adapter->reset_task);
2518 /* exit immediately since reset is imminent */ 2504 /* exit immediately since reset is imminent */
2519 goto unlock; 2505 return;
2520 } 2506 }
2521 } 2507 }
2522 2508
@@ -2544,9 +2530,6 @@ link_up:
2544 /* Reschedule the task */ 2530 /* Reschedule the task */
2545 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2531 if (!test_bit(__E1000_DOWN, &adapter->flags))
2546 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2532 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2547
2548unlock:
2549 mutex_unlock(&adapter->mutex);
2550} 2533}
2551 2534
2552enum latency_range { 2535enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
3495 struct e1000_adapter *adapter = 3478 struct e1000_adapter *adapter =
3496 container_of(work, struct e1000_adapter, reset_task); 3479 container_of(work, struct e1000_adapter, reset_task);
3497 3480
3498 if (test_bit(__E1000_DOWN, &adapter->flags))
3499 return;
3500 e_err(drv, "Reset adapter\n"); 3481 e_err(drv, "Reset adapter\n");
3501 e1000_reinit_safe(adapter); 3482 e1000_reinit_locked(adapter);
3502} 3483}
3503 3484
3504/** 3485/**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4963 netif_device_detach(netdev); 4944 netif_device_detach(netdev);
4964 4945
4965 if (netif_running(netdev)) { 4946 if (netif_running(netdev)) {
4947 int count = E1000_CHECK_RESET_COUNT;
4948
4949 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4950 usleep_range(10000, 20000);
4951
4966 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4952 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4967 e1000_down(adapter); 4953 e1000_down(adapter);
4968 } 4954 }
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b0f3666b1d7f..c3143da497c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2062{ 2062{
2063 struct igb_adapter *adapter = netdev_priv(netdev); 2063 struct igb_adapter *adapter = netdev_priv(netdev);
2064 2064
2065 wol->supported = WAKE_UCAST | WAKE_MCAST |
2066 WAKE_BCAST | WAKE_MAGIC |
2067 WAKE_PHY;
2068 wol->wolopts = 0; 2065 wol->wolopts = 0;
2069 2066
2070 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) 2067 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2071 return; 2068 return;
2072 2069
2070 wol->supported = WAKE_UCAST | WAKE_MCAST |
2071 WAKE_BCAST | WAKE_MAGIC |
2072 WAKE_PHY;
2073
2073 /* apply any specific unsupported masks here */ 2074 /* apply any specific unsupported masks here */
2074 switch (adapter->hw.device_id) { 2075 switch (adapter->hw.device_id) {
2075 default: 2076 default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0c55079ebee3..cc06854296a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4251 rx_ring->l2_accel_priv = NULL; 4251 rx_ring->l2_accel_priv = NULL;
4252} 4252}
4253 4253
4254int ixgbe_fwd_ring_down(struct net_device *vdev, 4254static int ixgbe_fwd_ring_down(struct net_device *vdev,
4255 struct ixgbe_fwd_adapter *accel) 4255 struct ixgbe_fwd_adapter *accel)
4256{ 4256{
4257 struct ixgbe_adapter *adapter = accel->real_adapter; 4257 struct ixgbe_adapter *adapter = accel->real_adapter;
4258 unsigned int rxbase = accel->rx_base_queue; 4258 unsigned int rxbase = accel->rx_base_queue;
@@ -7986,10 +7986,9 @@ skip_sriov:
7986 NETIF_F_TSO | 7986 NETIF_F_TSO |
7987 NETIF_F_TSO6 | 7987 NETIF_F_TSO6 |
7988 NETIF_F_RXHASH | 7988 NETIF_F_RXHASH |
7989 NETIF_F_RXCSUM | 7989 NETIF_F_RXCSUM;
7990 NETIF_F_HW_L2FW_DOFFLOAD;
7991 7990
7992 netdev->hw_features = netdev->features; 7991 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
7993 7992
7994 switch (adapter->hw.mac.type) { 7993 switch (adapter->hw.mac.type) {
7995 case ixgbe_mac_82599EB: 7994 case ixgbe_mac_82599EB:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be9..39217e5ff7dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); 46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
49static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
49 50
50/** 51/**
51 * ixgbe_identify_phy_generic - Get physical layer module 52 * ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
1164 * 1165 *
1165 * Searches for and identifies the QSFP module and assigns appropriate PHY type 1166 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1166 **/ 1167 **/
1167s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) 1168static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1168{ 1169{
1169 struct ixgbe_adapter *adapter = hw->back; 1170 struct ixgbe_adapter *adapter = hw->back;
1170 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1171 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index aae900a256da..fffcbdd2bf0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
148s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
149s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 148s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
150 u16 *list_offset, 149 u16 *list_offset,
151 u16 *data_offset); 150 u16 *data_offset);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 40626690e8a8..c11d063473e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
140{ 140{
141 struct mlx4_en_priv *priv = netdev_priv(dev); 141 struct mlx4_en_priv *priv = netdev_priv(dev);
142 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
143 struct mlx4_en_tx_ring *tx_ring;
144 int i, carrier_ok; 143 int i, carrier_ok;
145 144
146 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); 145 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
150 carrier_ok = netif_carrier_ok(dev); 149 carrier_ok = netif_carrier_ok(dev);
151 150
152 netif_carrier_off(dev); 151 netif_carrier_off(dev);
153retry_tx:
154 /* Wait until all tx queues are empty. 152 /* Wait until all tx queues are empty.
155 * there should not be any additional incoming traffic 153 * there should not be any additional incoming traffic
156 * since we turned the carrier off */ 154 * since we turned the carrier off */
157 msleep(200); 155 msleep(200);
158 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
159 tx_ring = priv->tx_ring[i];
160 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
161 goto retry_tx;
162 }
163 156
164 if (priv->mdev->dev->caps.flags & 157 if (priv->mdev->dev->caps.flags &
165 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 158 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f2a2128165dd..737c1a881f78 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
678 le32_to_cpu(txd->opts1) & 0xffff, 678 le32_to_cpu(txd->opts1) & 0xffff,
679 PCI_DMA_TODEVICE); 679 PCI_DMA_TODEVICE);
680 680
681 bytes_compl += skb->len;
682 pkts_compl++;
683
684 if (status & LastFrag) { 681 if (status & LastFrag) {
685 if (status & (TxError | TxFIFOUnder)) { 682 if (status & (TxError | TxFIFOUnder)) {
686 netif_dbg(cp, tx_err, cp->dev, 683 netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
702 netif_dbg(cp, tx_done, cp->dev, 699 netif_dbg(cp, tx_done, cp->dev,
703 "tx done, slot %d\n", tx_tail); 700 "tx done, slot %d\n", tx_tail);
704 } 701 }
702 bytes_compl += skb->len;
703 pkts_compl++;
705 dev_kfree_skb_irq(skb); 704 dev_kfree_skb_irq(skb);
706 } 705 }
707 706
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 799387570766..c737f0ea5de7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3465 rtl_writephy(tp, 0x14, 0x9065); 3465 rtl_writephy(tp, 0x14, 0x9065);
3466 rtl_writephy(tp, 0x14, 0x1065); 3466 rtl_writephy(tp, 0x14, 0x1065);
3467 3467
3468 /* Check ALDPS bit, disable it if enabled */
3469 rtl_writephy(tp, 0x1f, 0x0a43);
3470 if (rtl_readphy(tp, 0x10) & 0x0004)
3471 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3472
3468 rtl_writephy(tp, 0x1f, 0x0000); 3473 rtl_writephy(tp, 0x1f, 0x0000);
3469} 3474}
3470 3475
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 656a3277c2b2..15816cacb548 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
75 unsigned long last_update; 75 unsigned long last_update;
76 struct device *device; 76 struct device *device;
77 struct efx_mcdi_mon_attribute *attrs; 77 struct efx_mcdi_mon_attribute *attrs;
78 struct attribute_group group;
79 const struct attribute_group *groups[2];
78 unsigned int n_attrs; 80 unsigned int n_attrs;
79}; 81};
80 82
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 4cc5d95b2a5a..d72ad4fc3617 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
139 return rc; 139 return rc;
140} 140}
141 141
142static ssize_t efx_mcdi_mon_show_name(struct device *dev,
143 struct device_attribute *attr,
144 char *buf)
145{
146 return sprintf(buf, "%s\n", KBUILD_MODNAME);
147}
148
149static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, 142static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
150 efx_dword_t *entry) 143 efx_dword_t *entry)
151{ 144{
152 struct efx_nic *efx = dev_get_drvdata(dev); 145 struct efx_nic *efx = dev_get_drvdata(dev->parent);
153 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 146 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
154 int rc; 147 int rc;
155 148
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
263 efx_mcdi_sensor_type[mon_attr->type].label); 256 efx_mcdi_sensor_type[mon_attr->type].label);
264} 257}
265 258
266static int 259static void
267efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, 260efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
268 ssize_t (*reader)(struct device *, 261 ssize_t (*reader)(struct device *,
269 struct device_attribute *, char *), 262 struct device_attribute *, char *),
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
272{ 265{
273 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 266 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
274 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; 267 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
275 int rc;
276 268
277 strlcpy(attr->name, name, sizeof(attr->name)); 269 strlcpy(attr->name, name, sizeof(attr->name));
278 attr->index = index; 270 attr->index = index;
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
286 attr->dev_attr.attr.name = attr->name; 278 attr->dev_attr.attr.name = attr->name;
287 attr->dev_attr.attr.mode = S_IRUGO; 279 attr->dev_attr.attr.mode = S_IRUGO;
288 attr->dev_attr.show = reader; 280 attr->dev_attr.show = reader;
289 rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); 281 hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
290 if (rc == 0)
291 ++hwmon->n_attrs;
292 return rc;
293} 282}
294 283
295int efx_mcdi_mon_probe(struct efx_nic *efx) 284int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
338 efx_mcdi_mon_update(efx); 327 efx_mcdi_mon_update(efx);
339 328
340 /* Allocate space for the maximum possible number of 329 /* Allocate space for the maximum possible number of
341 * attributes for this set of sensors: name of the driver plus 330 * attributes for this set of sensors:
342 * value, min, max, crit, alarm and label for each sensor. 331 * value, min, max, crit, alarm and label for each sensor.
343 */ 332 */
344 n_attrs = 1 + 6 * n_sensors; 333 n_attrs = 6 * n_sensors;
345 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); 334 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
346 if (!hwmon->attrs) { 335 if (!hwmon->attrs) {
347 rc = -ENOMEM; 336 rc = -ENOMEM;
348 goto fail; 337 goto fail;
349 } 338 }
350 339 hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
351 hwmon->device = hwmon_device_register(&efx->pci_dev->dev); 340 GFP_KERNEL);
352 if (IS_ERR(hwmon->device)) { 341 if (!hwmon->group.attrs) {
353 rc = PTR_ERR(hwmon->device); 342 rc = -ENOMEM;
354 goto fail; 343 goto fail;
355 } 344 }
356 345
357 rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
358 if (rc)
359 goto fail;
360
361 for (i = 0, j = -1, type = -1; ; i++) { 346 for (i = 0, j = -1, type = -1; ; i++) {
362 enum efx_hwmon_type hwmon_type; 347 enum efx_hwmon_type hwmon_type;
363 const char *hwmon_prefix; 348 const char *hwmon_prefix;
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
372 page = type / 32; 357 page = type / 32;
373 j = -1; 358 j = -1;
374 if (page == n_pages) 359 if (page == n_pages)
375 return 0; 360 goto hwmon_register;
376 361
377 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, 362 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
378 page); 363 page);
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
453 if (min1 != max1) { 438 if (min1 != max1) {
454 snprintf(name, sizeof(name), "%s%u_input", 439 snprintf(name, sizeof(name), "%s%u_input",
455 hwmon_prefix, hwmon_index); 440 hwmon_prefix, hwmon_index);
456 rc = efx_mcdi_mon_add_attr( 441 efx_mcdi_mon_add_attr(
457 efx, name, efx_mcdi_mon_show_value, i, type, 0); 442 efx, name, efx_mcdi_mon_show_value, i, type, 0);
458 if (rc)
459 goto fail;
460 443
461 if (hwmon_type != EFX_HWMON_POWER) { 444 if (hwmon_type != EFX_HWMON_POWER) {
462 snprintf(name, sizeof(name), "%s%u_min", 445 snprintf(name, sizeof(name), "%s%u_min",
463 hwmon_prefix, hwmon_index); 446 hwmon_prefix, hwmon_index);
464 rc = efx_mcdi_mon_add_attr( 447 efx_mcdi_mon_add_attr(
465 efx, name, efx_mcdi_mon_show_limit, 448 efx, name, efx_mcdi_mon_show_limit,
466 i, type, min1); 449 i, type, min1);
467 if (rc)
468 goto fail;
469 } 450 }
470 451
471 snprintf(name, sizeof(name), "%s%u_max", 452 snprintf(name, sizeof(name), "%s%u_max",
472 hwmon_prefix, hwmon_index); 453 hwmon_prefix, hwmon_index);
473 rc = efx_mcdi_mon_add_attr( 454 efx_mcdi_mon_add_attr(
474 efx, name, efx_mcdi_mon_show_limit, 455 efx, name, efx_mcdi_mon_show_limit,
475 i, type, max1); 456 i, type, max1);
476 if (rc)
477 goto fail;
478 457
479 if (min2 != max2) { 458 if (min2 != max2) {
480 /* Assume max2 is critical value. 459 /* Assume max2 is critical value.
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
482 */ 461 */
483 snprintf(name, sizeof(name), "%s%u_crit", 462 snprintf(name, sizeof(name), "%s%u_crit",
484 hwmon_prefix, hwmon_index); 463 hwmon_prefix, hwmon_index);
485 rc = efx_mcdi_mon_add_attr( 464 efx_mcdi_mon_add_attr(
486 efx, name, efx_mcdi_mon_show_limit, 465 efx, name, efx_mcdi_mon_show_limit,
487 i, type, max2); 466 i, type, max2);
488 if (rc)
489 goto fail;
490 } 467 }
491 } 468 }
492 469
493 snprintf(name, sizeof(name), "%s%u_alarm", 470 snprintf(name, sizeof(name), "%s%u_alarm",
494 hwmon_prefix, hwmon_index); 471 hwmon_prefix, hwmon_index);
495 rc = efx_mcdi_mon_add_attr( 472 efx_mcdi_mon_add_attr(
496 efx, name, efx_mcdi_mon_show_alarm, i, type, 0); 473 efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
497 if (rc)
498 goto fail;
499 474
500 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && 475 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
501 efx_mcdi_sensor_type[type].label) { 476 efx_mcdi_sensor_type[type].label) {
502 snprintf(name, sizeof(name), "%s%u_label", 477 snprintf(name, sizeof(name), "%s%u_label",
503 hwmon_prefix, hwmon_index); 478 hwmon_prefix, hwmon_index);
504 rc = efx_mcdi_mon_add_attr( 479 efx_mcdi_mon_add_attr(
505 efx, name, efx_mcdi_mon_show_label, i, type, 0); 480 efx, name, efx_mcdi_mon_show_label, i, type, 0);
506 if (rc)
507 goto fail;
508 } 481 }
509 } 482 }
510 483
484hwmon_register:
485 hwmon->groups[0] = &hwmon->group;
486 hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
487 KBUILD_MODNAME, NULL,
488 hwmon->groups);
489 if (IS_ERR(hwmon->device)) {
490 rc = PTR_ERR(hwmon->device);
491 goto fail;
492 }
493
494 return 0;
495
511fail: 496fail:
512 efx_mcdi_mon_remove(efx); 497 efx_mcdi_mon_remove(efx);
513 return rc; 498 return rc;
@@ -516,14 +501,11 @@ fail:
516void efx_mcdi_mon_remove(struct efx_nic *efx) 501void efx_mcdi_mon_remove(struct efx_nic *efx)
517{ 502{
518 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 503 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
519 unsigned int i;
520 504
521 for (i = 0; i < hwmon->n_attrs; i++)
522 device_remove_file(&efx->pci_dev->dev,
523 &hwmon->attrs[i].dev_attr);
524 kfree(hwmon->attrs);
525 if (hwmon->device) 505 if (hwmon->device)
526 hwmon_device_unregister(hwmon->device); 506 hwmon_device_unregister(hwmon->device);
507 kfree(hwmon->attrs);
508 kfree(hwmon->group.attrs);
527 efx_nic_free_buffer(efx, &hwmon->dma_buf); 509 efx_nic_free_buffer(efx, &hwmon->dma_buf);
528} 510}
529 511
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index c9d4c872e81d..749654b976bc 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -46,7 +46,8 @@
46 defined(CONFIG_MACH_LITTLETON) ||\ 46 defined(CONFIG_MACH_LITTLETON) ||\
47 defined(CONFIG_MACH_ZYLONITE2) ||\ 47 defined(CONFIG_MACH_ZYLONITE2) ||\
48 defined(CONFIG_ARCH_VIPER) ||\ 48 defined(CONFIG_ARCH_VIPER) ||\
49 defined(CONFIG_MACH_STARGATE2) 49 defined(CONFIG_MACH_STARGATE2) ||\
50 defined(CONFIG_ARCH_VERSATILE)
50 51
51#include <asm/mach-types.h> 52#include <asm/mach-types.h>
52 53
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
154#define SMC_outl(v, a, r) writel(v, (a) + (r)) 155#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 156#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 157#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
158#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
159#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
157#define SMC_IRQ_FLAGS (-1) /* from resource */ 160#define SMC_IRQ_FLAGS (-1) /* from resource */
158 161
159/* We actually can't write halfwords properly if not word aligned */ 162/* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
206#define RPC_LSA_DEFAULT RPC_LED_TX_RX 209#define RPC_LSA_DEFAULT RPC_LED_TX_RX
207#define RPC_LSB_DEFAULT RPC_LED_100_10 210#define RPC_LSB_DEFAULT RPC_LED_100_10
208 211
209#elif defined(CONFIG_ARCH_VERSATILE)
210
211#define SMC_CAN_USE_8BIT 1
212#define SMC_CAN_USE_16BIT 1
213#define SMC_CAN_USE_32BIT 1
214#define SMC_NOWAIT 1
215
216#define SMC_inb(a, r) readb((a) + (r))
217#define SMC_inw(a, r) readw((a) + (r))
218#define SMC_inl(a, r) readl((a) + (r))
219#define SMC_outb(v, a, r) writeb(v, (a) + (r))
220#define SMC_outw(v, a, r) writew(v, (a) + (r))
221#define SMC_outl(v, a, r) writel(v, (a) + (r))
222#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
223#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
224#define SMC_IRQ_FLAGS (-1) /* from resource */
225
226#elif defined(CONFIG_MN10300) 212#elif defined(CONFIG_MN10300)
227 213
228/* 214/*
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d022bf936572..ad61d26a44f3 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2172 unsigned int rx_done; 2172 unsigned int rx_done;
2173 unsigned long flags; 2173 unsigned long flags;
2174 2174
2175 spin_lock_irqsave(&vptr->lock, flags);
2176 /* 2175 /*
2177 * Do rx and tx twice for performance (taken from the VIA 2176 * Do rx and tx twice for performance (taken from the VIA
2178 * out-of-tree driver). 2177 * out-of-tree driver).
2179 */ 2178 */
2180 rx_done = velocity_rx_srv(vptr, budget / 2); 2179 rx_done = velocity_rx_srv(vptr, budget);
2181 velocity_tx_srv(vptr); 2180 spin_lock_irqsave(&vptr->lock, flags);
2182 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2183 velocity_tx_srv(vptr); 2181 velocity_tx_srv(vptr);
2184
2185 /* If budget not fully consumed, exit the polling mode */ 2182 /* If budget not fully consumed, exit the polling mode */
2186 if (rx_done < budget) { 2183 if (rx_done < budget) {
2187 napi_complete(napi); 2184 napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2342 if (ret < 0) 2339 if (ret < 0)
2343 goto out_free_tmp_vptr_1; 2340 goto out_free_tmp_vptr_1;
2344 2341
2342 napi_disable(&vptr->napi);
2343
2345 spin_lock_irqsave(&vptr->lock, flags); 2344 spin_lock_irqsave(&vptr->lock, flags);
2346 2345
2347 netif_stop_queue(dev); 2346 netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2362 2361
2363 velocity_give_many_rx_descs(vptr); 2362 velocity_give_many_rx_descs(vptr);
2364 2363
2364 napi_enable(&vptr->napi);
2365
2365 mac_enable_int(vptr->mac_regs); 2366 mac_enable_int(vptr->mac_regs);
2366 netif_start_queue(dev); 2367 netif_start_queue(dev);
2367 2368
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index dc76670c2f2a..9093004f9b63 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -744,7 +744,7 @@ err:
744 rcu_read_lock(); 744 rcu_read_lock();
745 vlan = rcu_dereference(q->vlan); 745 vlan = rcu_dereference(q->vlan);
746 if (vlan) 746 if (vlan)
747 vlan->dev->stats.tx_dropped++; 747 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
748 rcu_read_unlock(); 748 rcu_read_unlock();
749 749
750 return err; 750 return err;
@@ -767,7 +767,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
767 const struct sk_buff *skb, 767 const struct sk_buff *skb,
768 const struct iovec *iv, int len) 768 const struct iovec *iv, int len)
769{ 769{
770 struct macvlan_dev *vlan;
771 int ret; 770 int ret;
772 int vnet_hdr_len = 0; 771 int vnet_hdr_len = 0;
773 int vlan_offset = 0; 772 int vlan_offset = 0;
@@ -821,15 +820,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
821 copied += len; 820 copied += len;
822 821
823done: 822done:
824 rcu_read_lock();
825 vlan = rcu_dereference(q->vlan);
826 if (vlan) {
827 preempt_disable();
828 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
829 preempt_enable();
830 }
831 rcu_read_unlock();
832
833 return ret ? ret : copied; 823 return ret ? ret : copied;
834} 824}
835 825
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 508e4359338b..14372c65a7e8 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -64,6 +64,7 @@
64 64
65#define PHY_ID_VSC8234 0x000fc620 65#define PHY_ID_VSC8234 0x000fc620
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670
67#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
68#define PHY_ID_VSC8662 0x00070660 69#define PHY_ID_VSC8662 0x00070660
69#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
131 err = phy_write(phydev, MII_VSC8244_IMASK, 132 err = phy_write(phydev, MII_VSC8244_IMASK,
132 (phydev->drv->phy_id == PHY_ID_VSC8234 || 133 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
133 phydev->drv->phy_id == PHY_ID_VSC8244 || 134 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 ||
134 phydev->drv->phy_id == PHY_ID_VSC8574) ? 136 phydev->drv->phy_id == PHY_ID_VSC8574) ?
135 MII_VSC8244_IMASK_MASK : 137 MII_VSC8244_IMASK_MASK :
136 MII_VSC8221_IMASK_MASK); 138 MII_VSC8221_IMASK_MASK);
@@ -246,6 +248,18 @@ static struct phy_driver vsc82xx_driver[] = {
246 .config_intr = &vsc82xx_config_intr, 248 .config_intr = &vsc82xx_config_intr,
247 .driver = { .owner = THIS_MODULE,}, 249 .driver = { .owner = THIS_MODULE,},
248}, { 250}, {
251 .phy_id = PHY_ID_VSC8514,
252 .name = "Vitesse VSC8514",
253 .phy_id_mask = 0x000ffff0,
254 .features = PHY_GBIT_FEATURES,
255 .flags = PHY_HAS_INTERRUPT,
256 .config_init = &vsc824x_config_init,
257 .config_aneg = &vsc82x4_config_aneg,
258 .read_status = &genphy_read_status,
259 .ack_interrupt = &vsc824x_ack_interrupt,
260 .config_intr = &vsc82xx_config_intr,
261 .driver = { .owner = THIS_MODULE,},
262}, {
249 .phy_id = PHY_ID_VSC8574, 263 .phy_id = PHY_ID_VSC8574,
250 .name = "Vitesse VSC8574", 264 .name = "Vitesse VSC8574",
251 .phy_id_mask = 0x000ffff0, 265 .phy_id_mask = 0x000ffff0,
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit);
315static struct mdio_device_id __maybe_unused vitesse_tbl[] = { 329static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
316 { PHY_ID_VSC8234, 0x000ffff0 }, 330 { PHY_ID_VSC8234, 0x000ffff0 },
317 { PHY_ID_VSC8244, 0x000fffc0 }, 331 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 },
318 { PHY_ID_VSC8574, 0x000ffff0 }, 333 { PHY_ID_VSC8574, 0x000ffff0 },
319 { PHY_ID_VSC8662, 0x000ffff0 }, 334 { PHY_ID_VSC8662, 0x000ffff0 },
320 { PHY_ID_VSC8221, 0x000ffff0 }, 335 { PHY_ID_VSC8221, 0x000ffff0 },
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 34b0de09d881..736050d6b451 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
1366 return 0; 1366 return 0;
1367} 1367}
1368 1368
1369static void __team_carrier_check(struct team *team);
1370
1369static int team_user_linkup_option_set(struct team *team, 1371static int team_user_linkup_option_set(struct team *team,
1370 struct team_gsetter_ctx *ctx) 1372 struct team_gsetter_ctx *ctx)
1371{ 1373{
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
 
 	port->user.linkup = ctx->data.bool_val;
 	team_refresh_port_linkup(port);
+	__team_carrier_check(port->team);
 	return 0;
 }
 
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
 
 	port->user.linkup_enabled = ctx->data.bool_val;
 	team_refresh_port_linkup(port);
+	__team_carrier_check(port->team);
 	return 0;
 }
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7bab4de658a9..916241d16c67 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct sk_buff * skb = buf;
+
+	len -= sizeof(struct virtio_net_hdr);
+	skb_trim(skb, len);
+
+	return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+				   struct receive_queue *rq,
+				   void *buf,
+				   unsigned int len)
+{
+	struct page *page = buf;
+	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+	if (unlikely(!skb))
+		goto err;
+
+	return skb;
+
+err:
+	dev->stats.rx_dropped++;
+	give_pages(rq, page);
+	return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+					 struct receive_queue *rq,
+					 void *buf,
+					 unsigned int len)
+{
+	struct skb_vnet_hdr *hdr = buf;
+	int num_buf = hdr->mhdr.num_buffers;
+	struct page *page = virt_to_head_page(buf);
+	int offset = buf - page_address(page);
+	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+					       MERGE_BUFFER_LEN);
 	struct sk_buff *curr_skb = head_skb;
-	char *buf;
-	struct page *page;
-	int num_buf, len, offset;
 
-	num_buf = hdr->mhdr.num_buffers;
+	if (unlikely(!curr_skb))
+		goto err_skb;
+
 	while (--num_buf) {
-		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		int num_skb_frags;
+
 		buf = virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers missing\n",
-				 head_skb->dev->name, hdr->mhdr.num_buffers);
-			head_skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 dev->name, num_buf, hdr->mhdr.num_buffers);
+			dev->stats.rx_length_errors++;
+			goto err_buf;
 		}
 		if (unlikely(len > MERGE_BUFFER_LEN)) {
 			pr_debug("%s: rx error: merge buffer too long\n",
-				 head_skb->dev->name);
+				 dev->name);
 			len = MERGE_BUFFER_LEN;
 		}
+
+		page = virt_to_head_page(buf);
+		--rq->num;
+
+		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-			if (unlikely(!nskb)) {
-				head_skb->dev->stats.rx_dropped++;
-				return -ENOMEM;
-			}
+
+			if (unlikely(!nskb))
+				goto err_skb;
 			if (curr_skb == head_skb)
 				skb_shinfo(curr_skb)->frag_list = nskb;
 			else
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			head_skb->len += len;
 			head_skb->truesize += MERGE_BUFFER_LEN;
 		}
-		page = virt_to_head_page(buf);
-		offset = buf - (char *)page_address(page);
+		offset = buf - page_address(page);
 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
 			put_page(page);
 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
 					offset, len, MERGE_BUFFER_LEN);
 		}
+	}
+
+	return head_skb;
+
+err_skb:
+	put_page(page);
+	while (--num_buf) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 dev->name, num_buf);
+			dev->stats.rx_length_errors++;
+			break;
+		}
+		page = virt_to_head_page(buf);
+		put_page(page);
 		--rq->num;
 	}
-	return 0;
+err_buf:
+	dev->stats.rx_dropped++;
+	dev_kfree_skb(head_skb);
+	return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -362,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct page *page;
 	struct skb_vnet_hdr *hdr;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -377,33 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		return;
 	}
 
-	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-		skb = buf;
-		len -= sizeof(struct virtio_net_hdr);
-		skb_trim(skb, len);
-	} else if (vi->mergeable_rx_bufs) {
-		struct page *page = virt_to_head_page(buf);
-		skb = page_to_skb(rq, page,
-				  (char *)buf - (char *)page_address(page),
-				  len, MERGE_BUFFER_LEN);
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			put_page(page);
-			return;
-		}
-		if (receive_mergeable(rq, skb)) {
-			dev_kfree_skb(skb);
-			return;
-		}
-	} else {
-		page = buf;
-		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			give_pages(rq, page);
-			return;
-		}
-	}
+	if (vi->mergeable_rx_bufs)
+		skb = receive_mergeable(dev, rq, buf, len);
+	else if (vi->big_packets)
+		skb = receive_big(dev, rq, buf, len);
+	else
+		skb = receive_small(buf, len);
+
+	if (unlikely(!skb))
+		return;
 
 	hdr = skb_vnet_hdr(skb);
 
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
 				  sg, NULL))
-		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
 	kfree(buf);
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 919b6509455c..64f0e0d18b81 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
 #include <linux/udp.h>
 
 #include <net/tcp.h>
+#include <net/ip6_checksum.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>